diff --git a/.github/workflows/cluster-it-1c1d.yml b/.github/workflows/cluster-it-1c1d.yml index 5b1c3e1966745..e05c8680366b6 100644 --- a/.github/workflows/cluster-it-1c1d.yml +++ b/.github/workflows/cluster-it-1c1d.yml @@ -5,7 +5,8 @@ on: branches: - master - 'rel/1.*' - - pipe-meta-sync + - 'rc/1.*' + - 'dev/1.*' paths-ignore: - 'docs/**' - 'site/**' @@ -13,7 +14,8 @@ on: branches: - master - 'rel/1.*' - - pipe-meta-sync + - 'rc/1.*' + - 'dev/1.*' paths-ignore: - 'docs/**' - 'site/**' diff --git a/.github/workflows/cluster-it-1c1d1a.yml b/.github/workflows/cluster-it-1c1d1a.yml new file mode 100644 index 0000000000000..121bc37c49578 --- /dev/null +++ b/.github/workflows/cluster-it-1c1d1a.yml @@ -0,0 +1,64 @@ +name: Cluster IT - 1C1D1A + +on: + push: + branches: + - master + - 'rel/1.*' + - 'rc/1.*' + - 'dev/1.*' + paths-ignore: + - 'docs/**' + - 'site/**' + pull_request: + branches: + - master + - 'rel/1.*' + - 'rc/1.*' + - 'dev/1.*' + paths-ignore: + - 'docs/**' + - 'site/**' + # allow manually run the action: + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +env: + MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 + MAVEN_ARGS: --batch-mode --no-transfer-progress + DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} + +jobs: + AINode: + strategy: + fail-fast: false + max-parallel: 15 + matrix: + os: [ ubuntu-latest ] + runs-on: ${{ matrix.os }} + + steps: + - uses: actions/checkout@v4 + - name: Build AINode + shell: bash + run: mvn clean package -DskipTests -P with-ainode + - name: IT Test + shell: bash + run: | + mvn clean verify \ + -P with-integration-tests \ + -DskipUTs \ + -DintegrationTest.forkCount=2 \ + -pl integration-test \ + -am \ + -PAIClusterIT + - name: Upload Artifact + if: failure() + uses: actions/upload-artifact@v4 + with: + name: cluster-log-ainode-${{ matrix.os }} + path: integration-test/target/ainode-logs + retention-days: 30 diff --git a/.github/workflows/cluster-it-1c3d.yml b/.github/workflows/cluster-it-1c3d.yml index a850009e6f658..8f1adf10df4bc 100644 --- a/.github/workflows/cluster-it-1c3d.yml +++ b/.github/workflows/cluster-it-1c3d.yml @@ -5,7 +5,8 @@ on: branches: - master - 'rel/1.*' - - pipe-meta-sync + - 'rc/1.*' + - 'dev/1.*' paths-ignore: - 'docs/**' - 'site/**' @@ -13,7 +14,8 @@ on: branches: - master - 'rel/1.*' - - pipe-meta-sync + - 'rc/1.*' + - 'dev/1.*' paths-ignore: - 'docs/**' - 'site/**' @@ -35,7 +37,7 @@ jobs: fail-fast: false max-parallel: 20 matrix: - java: [ 8, 11, 17 ] + java: [ 17 ] runs-on: [self-hosted, iotdb] # group: self-hosted # labels: iotdb @@ -44,7 +46,7 @@ jobs: - name: Set up JDK ${{ matrix.java }} uses: actions/setup-java@v4 with: - distribution: liberica + distribution: oracle java-version: ${{ matrix.java }} - name: IT/UT Test shell: bash diff --git a/.github/workflows/compile-check.yml b/.github/workflows/compile-check.yml new file mode 100644 index 0000000000000..b20e8edd999a5 --- /dev/null +++ b/.github/workflows/compile-check.yml @@ -0,0 +1,52 @@ +# This workflow will compile IoTDB under jdk8 to check for compatibility issues + +name: Compile Check + +on: + push: + branches: + - master + - 'rel/1.*' + - 'rc/1.*' + paths-ignore: + - 'docs/**' + - 'site/**' + pull_request: + branches: + - master + - 'rel/1.*' + - 'rc/1.*' + paths-ignore: + - 'docs/**' + - 'site/**' + # allow manually run the action: + workflow_dispatch: + +concurrency: + group: ${{ 
github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +env: + MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 + MAVEN_ARGS: --batch-mode --no-transfer-progress + DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} + +jobs: + compile-check: + strategy: + fail-fast: false + matrix: + java: [8] + os: [ ubuntu-latest ] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + - name: Set up JDK ${{ matrix.java }} + uses: actions/setup-java@v4 + with: + distribution: liberica + java-version: ${{ matrix.java }} + - name: Compiler Test + shell: bash + run: | + mvn clean compile -P with-integration-tests -ntp diff --git a/.github/workflows/daily-it.yml b/.github/workflows/daily-it.yml index 3783e563e35f8..3deeb4834c4c9 100644 --- a/.github/workflows/daily-it.yml +++ b/.github/workflows/daily-it.yml @@ -49,3 +49,433 @@ jobs: name: cluster-log-java${{ matrix.java }}-${{ runner.os }} path: integration-test/target/cluster-logs retention-days: 3 + PipeAutoCreateSchema: + strategy: + fail-fast: false + max-parallel: 15 + matrix: + java: [17] + # StrongConsistencyClusterMode is ignored now because RatisConsensus has not been supported yet. + cluster: [LightWeightStandaloneMode, ScalableSingleNodeMode, HighPerformanceMode] + os: [ ubuntu-latest ] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + - name: Set up JDK ${{ matrix.java }} + uses: actions/setup-java@v4 + with: + distribution: liberica + java-version: ${{ matrix.java }} + - name: Cache Maven packages + uses: actions/cache@v4 + with: + path: ~/.m2 + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2- + - name: Sleep for a random duration between 0 and 10000 milliseconds + run: | + sleep $(( $(( RANDOM % 10000 + 1 )) / 1000)) + - name: IT Test + shell: bash + # we do not compile client-cpp for saving time, it is tested in client.yml + # we can skip influxdb-protocol because it has been tested separately in influxdb-protocol.yml + run: | + retry() { + local -i max_attempts=3 + local -i attempt=1 + local -i retry_sleep=5 + local test_output + + while [ $attempt -le $max_attempts ]; do + mvn clean verify \ + -P with-integration-tests \ + -DskipUTs \ + -DintegrationTest.forkCount=1 -DConfigNodeMaxHeapSize=256 -DDataNodeMaxHeapSize=1024 -DDataNodeMaxDirectMemorySize=768 \ + -DClusterConfigurations=${{ matrix.cluster }},${{ matrix.cluster }} \ + -pl integration-test \ + -am -PMultiClusterIT2AutoCreateSchema \ + -ntp >> ~/run-tests-$attempt.log && return 0 + test_output=$(cat ~/run-tests-$attempt.log) + + mv ~/run-tests-$attempt.log integration-test/target/cluster-logs/ + + echo "==================== BEGIN: ~/run-tests-$attempt.log ====================" + echo "$test_output" + echo "==================== END: ~/run-tests-$attempt.log ======================" + + if ! mv ~/run-tests-$attempt.log integration-test/target/cluster-logs/ 2>/dev/null; then + echo "Failed to move log file ~/run-tests-$attempt.log to integration-test/target/cluster-logs/. Skipping..." + fi + + if echo "$test_output" | grep -q "Could not transfer artifact"; then + if [ $attempt -lt $max_attempts ]; then + echo "Test failed with artifact transfer issue, attempt $attempt. Retrying in $retry_sleep seconds..." + sleep $retry_sleep + attempt=$((attempt + 1)) + else + echo "Test failed after $max_attempts attempts due to artifact transfer issue." 
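+                # Note: only the "Could not transfer artifact" case is swallowed; genuine test failures still return 1 below.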
+ echo "Treating this as a success because the issue is likely transient." + return 0 + fi + elif [ $? -ne 0 ]; then + echo "Test failed with a different error." + return 1 + else + echo "Tests passed" + return 0 + fi + done + } + retry + - name: Upload Artifact + if: failure() + uses: actions/upload-artifact@v4 + with: + name: cluster-log-auto-create-schema-java${{ matrix.java }}-${{ runner.os }}-${{ matrix.cluster1 }}-${{ matrix.cluster2 }} + path: integration-test/target/cluster-logs + retention-days: 30 + PipeManualCreateSchema: + strategy: + fail-fast: false + max-parallel: 15 + matrix: + java: [17] + # StrongConsistencyClusterMode is ignored now because RatisConsensus has not been supported yet. + cluster1: [LightWeightStandaloneMode, ScalableSingleNodeMode, HighPerformanceMode] + cluster2: [LightWeightStandaloneMode, ScalableSingleNodeMode, HighPerformanceMode] + os: [ ubuntu-latest ] + exclude: + - cluster1: LightWeightStandaloneMode + cluster2: LightWeightStandaloneMode + - cluster1: LightWeightStandaloneMode + cluster2: ScalableSingleNodeMode + - cluster1: ScalableSingleNodeMode + cluster2: LightWeightStandaloneMode + - cluster1: HighPerformanceMode + cluster2: LightWeightStandaloneMode + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + - name: Set up JDK ${{ matrix.java }} + uses: actions/setup-java@v4 + with: + distribution: liberica + java-version: ${{ matrix.java }} + - name: Cache Maven packages + uses: actions/cache@v4 + with: + path: ~/.m2 + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2- + - name: Sleep for a random duration between 0 and 10000 milliseconds + run: | + sleep $(( $(( RANDOM % 10000 + 1 )) / 1000)) + - name: IT Test + shell: bash + # we do not compile client-cpp for saving time, it is tested in client.yml + # we can skip influxdb-protocol because it has been tested separately in influxdb-protocol.yml + run: | + retry() { + local -i max_attempts=3 + local -i attempt=1 + local -i retry_sleep=5 + local test_output + + while [ $attempt -le $max_attempts ]; do + mvn clean verify \ + -P with-integration-tests \ + -DskipUTs \ + -DintegrationTest.forkCount=1 -DConfigNodeMaxHeapSize=256 -DDataNodeMaxHeapSize=1024 -DDataNodeMaxDirectMemorySize=768 \ + -DClusterConfigurations=${{ matrix.cluster1 }},${{ matrix.cluster2 }} \ + -pl integration-test \ + -am -PMultiClusterIT2ManualCreateSchema \ + -ntp >> ~/run-tests-$attempt.log && return 0 + test_output=$(cat ~/run-tests-$attempt.log) + + echo "==================== BEGIN: ~/run-tests-$attempt.log ====================" + echo "$test_output" + echo "==================== END: ~/run-tests-$attempt.log ======================" + + if ! mv ~/run-tests-$attempt.log integration-test/target/cluster-logs/ 2>/dev/null; then + echo "Failed to move log file ~/run-tests-$attempt.log to integration-test/target/cluster-logs/. Skipping..." + fi + + if echo "$test_output" | grep -q "Could not transfer artifact"; then + if [ $attempt -lt $max_attempts ]; then + echo "Test failed with artifact transfer issue, attempt $attempt. Retrying in $retry_sleep seconds..." + sleep $retry_sleep + attempt=$((attempt + 1)) + else + echo "Test failed after $max_attempts attempts due to artifact transfer issue." + echo "Treating this as a success because the issue is likely transient." + return 0 + fi + elif [ $? -ne 0 ]; then + echo "Test failed with a different error." 
+ return 1 + else + echo "Tests passed" + return 0 + fi + done + } + retry + - name: Upload Artifact + if: failure() + uses: actions/upload-artifact@v4 + with: + name: cluster-log-manual-create-schema-java${{ matrix.java }}-${{ runner.os }}-${{ matrix.cluster1 }}-${{ matrix.cluster2 }} + path: integration-test/target/cluster-logs + retention-days: 30 + SubscriptionArchVerification: + strategy: + fail-fast: false + max-parallel: 15 + matrix: + java: [ 17 ] + # StrongConsistencyClusterMode is ignored now because RatisConsensus has not been supported yet. + cluster1: [ LightWeightStandaloneMode, ScalableSingleNodeMode, HighPerformanceMode ] + cluster2: [ ScalableSingleNodeMode ] + os: [ ubuntu-latest ] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + - name: Set up JDK ${{ matrix.java }} + uses: actions/setup-java@v4 + with: + distribution: liberica + java-version: ${{ matrix.java }} + - name: Cache Maven packages + uses: actions/cache@v4 + with: + path: ~/.m2 + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2- + - name: Sleep for a random duration between 0 and 10000 milliseconds + run: | + sleep $(( $(( RANDOM % 10000 + 1 )) / 1000)) + - name: IT Test + shell: bash + # we do not compile client-cpp for saving time, it is tested in client.yml + # we can skip influxdb-protocol because it has been tested separately in influxdb-protocol.yml + run: | + retry() { + local -i max_attempts=3 + local -i attempt=1 + local -i retry_sleep=5 + local test_output + + while [ $attempt -le $max_attempts ]; do + mvn clean verify \ + -P with-integration-tests \ + -DskipUTs \ + -DintegrationTest.forkCount=1 -DConfigNodeMaxHeapSize=256 -DDataNodeMaxHeapSize=1024 -DDataNodeMaxDirectMemorySize=768 \ + -DClusterConfigurations=${{ matrix.cluster1 }},${{ matrix.cluster2 }} \ + -pl integration-test \ + -am -PMultiClusterIT2SubscriptionArchVerification \ + -ntp >> ~/run-tests-$attempt.log && return 0 + test_output=$(cat ~/run-tests-$attempt.log) + + echo "==================== BEGIN: ~/run-tests-$attempt.log ====================" + echo "$test_output" + echo "==================== END: ~/run-tests-$attempt.log ======================" + + if ! mv ~/run-tests-$attempt.log integration-test/target/cluster-logs/ 2>/dev/null; then + echo "Failed to move log file ~/run-tests-$attempt.log to integration-test/target/cluster-logs/. Skipping..." + fi + + if echo "$test_output" | grep -q "Could not transfer artifact"; then + if [ $attempt -lt $max_attempts ]; then + echo "Test failed with artifact transfer issue, attempt $attempt. Retrying in $retry_sleep seconds..." + sleep $retry_sleep + attempt=$((attempt + 1)) + else + echo "Test failed after $max_attempts attempts due to artifact transfer issue." + echo "Treating this as a success because the issue is likely transient." + return 0 + fi + elif [ $? -ne 0 ]; then + echo "Test failed with a different error." 
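+                # The attempt log was moved into integration-test/target/cluster-logs/ above, so the failure-artifact upload still captures it.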
+ return 1 + else + echo "Tests passed" + return 0 + fi + done + } + retry + - name: Upload Artifact + if: failure() + uses: actions/upload-artifact@v4 + with: + name: cluster-log-subscription-java${{ matrix.java }}-${{ runner.os }}-${{ matrix.cluster1 }}-${{ matrix.cluster2 }} + path: integration-test/target/cluster-logs + retention-days: 30 + SubscriptionRegressionConsumer: + strategy: + fail-fast: false + max-parallel: 15 + matrix: + java: [ 17 ] + # do not use HighPerformanceMode here, otherwise some tests will cause the GH runner to receive a shutdown signal + cluster1: [ ScalableSingleNodeMode ] + cluster2: [ ScalableSingleNodeMode ] + os: [ ubuntu-latest ] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + - name: Set up JDK ${{ matrix.java }} + uses: actions/setup-java@v4 + with: + distribution: liberica + java-version: ${{ matrix.java }} + - name: Cache Maven packages + uses: actions/cache@v4 + with: + path: ~/.m2 + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2- + - name: Sleep for a random duration between 0 and 10000 milliseconds + run: | + sleep $(( $(( RANDOM % 10000 + 1 )) / 1000)) + - name: IT Test + shell: bash + # we do not compile client-cpp for saving time, it is tested in client.yml + # we can skip influxdb-protocol because it has been tested separately in influxdb-protocol.yml + run: | + retry() { + local -i max_attempts=3 + local -i attempt=1 + local -i retry_sleep=5 + local test_output + + while [ $attempt -le $max_attempts ]; do + mvn clean verify \ + -P with-integration-tests \ + -DskipUTs \ + -DintegrationTest.forkCount=1 -DConfigNodeMaxHeapSize=256 -DDataNodeMaxHeapSize=1024 -DDataNodeMaxDirectMemorySize=768 \ + -DClusterConfigurations=${{ matrix.cluster1 }},${{ matrix.cluster2 }} \ + -pl integration-test \ + -am -PMultiClusterIT2SubscriptionRegressionConsumer \ + -ntp >> ~/run-tests-$attempt.log && return 0 + test_output=$(cat ~/run-tests-$attempt.log) + + echo "==================== BEGIN: ~/run-tests-$attempt.log ====================" + echo "$test_output" + echo "==================== END: ~/run-tests-$attempt.log ======================" + + if ! mv ~/run-tests-$attempt.log integration-test/target/cluster-logs/ 2>/dev/null; then + echo "Failed to move log file ~/run-tests-$attempt.log to integration-test/target/cluster-logs/. Skipping..." + fi + + if echo "$test_output" | grep -q "Could not transfer artifact"; then + if [ $attempt -lt $max_attempts ]; then + echo "Test failed with artifact transfer issue, attempt $attempt. Retrying in $retry_sleep seconds..." + sleep $retry_sleep + attempt=$((attempt + 1)) + else + echo "Test failed after $max_attempts attempts due to artifact transfer issue." + echo "Treating this as a success because the issue is likely transient." + return 0 + fi + elif [ $? -ne 0 ]; then + echo "Test failed with a different error." 
+ return 1 + else + echo "Tests passed" + return 0 + fi + done + } + retry + - name: Upload Artifact + if: failure() + uses: actions/upload-artifact@v4 + with: + name: cluster-log-subscription-regression-consumer-java${{ matrix.java }}-${{ runner.os }}-${{ matrix.cluster1 }}-${{ matrix.cluster2 }} + path: integration-test/target/cluster-logs + retention-days: 30 + SubscriptionRegressionMisc: + strategy: + fail-fast: false + max-parallel: 15 + matrix: + java: [ 17 ] + # do not use HighPerformanceMode here, otherwise some tests will cause the GH runner to receive a shutdown signal + cluster1: [ ScalableSingleNodeMode ] + cluster2: [ ScalableSingleNodeMode ] + os: [ ubuntu-latest ] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + - name: Set up JDK ${{ matrix.java }} + uses: actions/setup-java@v4 + with: + distribution: liberica + java-version: ${{ matrix.java }} + - name: Cache Maven packages + uses: actions/cache@v4 + with: + path: ~/.m2 + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2- + - name: Sleep for a random duration between 0 and 10000 milliseconds + run: | + sleep $(( $(( RANDOM % 10000 + 1 )) / 1000)) + - name: IT Test + shell: bash + # we do not compile client-cpp for saving time, it is tested in client.yml + # we can skip influxdb-protocol because it has been tested separately in influxdb-protocol.yml + run: | + retry() { + local -i max_attempts=3 + local -i attempt=1 + local -i retry_sleep=5 + local test_output + + while [ $attempt -le $max_attempts ]; do + mvn clean verify \ + -P with-integration-tests \ + -DskipUTs \ + -DintegrationTest.forkCount=1 -DConfigNodeMaxHeapSize=256 -DDataNodeMaxHeapSize=1024 -DDataNodeMaxDirectMemorySize=768 \ + -DClusterConfigurations=${{ matrix.cluster1 }},${{ matrix.cluster2 }} \ + -pl integration-test \ + -am -PMultiClusterIT2SubscriptionRegressionMisc \ + -ntp >> ~/run-tests-$attempt.log && return 0 + test_output=$(cat ~/run-tests-$attempt.log) + + echo "==================== BEGIN: ~/run-tests-$attempt.log ====================" + echo "$test_output" + echo "==================== END: ~/run-tests-$attempt.log ======================" + + if ! mv ~/run-tests-$attempt.log integration-test/target/cluster-logs/ 2>/dev/null; then + echo "Failed to move log file ~/run-tests-$attempt.log to integration-test/target/cluster-logs/. Skipping..." + fi + + if echo "$test_output" | grep -q "Could not transfer artifact"; then + if [ $attempt -lt $max_attempts ]; then + echo "Test failed with artifact transfer issue, attempt $attempt. Retrying in $retry_sleep seconds..." + sleep $retry_sleep + attempt=$((attempt + 1)) + else + echo "Test failed after $max_attempts attempts due to artifact transfer issue." + echo "Treating this as a success because the issue is likely transient." + return 0 + fi + elif [ $? -ne 0 ]; then + echo "Test failed with a different error." 
+ return 1 + else + echo "Tests passed" + return 0 + fi + done + } + retry + - name: Upload Artifact + if: failure() + uses: actions/upload-artifact@v4 + with: + name: cluster-log-subscription-regression-misc-java${{ matrix.java }}-${{ runner.os }}-${{ matrix.cluster1 }}-${{ matrix.cluster2 }} + path: integration-test/target/cluster-logs + retention-days: 30 diff --git a/.github/workflows/daily-ut.yml b/.github/workflows/daily-ut.yml new file mode 100644 index 0000000000000..b8ff46ca24fb8 --- /dev/null +++ b/.github/workflows/daily-ut.yml @@ -0,0 +1,57 @@ +name: Daily UT + +on: + schedule: + # Run at UTC 19:00 every day (CST 03:00 AM) + - cron: '0 19 * * *' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +env: + MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 + MAVEN_ARGS: --batch-mode --no-transfer-progress + DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} + +jobs: + unit-test: + strategy: + fail-fast: false + max-parallel: 15 + matrix: + java: [ 8 ] + os: [ ubuntu-latest, windows-latest ] + it_task: [ 'others', 'datanode' ] + include: + - java: 17 + os: macos-latest + it_task: 'datanode' + - java: 17 + os: macos-latest + it_task: 'others' + runs-on: ${{ matrix.os }} + + steps: + - uses: actions/checkout@v4 + - name: Set up JDK ${{ matrix.java }} + uses: actions/setup-java@v4 + with: + distribution: corretto + java-version: ${{ matrix.java }} + - name: Cache Maven packages + uses: actions/cache@v4 + with: + path: ~/.m2 + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2- + - name: Test Datanode Module with Maven + shell: bash + if: ${{ matrix.it_task == 'datanode'}} + run: mvn clean integration-test -Dtest.port.closed=true -pl iotdb-core/datanode -am -DskipTests -Diotdb.test.only=true + - name: Test Other Modules with Maven + shell: bash + if: ${{ matrix.it_task == 'others'}} + run: | + mvn clean install -DskipTests + mvn -P get-jar-with-dependencies,with-integration-tests clean test -Dtest.port.closed=true -Diotdb.test.skip=true diff --git a/.github/workflows/multi-language-client.yml b/.github/workflows/multi-language-client.yml index 4d323e9bb412d..b1374ae2795e5 100644 --- a/.github/workflows/multi-language-client.yml +++ b/.github/workflows/multi-language-client.yml @@ -3,17 +3,31 @@ on: push: branches: - master - - "rel/*" - paths-ignore: - - 'docs/**' - - 'site/**' + - "rc/*" + - 'dev/*' + paths: + - 'pom.xml' + - 'iotdb-client/pom.xml' + - 'iotdb-client/client-py/**' + - 'iotdb-client/client-cpp/**' + - 'example/client-cpp-example/**' + - 'iotdb-protocol/thrift-datanode/src/main/thrift/client.thrift' + - 'iotdb-protocol/thrift-commons/src/main/thrift/common.thrift' + - '.github/workflows/multi-language-client.yml' pull_request: branches: - master - - "rel/*" - paths-ignore: - - 'docs/**' - - 'site/**' + - "rc/*" + - 'dev/*' + paths: + - 'pom.xml' + - 'iotdb-client/pom.xml' + - 'iotdb-client/client-py/**' + - 'iotdb-client/client-cpp/**' + - 'example/client-cpp-example/**' + - 'iotdb-protocol/thrift-datanode/src/main/thrift/client.thrift' + - 'iotdb-protocol/thrift-commons/src/main/thrift/common.thrift' + - '.github/workflows/multi-language-client.yml' # allow manually run the action: workflow_dispatch: @@ -44,11 +58,16 @@ jobs: sudo apt-get update sudo apt-get install libboost-all-dev - name: Install CPP Dependencies (Mac) + # remove some xcode to release disk space if: matrix.os == 
'macos-latest' shell: bash run: | brew install boost - brew install bison + sudo rm -rf /Applications/Xcode_14.3.1.app + sudo rm -rf /Applications/Xcode_15.0.1.app + sudo rm -rf /Applications/Xcode_15.1.app + sudo rm -rf /Applications/Xcode_15.2.app + sudo rm -rf /Applications/Xcode_15.3.app - name: Install CPP Dependencies (Windows) if: matrix.os == 'windows-latest' run: | @@ -60,7 +79,7 @@ jobs: uses: actions/cache@v4 with: path: ~/.m2 - key: client-${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} restore-keys: ${{ runner.os }}-m2- - name: Build IoTDB server shell: bash @@ -76,7 +95,7 @@ jobs: uses: actions/upload-artifact@v4 with: name: cpp-IT-${{ runner.os }} - path: iotdb-client/client-cpp/target/build/test/Testing + path: distribution/target/apache-iotdb-*-all-bin/apache-iotdb-*-all-bin/logs retention-days: 1 go: @@ -100,7 +119,8 @@ jobs: run: | cd iotdb-client git clone https://github.com/apache/iotdb-client-go.git - cd iotdb-client-go + cd iotdb-client-go + git checkout rc/1.3.3 make e2e_test_for_parent_git_repo e2e_test_clean_for_parent_git_repo python: @@ -112,6 +132,9 @@ jobs: runs-on: ${{ (matrix.python == '3.6' && 'ubuntu-20.04') || 'ubuntu-latest' }} steps: + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} - uses: actions/checkout@v4 - name: Cache Maven packages uses: actions/cache@v4 @@ -119,24 +142,26 @@ jobs: path: ~/.m2 key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} restore-keys: ${{ runner.os }}-m2- + - name: Cache pip packages + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }} + restore-keys: ${{ runner.os }}-pip- - name: Build IoTDB server distribution zip and python client run: mvn -B clean install -pl distribution,iotdb-client/client-py -am -DskipTests - name: Build IoTDB server docker image run: | docker build . -f docker/src/main/Dockerfile-1c1d -t "iotdb:dev" docker images - - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python }} - name: Install IoTDB python client requirements run: pip3 install -r iotdb-client/client-py/requirements_dev.txt - name: Check code style if: ${{ matrix.python == '3.x'}} shell: bash run: black iotdb-client/client-py/ --check --diff - - name: Integration test + - name: Integration test and test make package shell: bash run: | cd iotdb-client/client-py/ && pytest . - - + ./release.sh diff --git a/.github/workflows/pipe-it-2cluster.yml b/.github/workflows/pipe-it-2cluster.yml index 676fae32d5139..2f999b0fd9e20 100644 --- a/.github/workflows/pipe-it-2cluster.yml +++ b/.github/workflows/pipe-it-2cluster.yml @@ -5,16 +5,22 @@ on: branches: - master - 'rel/1.*' + - 'rc/1.*' + - 'dev/1.*' paths-ignore: - 'docs/**' - 'site/**' + - 'iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/**' #queryengine pull_request: branches: - master - 'rel/1.*' + - 'rc/1.*' + - 'dev/1.*' paths-ignore: - 'docs/**' - 'site/**' + - 'iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/**' #queryengine # allow manually run the action: workflow_dispatch: @@ -35,7 +41,7 @@ jobs: matrix: java: [17] # StrongConsistencyClusterMode is ignored now because RatisConsensus has not been supported yet. 
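+        # Trimmed to HighPerformanceMode here; the full mode matrix now runs in the daily-it workflow's PipeAutoCreateSchema job.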
- cluster: [LightWeightStandaloneMode, ScalableSingleNodeMode, HighPerformanceMode] + cluster: [HighPerformanceMode] os: [ ubuntu-latest ] runs-on: ${{ matrix.os }} steps: @@ -45,18 +51,67 @@ jobs: with: distribution: liberica java-version: ${{ matrix.java }} + - name: Cache Maven packages + uses: actions/cache@v4 + with: + path: ~/.m2 + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2- + - name: Sleep for a random duration between 0 and 10000 milliseconds + run: | + sleep $(( $(( RANDOM % 10000 + 1 )) / 1000)) - name: IT Test shell: bash # we do not compile client-cpp for saving time, it is tested in client.yml # we can skip influxdb-protocol because it has been tested separately in influxdb-protocol.yml run: | - mvn clean verify \ - -P with-integration-tests \ - -DskipUTs \ - -DintegrationTest.forkCount=1 -DConfigNodeMaxHeapSize=256 -DDataNodeMaxHeapSize=1024 -DDataNodeMaxDirectMemorySize=768 \ - -DClusterConfigurations=${{ matrix.cluster }},${{ matrix.cluster }} \ - -pl integration-test \ - -am -PMultiClusterIT2AutoCreateSchema + retry() { + local -i max_attempts=3 + local -i attempt=1 + local -i retry_sleep=5 + local test_output + + while [ $attempt -le $max_attempts ]; do + mvn clean verify \ + -P with-integration-tests \ + -DskipUTs \ + -DintegrationTest.forkCount=1 -DConfigNodeMaxHeapSize=256 -DDataNodeMaxHeapSize=1024 -DDataNodeMaxDirectMemorySize=768 \ + -DClusterConfigurations=${{ matrix.cluster }},${{ matrix.cluster }} \ + -pl integration-test \ + -am -PMultiClusterIT2AutoCreateSchema \ + -ntp >> ~/run-tests-$attempt.log && return 0 + test_output=$(cat ~/run-tests-$attempt.log) + + mv ~/run-tests-$attempt.log integration-test/target/cluster-logs/ + + echo "==================== BEGIN: ~/run-tests-$attempt.log ====================" + echo "$test_output" + echo "==================== END: ~/run-tests-$attempt.log ======================" + + if ! mv ~/run-tests-$attempt.log integration-test/target/cluster-logs/ 2>/dev/null; then + echo "Failed to move log file ~/run-tests-$attempt.log to integration-test/target/cluster-logs/. Skipping..." + fi + + if echo "$test_output" | grep -q "Could not transfer artifact"; then + if [ $attempt -lt $max_attempts ]; then + echo "Test failed with artifact transfer issue, attempt $attempt. Retrying in $retry_sleep seconds..." + sleep $retry_sleep + attempt=$((attempt + 1)) + else + echo "Test failed after $max_attempts attempts due to artifact transfer issue." + echo "Treating this as a success because the issue is likely transient." + return 0 + fi + elif [ $? -ne 0 ]; then + echo "Test failed with a different error." + return 1 + else + echo "Tests passed" + return 0 + fi + done + } + retry - name: Upload Artifact if: failure() uses: actions/upload-artifact@v4 @@ -71,8 +126,8 @@ jobs: matrix: java: [17] # StrongConsistencyClusterMode is ignored now because RatisConsensus has not been supported yet. 
- cluster1: [LightWeightStandaloneMode, ScalableSingleNodeMode, HighPerformanceMode] - cluster2: [LightWeightStandaloneMode, ScalableSingleNodeMode, HighPerformanceMode] + cluster1: [HighPerformanceMode] + cluster2: [HighPerformanceMode] os: [ ubuntu-latest ] runs-on: ${{ matrix.os }} steps: @@ -82,18 +137,65 @@ jobs: with: distribution: liberica java-version: ${{ matrix.java }} + - name: Cache Maven packages + uses: actions/cache@v4 + with: + path: ~/.m2 + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2- + - name: Sleep for a random duration between 0 and 10000 milliseconds + run: | + sleep $(( $(( RANDOM % 10000 + 1 )) / 1000)) - name: IT Test shell: bash # we do not compile client-cpp for saving time, it is tested in client.yml # we can skip influxdb-protocol because it has been tested separately in influxdb-protocol.yml run: | - mvn clean verify \ - -P with-integration-tests \ - -DskipUTs \ - -DintegrationTest.forkCount=1 -DConfigNodeMaxHeapSize=256 -DDataNodeMaxHeapSize=1024 -DDataNodeMaxDirectMemorySize=768 \ - -DClusterConfigurations=${{ matrix.cluster1 }},${{ matrix.cluster2 }} \ - -pl integration-test \ - -am -PMultiClusterIT2ManualCreateSchema + retry() { + local -i max_attempts=3 + local -i attempt=1 + local -i retry_sleep=5 + local test_output + + while [ $attempt -le $max_attempts ]; do + mvn clean verify \ + -P with-integration-tests \ + -DskipUTs \ + -DintegrationTest.forkCount=1 -DConfigNodeMaxHeapSize=256 -DDataNodeMaxHeapSize=1024 -DDataNodeMaxDirectMemorySize=768 \ + -DClusterConfigurations=${{ matrix.cluster1 }},${{ matrix.cluster2 }} \ + -pl integration-test \ + -am -PMultiClusterIT2ManualCreateSchema \ + -ntp >> ~/run-tests-$attempt.log && return 0 + test_output=$(cat ~/run-tests-$attempt.log) + + echo "==================== BEGIN: ~/run-tests-$attempt.log ====================" + echo "$test_output" + echo "==================== END: ~/run-tests-$attempt.log ======================" + + if ! mv ~/run-tests-$attempt.log integration-test/target/cluster-logs/ 2>/dev/null; then + echo "Failed to move log file ~/run-tests-$attempt.log to integration-test/target/cluster-logs/. Skipping..." + fi + + if echo "$test_output" | grep -q "Could not transfer artifact"; then + if [ $attempt -lt $max_attempts ]; then + echo "Test failed with artifact transfer issue, attempt $attempt. Retrying in $retry_sleep seconds..." + sleep $retry_sleep + attempt=$((attempt + 1)) + else + echo "Test failed after $max_attempts attempts due to artifact transfer issue." + echo "Treating this as a success because the issue is likely transient." + return 0 + fi + elif [ $? -ne 0 ]; then + echo "Test failed with a different error." + return 1 + else + echo "Tests passed" + return 0 + fi + done + } + retry - name: Upload Artifact if: failure() uses: actions/upload-artifact@v4 @@ -101,14 +203,14 @@ jobs: name: cluster-log-manual-create-schema-java${{ matrix.java }}-${{ runner.os }}-${{ matrix.cluster1 }}-${{ matrix.cluster2 }} path: integration-test/target/cluster-logs retention-days: 30 - subscription: + subscription-arch-verification: strategy: fail-fast: false max-parallel: 15 matrix: java: [ 17 ] # StrongConsistencyClusterMode is ignored now because RatisConsensus has not been supported yet. 
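+        # Only ScalableSingleNodeMode is kept here; the wider cluster1 matrix now runs in daily-it's SubscriptionArchVerification job.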
- cluster1: [ LightWeightStandaloneMode, ScalableSingleNodeMode, HighPerformanceMode ] + cluster1: [ ScalableSingleNodeMode ] cluster2: [ ScalableSingleNodeMode ] os: [ ubuntu-latest ] runs-on: ${{ matrix.os }} @@ -119,18 +221,65 @@ jobs: with: distribution: liberica java-version: ${{ matrix.java }} + - name: Cache Maven packages + uses: actions/cache@v4 + with: + path: ~/.m2 + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2- + - name: Sleep for a random duration between 0 and 10000 milliseconds + run: | + sleep $(( $(( RANDOM % 10000 + 1 )) / 1000)) - name: IT Test shell: bash # we do not compile client-cpp for saving time, it is tested in client.yml # we can skip influxdb-protocol because it has been tested separately in influxdb-protocol.yml run: | - mvn clean verify \ - -P with-integration-tests \ - -DskipUTs \ - -DintegrationTest.forkCount=1 -DConfigNodeMaxHeapSize=256 -DDataNodeMaxHeapSize=1024 -DDataNodeMaxDirectMemorySize=768 \ - -DClusterConfigurations=${{ matrix.cluster1 }},${{ matrix.cluster2 }} \ - -pl integration-test \ - -am -PMultiClusterIT2Subscription + retry() { + local -i max_attempts=3 + local -i attempt=1 + local -i retry_sleep=5 + local test_output + + while [ $attempt -le $max_attempts ]; do + mvn clean verify \ + -P with-integration-tests \ + -DskipUTs \ + -DintegrationTest.forkCount=1 -DConfigNodeMaxHeapSize=256 -DDataNodeMaxHeapSize=1024 -DDataNodeMaxDirectMemorySize=768 \ + -DClusterConfigurations=${{ matrix.cluster1 }},${{ matrix.cluster2 }} \ + -pl integration-test \ + -am -PMultiClusterIT2SubscriptionArchVerification \ + -ntp >> ~/run-tests-$attempt.log && return 0 + test_output=$(cat ~/run-tests-$attempt.log) + + echo "==================== BEGIN: ~/run-tests-$attempt.log ====================" + echo "$test_output" + echo "==================== END: ~/run-tests-$attempt.log ======================" + + if ! mv ~/run-tests-$attempt.log integration-test/target/cluster-logs/ 2>/dev/null; then + echo "Failed to move log file ~/run-tests-$attempt.log to integration-test/target/cluster-logs/. Skipping..." + fi + + if echo "$test_output" | grep -q "Could not transfer artifact"; then + if [ $attempt -lt $max_attempts ]; then + echo "Test failed with artifact transfer issue, attempt $attempt. Retrying in $retry_sleep seconds..." + sleep $retry_sleep + attempt=$((attempt + 1)) + else + echo "Test failed after $max_attempts attempts due to artifact transfer issue." + echo "Treating this as a success because the issue is likely transient." + return 0 + fi + elif [ $? -ne 0 ]; then + echo "Test failed with a different error." 
+ return 1 + else + echo "Tests passed" + return 0 + fi + done + } + retry - name: Upload Artifact if: failure() uses: actions/upload-artifact@v4 @@ -138,3 +287,171 @@ jobs: name: cluster-log-subscription-java${{ matrix.java }}-${{ runner.os }}-${{ matrix.cluster1 }}-${{ matrix.cluster2 }} path: integration-test/target/cluster-logs retention-days: 30 + subscription-regression-consumer: + strategy: + fail-fast: false + max-parallel: 15 + matrix: + java: [ 17 ] + # do not use HighPerformanceMode here, otherwise some tests will cause the GH runner to receive a shutdown signal + cluster1: [ ScalableSingleNodeMode ] + cluster2: [ ScalableSingleNodeMode ] + os: [ ubuntu-latest ] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + - name: Set up JDK ${{ matrix.java }} + uses: actions/setup-java@v4 + with: + distribution: liberica + java-version: ${{ matrix.java }} + - name: Cache Maven packages + uses: actions/cache@v4 + with: + path: ~/.m2 + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2- + - name: Sleep for a random duration between 0 and 10000 milliseconds + run: | + sleep $(( $(( RANDOM % 10000 + 1 )) / 1000)) + - name: IT Test + shell: bash + # we do not compile client-cpp for saving time, it is tested in client.yml + # we can skip influxdb-protocol because it has been tested separately in influxdb-protocol.yml + run: | + retry() { + local -i max_attempts=3 + local -i attempt=1 + local -i retry_sleep=5 + local test_output + + while [ $attempt -le $max_attempts ]; do + mvn clean verify \ + -P with-integration-tests \ + -DskipUTs \ + -DintegrationTest.forkCount=1 -DConfigNodeMaxHeapSize=256 -DDataNodeMaxHeapSize=1024 -DDataNodeMaxDirectMemorySize=768 \ + -DClusterConfigurations=${{ matrix.cluster1 }},${{ matrix.cluster2 }} \ + -pl integration-test \ + -am -PMultiClusterIT2SubscriptionRegressionConsumer \ + -ntp >> ~/run-tests-$attempt.log && return 0 + test_output=$(cat ~/run-tests-$attempt.log) + + echo "==================== BEGIN: ~/run-tests-$attempt.log ====================" + echo "$test_output" + echo "==================== END: ~/run-tests-$attempt.log ======================" + + if ! mv ~/run-tests-$attempt.log integration-test/target/cluster-logs/ 2>/dev/null; then + echo "Failed to move log file ~/run-tests-$attempt.log to integration-test/target/cluster-logs/. Skipping..." + fi + + if echo "$test_output" | grep -q "Could not transfer artifact"; then + if [ $attempt -lt $max_attempts ]; then + echo "Test failed with artifact transfer issue, attempt $attempt. Retrying in $retry_sleep seconds..." + sleep $retry_sleep + attempt=$((attempt + 1)) + else + echo "Test failed after $max_attempts attempts due to artifact transfer issue." + echo "Treating this as a success because the issue is likely transient." + return 0 + fi + elif [ $? -ne 0 ]; then + echo "Test failed with a different error." 
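+                # Returning 1 fails the "IT Test" step, which in turn triggers the log-upload step guarded by `if: failure()`.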
+ return 1 + else + echo "Tests passed" + return 0 + fi + done + } + retry + - name: Upload Artifact + if: failure() + uses: actions/upload-artifact@v4 + with: + name: cluster-log-subscription-regression-consumer-java${{ matrix.java }}-${{ runner.os }}-${{ matrix.cluster1 }}-${{ matrix.cluster2 }} + path: integration-test/target/cluster-logs + retention-days: 30 + subscription-regression-misc: + strategy: + fail-fast: false + max-parallel: 15 + matrix: + java: [ 17 ] + # do not use HighPerformanceMode here, otherwise some tests will cause the GH runner to receive a shutdown signal + cluster1: [ ScalableSingleNodeMode ] + cluster2: [ ScalableSingleNodeMode ] + os: [ ubuntu-latest ] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + - name: Set up JDK ${{ matrix.java }} + uses: actions/setup-java@v4 + with: + distribution: liberica + java-version: ${{ matrix.java }} + - name: Cache Maven packages + uses: actions/cache@v4 + with: + path: ~/.m2 + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2- + - name: Sleep for a random duration between 0 and 10000 milliseconds + run: | + sleep $(( $(( RANDOM % 10000 + 1 )) / 1000)) + - name: IT Test + shell: bash + # we do not compile client-cpp for saving time, it is tested in client.yml + # we can skip influxdb-protocol because it has been tested separately in influxdb-protocol.yml + run: | + retry() { + local -i max_attempts=3 + local -i attempt=1 + local -i retry_sleep=5 + local test_output + + while [ $attempt -le $max_attempts ]; do + mvn clean verify \ + -P with-integration-tests \ + -DskipUTs \ + -DintegrationTest.forkCount=1 -DConfigNodeMaxHeapSize=256 -DDataNodeMaxHeapSize=1024 -DDataNodeMaxDirectMemorySize=768 \ + -DClusterConfigurations=${{ matrix.cluster1 }},${{ matrix.cluster2 }} \ + -pl integration-test \ + -am -PMultiClusterIT2SubscriptionRegressionMisc \ + -ntp >> ~/run-tests-$attempt.log && return 0 + test_output=$(cat ~/run-tests-$attempt.log) + + echo "==================== BEGIN: ~/run-tests-$attempt.log ====================" + echo "$test_output" + echo "==================== END: ~/run-tests-$attempt.log ======================" + + if ! mv ~/run-tests-$attempt.log integration-test/target/cluster-logs/ 2>/dev/null; then + echo "Failed to move log file ~/run-tests-$attempt.log to integration-test/target/cluster-logs/. Skipping..." + fi + + if echo "$test_output" | grep -q "Could not transfer artifact"; then + if [ $attempt -lt $max_attempts ]; then + echo "Test failed with artifact transfer issue, attempt $attempt. Retrying in $retry_sleep seconds..." + sleep $retry_sleep + attempt=$((attempt + 1)) + else + echo "Test failed after $max_attempts attempts due to artifact transfer issue." + echo "Treating this as a success because the issue is likely transient." + return 0 + fi + elif [ $? -ne 0 ]; then + echo "Test failed with a different error." 
+ return 1 + else + echo "Tests passed" + return 0 + fi + done + } + retry + - name: Upload Artifact + if: failure() + uses: actions/upload-artifact@v4 + with: + name: cluster-log-subscription-regression-misc-java${{ matrix.java }}-${{ runner.os }}-${{ matrix.cluster1 }}-${{ matrix.cluster2 }} + path: integration-test/target/cluster-logs + retention-days: 30 diff --git a/.github/workflows/sonar-codecov.yml b/.github/workflows/sonar-codecov.yml index 1c5c4827401b9..234a7caff5bd1 100644 --- a/.github/workflows/sonar-codecov.yml +++ b/.github/workflows/sonar-codecov.yml @@ -8,6 +8,8 @@ on: branches: - master - "rel/*" + - "rc/*" + - 'dev/1.*' paths-ignore: - "docs/**" - 'site/**' @@ -16,6 +18,8 @@ on: - master - "rel/*" - "new_*" + - "rc/*" + - 'dev/1.*' paths-ignore: - "docs/**" - 'site/**' diff --git a/.github/workflows/todos-check.yml b/.github/workflows/todos-check.yml new file mode 100644 index 0000000000000..4ab48c9e754cf --- /dev/null +++ b/.github/workflows/todos-check.yml @@ -0,0 +1,54 @@ +name: Check TODOs and FIXMEs in Changed Files + +on: + pull_request: + branches: + - master + - 'dev/*' + - 'rel/*' + - "rc/*" + - 'force_ci/**' + paths-ignore: + - 'docs/**' + - 'site/**' + # allow manually run the action: + workflow_dispatch: + +jobs: + todo-check: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Check for TODOs and FIXMEs in changed files + run: | + # Fetch the target branch + git fetch origin $GITHUB_BASE_REF + + git switch -c check_branch + + # Get the diff of the changes + echo Get the diff of the changes + DIFF=$(git diff origin/$GITHUB_BASE_REF check_branch -- . ':(exclude).github/workflows/todos-check.yml') + + if [ -z "$DIFF" ]; then + echo "No changes detected." + exit 0 + fi + + + # Check the diff for TODOs + + # Check the diff for TODOs + echo Check the diff for TODOs + TODOsCOUNT=$(echo "$DIFF" | grep -E '^\+.*(TODO|FIXME)' | wc -l) + if [ "$TODOsCOUNT" -eq 0 ]; then + echo "No TODOs or FIXMEs found in changed content."; + exit 0 + fi + + echo "TODO or FIXME found in the changes. Please resolve it before merging." 
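+          # List each offending added line (tee also appends it to output.log) before failing the job.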
+ echo "$DIFF" | grep -E '^\+.*(TODO|FIXME)' | tee -a output.log + exit 1 \ No newline at end of file diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml index 2379c66efe824..fbb54fcb41821 100644 --- a/.github/workflows/unit-test.yml +++ b/.github/workflows/unit-test.yml @@ -8,7 +8,8 @@ on: branches: - master - 'rel/*' - - pipe-meta-sync + - "rc/*" + - 'dev/*' paths-ignore: - 'docs/**' - 'site/**' @@ -16,7 +17,8 @@ on: branches: - master - 'rel/*' - - pipe-meta-sync + - "rc/*" + - 'dev/*' paths-ignore: - 'docs/**' - 'site/**' @@ -38,16 +40,9 @@ jobs: fail-fast: false max-parallel: 15 matrix: - java: [ 8, 17 ] + java: [ 17 ] os: [ ubuntu-latest, windows-latest ] it_task: [ 'others', 'datanode' ] - include: - - java: 17 - os: macos-latest - it_task: 'datanode' - - java: 17 - os: macos-latest - it_task: 'others' runs-on: ${{ matrix.os }} steps: diff --git a/LICENSE b/LICENSE index 9a30cbd72803b..6e1d4e59d3d95 100644 --- a/LICENSE +++ b/LICENSE @@ -204,88 +204,44 @@ APACHE IOTDB SUBCOMPONENTS -------------------------------------------------------------------------------- -The following class is copied from maven-wrapper (https://github.com/takari/maven-wrapper), -which is under Apache License 2.0: - -mvnw files from https://github.com/apache/maven-wrapper Apache 2.0 - --------------------------------------------------------------------------------- - -The following class is modified from Apache commons-collections - -./iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/utils/Murmur128Hash.java -Relevant pr is: https://github.com/apache/commons-collections/pull/83/ - --------------------------------------------------------------------------------- - -The following files include code modified from Michael Burman's gorilla-tsc project. - -./iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/GorillaEncoderV2.java -./iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/IntGorillaEncoder.java -./iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/LongGorillaEncoder.java -./iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/SinglePrecisionEncoderV2.java -./iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/DoublePrecisionEncoderV2.java -./iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/GorillaDecoderV2.java -./iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/IntGorillaDecoder.java -./iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/LongGorillaDecoder.java -./iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/SinglePrecisionDecoderV2.java -./iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/DoublePrecisionDecoderV2.java - -Copyright: 2016-2018 Michael Burman and/or other contributors -Project page: https://github.com/burmanm/gorilla-tsc -License: http://www.apache.org/licenses/LICENSE-2.0 - --------------------------------------------------------------------------------- - -The following files include code modified from Panagiotis Liakos, Katia Papakonstantinopoulou and Yannis Kotidis chimp project. 
- -./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/DoublePrecisionChimpDecoder.java -./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/IntChimpDecoder.java -./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/LongChimpDecoder.java -./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/SinglePrecisionChimpDecoder.java -./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/DoublePrecisionChimpEncoder.java -./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/IntChimpEncoder.java -./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/LongChimpEncoder.java -./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/SinglePrecisionChimpEncoder.java - -Copyright: 2022- Panagiotis Liakos, Katia Papakonstantinopoulou and Yannis Kotidis -Project page: https://github.com/panagiotisl/chimp -License: http://www.apache.org/licenses/LICENSE-2.0 - --------------------------------------------------------------------------------- - The following files include code modified from Apache HBase project. -./iotdb-core/confignode/src/main/java/org/apache/iotdb/procedure/Procedure.java -./iotdb-core/confignode/src/main/java/org/apache/iotdb/procedure/ProcedureExecutor.java -./iotdb-core/confignode/src/main/java/org/apache/iotdb/procedure/StateMachineProcedure.java -./iotdb-core/confignode/src/main/java/org/apache/iotdb/procedure/TimeoutExecutorThread.java -./iotdb-core/confignode/src/main/java/org/apache/iotdb/procedure/StoppableThread.java +./iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/Procedure.java +./iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/ProcedureExecutor.java +./iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/StateMachineProcedure.java +./iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/TimeoutExecutorThread.java +./iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/StoppableThread.java Project page: https://github.com/apache/hbase -License: http://www.apache.org/licenses/LICENSE-2.0 +License: https://github.com/apache/hbase/blob/master/LICENSE.txt -------------------------------------------------------------------------------- -The following files include code modified from Eclipse Collections project. 
+The following files include code modified from Largest-Triangle downsampling algorithm implementations for Java8 project, +which is under Apache License 2.0: -./tsfile/src/main/java/org/apache/iotdb/tsfile/utils/ByteArrayList.java +./library-udf/src/main/java/org/apache/iotdb/library/dprofile/util/Area.java +./library-udf/src/main/java/org/apache/iotdb/library/dprofile/util/Bucket.java +./library-udf/src/main/java/org/apache/iotdb/library/dprofile/util/LTThreeBuckets.java +./library-udf/src/main/java/org/apache/iotdb/library/dprofile/util/OnePassBucketizer.java +./library-udf/src/main/java/org/apache/iotdb/library/dprofile/util/SlidingCollector.java +./library-udf/src/main/java/org/apache/iotdb/library/dprofile/util/Triangle.java -Copyright: 2021 Goldman Sachs -Project page: https://www.eclipse.org/collections -License: https://github.com/eclipse/eclipse-collections/blob/master/LICENSE-EDL-1.0.txt +Copyright: 2016 Guillermo Gutierrez Almazor +Project page: https://github.com/ggalmazor/lt_downsampling_java8 +License: https://github.com/ggalmazor/lt_downsampling_java8/blob/main/LICENSE -------------------------------------------------------------------------------- The following files include code modified from Micrometer project. -./iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmClassLoaderMetrics -./iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmCompileMetrics -./iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmGcMetrics -./iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmMemoryMetrics -./iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmThreadMetrics -./iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/logback/LogbackMetrics -./iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/utils/JvmUtils +./iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/jvm/JvmClassLoaderMetrics.java +./iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/jvm/JvmCompileMetrics.java +./iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/jvm/JvmGcMetrics.java +./iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/jvm/JvmMemoryMetrics.java +./iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/jvm/JvmThreadMetrics.java +./iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/JvmUtils.java +./iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/logback/logback/LogbackMetrics.java Copyright: 2017 VMware Project page: https://github.com/micrometer-metrics/micrometer @@ -293,8 +249,7 @@ License: https://github.com/micrometer-metrics/micrometer/blob/main/LICENSE -------------------------------------------------------------------------------- -The following files include code modified from Trino project(https://github.com/trinodb/trino), -which is under Apache License 2.0: +The following files include code modified from Trino project. 
./iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/QueryState.java ./iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/StateMachine.java @@ -302,5 +257,16 @@ which is under Apache License 2.0: ./iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceState.java ./iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceStateMachine.java ./iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/LocalExecutionPlanner.java -./iotdb-core/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/block/* +Trino is open source software licensed under the Apache License 2.0 and supported by the Trino Software Foundation. +Project page: https://github.com/trinodb/trino +License: https://github.com/trinodb/trino/blob/master/LICENSE + +-------------------------------------------------------------------------------- + +The following files include code modified from Apache Kafka project. + +./iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/util/PollTimer.java + +Project page: https://github.com/apache/kafka +License: https://github.com/apache/kafka/blob/trunk/LICENSE diff --git a/LICENSE-binary b/LICENSE-binary index 82b6baeea32f3..c088b00d7f3f5 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -215,96 +215,90 @@ following license. See licenses/ for text of these licenses. Apache Software Foundation License 2.0 -------------------------------------- -commons-cli:commons-cli:1.3.1 -commons-codec:commons-codec:1.13 +commons-cli:commons-cli:1.5.0 +commons-codec:commons-codec:1.16.1 org.apache.commons:commons-collections4:4.4 -commons-io:commons-io:2.5 -org.apache.commons:commons-lang3:3.8.1 -commons-lang:commons-lang:2.6 -com.nimbusds:content-type:2.0 -com.google.code.gson:gson:2.8.6 +commons-io:commons-io:2.14.0 +org.apache.commons:commons-lang3:3.13.0 +com.nimbusds:content-type:2.2 +com.google.code.gson:gson:2.10.1 com.google.guava.guava:32.1.2-jre -com.fasterxml.jackson.core:jackson-annotations:2.10.0 -com.fasterxml.jackson.core:jackson-core:2.10.0 -com.fasterxml.jackson.core:jackson-databind:2.10.0 -javax.inject:javax.inject:1 -net.jpountz.lz4:1.3.0 +com.fasterxml.jackson.core:jackson-annotations:2.15.4 +com.fasterxml.jackson.core:jackson-core:2.15.4 +com.fasterxml.jackson.core:jackson-databind:2.15.4 +jakarta.inject:jakarta.inject:2.6.1 +org.lz4:lz4-java:1.8.0 com.github.stephenc.jcip:jcip-annotations:1.0-1 -com.github.ben-manes.caffeine:caffeine:2.9.1 -org.eclipse.jetty:jetty-http:9.4.24.v20191120 -org.eclipse.jetty:jetty-io:9.4.24.v20191120 -org.eclipse.jetty:jetty-security:9.4.24.v20191120 -org.eclipse.jetty:jetty-server:9.4.24.v20191120 -org.eclipse.jetty:jetty-servlet:9.4.24.v20191120 -org.eclipse.jetty:jetty-util:9.4.24.v20191120 -org.eclipse.jetty:jetty-webapp:9.4.24.v20191120 -org.eclipse.jetty:jetty-xml:9.4.24.v20191120 -io.jsonwebtoken:jjwt-api:0.10.7 -io.jsonwebtoken:jjwt-impl:0.10.7 -io.jsonwebtoken:jjwt-jackson:0.10.7 -net.minidev:json-smart:2.3 +com.github.ben-manes.caffeine:caffeine:2.9.3 +org.eclipse.jetty:jetty-http:9.4.56.v20240826 +org.eclipse.jetty:jetty-io:9.4.56.v20240826 +org.eclipse.jetty:jetty-security:9.4.56.v20240826 +org.eclipse.jetty:jetty-server:9.4.56.v20240826 +org.eclipse.jetty:jetty-servlet:9.4.56.v20240826 +org.eclipse.jetty:jetty-util:9.4.56.v20240826 +io.jsonwebtoken:jjwt-api:0.11.5 +io.jsonwebtoken:jjwt-impl:0.11.5 +io.jsonwebtoken:jjwt-jackson:0.11.5 
+net.minidev:json-smart:2.5.0 com.google.code.findbugs:jsr305:3.0.2 -com.nimbusds:lang-tag:1.4.4 +com.nimbusds:lang-tag:1.7 com.librato.metrics:librato-java:2.1.0 org.apache.thrift:libthrift:0.14.1 -io.dropwizard.metrics:metrics-core:3.2.6 -io.dropwizard.metrics:metrics-json:3.2.6 -io.dropwizard.metrics:metrics-jvm:3.2.6 +io.dropwizard.metrics:metrics-core:4.2.19 +io.dropwizard.metrics:metrics-jvm:3.2.2 com.librato.metrics:metrics-librato:5.1.0 -de.fraunhofer.iosb.io.moquette:moquette-broker:0.14.3 -io.netty:netty-buffer:4.1.53.Final -io.netty:netty-codec:4.1.53.Final -io.netty:netty-codec-http:4.1.53.Final -io.netty:netty-codec-mqtt:4.1.53.Final -io.netty:netty-common:4.1.53.Final -io.netty:netty-handler:4.1.53.Final -io.netty:netty-resolver:4.1.53.Final -io.netty:netty-transport:4.1.53.Final -io.netty:netty-transport-native-epoll:4.1.53.Final:linux-x86_64 -io.netty:netty-transport-native-unix-common:4.1.53.Final -com.nimbusds:nimbus-jose-jwt:8.14.1 -com.nimbusds:oauth2-oidc-sdk:8.3 -org.osgi:org.osgi.core:6.0.0 -org.osgi:osgi.cmpn:6.0.0 -org.ops4j.pax.jdbc:pax-jdbc-common:1.4.5 -org.xerial.snappy:snappy-java:1.1.8.4 -io.airlift.airline:0.8 -net.minidev:accessors-smart:1.2 +de.fraunhofer.iosb.io.moquette:moquette-broker:0.17 +io.netty:netty-buffer:4.1.110.Final +io.netty:netty-codec:4.1.110.Final +io.netty:netty-codec-http:4.1.110.Final +io.netty:netty-codec-mqtt:4.1.110.Final +io.netty:netty-common:4.1.110.Final +io.netty:netty-handler:4.1.110.Final +io.netty:netty-resolver:4.1.110.Final +io.netty:netty-transport:4.1.110.Final +io.netty:netty-transport-native-epoll:4.1.110.Final:linux-x86_64 +io.netty:netty-transport-native-unix-common:4.1.110.Final +com.nimbusds:nimbus-jose-jwt:9.37.3 +com.nimbusds:oauth2-oidc-sdk:10.15 +org.osgi:org.osgi.core:7.0.0 +org.osgi:osgi.cmpn:7.0.0 +org.ops4j.pax.jdbc:pax-jdbc-common:1.5.6 +org.xerial.snappy:snappy-java:1.1.10.5 +io.airlift.airline:0.9 +net.minidev:accessors-smart:2.5.0 BSD 3-Clause ------------ -org.antlr:antlr-runtime:4.8-1 -org.ow2.asm:asm:5.0.4 -org.jline:jline:3.21.0 +org.antlr:antlr-runtime:4.9.3 +org.ow2.asm:asm:9.3 +org.jline:jline:3.26.2 BSD 2-Clause ------------ -com.github.luben:zstd-jni:1.5.4-2 +com.github.luben:zstd-jni:1.5.6-3 MIT License ------------ -org.slf4j:slf4j-api -me.tongfei:progressbar:0.7.3 -com.bugsnag:bugsnag:3.6.1 -org.slf4j:jcl-over-slf4j:1.7.25 +org.slf4j:slf4j-api:2.0.9 +com.bugsnag:bugsnag:3.7.2 EPL 1.0 ------------ -com.h2database:h2-mvstore:1.4.199 -ch.qos.logback:logback-classic:1.2.10 -ch.qos.logback:logback-core:1.2.10 +com.h2database:h2-mvstore:2.1.212 +ch.qos.logback:logback-classic:1.3.14 +ch.qos.logback:logback-core:1.3.14 CDDL 1.1 ------------ -javax.annotation:javax.annotation-api:1.3.2 -javax.servlet:javax.servlet-api:3.1.0 +jakarta.annotation:jakarta.annotation-api:1.3.5 +jakarta.servlet:jakarta.servlet-api:4.0.4 javax.xml.bind:jaxb-api:2.4.0-b180725.0427 -org.glassfish.jaxb:jaxb-runtime:2.4.0-b180725.0644 +org.glassfish.jaxb:jaxb-runtime:2.3.6 Public Domain diff --git a/README.md b/README.md index 6c5a2fc68b37f..d219e4d965733 100644 --- a/README.md +++ b/README.md @@ -171,7 +171,7 @@ Mainly on the ARM-based models: Building `Thrift` requires us to add two more dependencies to the picture. -This however is only needed when enabling the `compile-cpp` profile: +This however is only needed when enabling the `with-cpp` profile: brew install boost brew install bison @@ -293,7 +293,7 @@ After being built, the IoTDB cli is located at the folder "cli/target". 
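For illustration, connecting the built CLI to a local instance might look like this (a sketch; the unpacked directory name under `cli/target` and the default credentials are assumptions):

```bash
# Launch the freshly built CLI against a local DataNode on the default port
cli/target/iotdb-cli-*/sbin/start-cli.sh -h 127.0.0.1 -p 6667 -u root -pw root
```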
### Build Others -Use `-P compile-cpp` for compiling the cpp client. (For more details, read client-cpp's Readme file.) +Use `-P with-cpp` for compiling the C++ client. (For more details, read client-cpp's Readme file.) **NOTE: Directories "`thrift/target/generated-sources/thrift`", "`thrift-sync/target/generated-sources/thrift`", "`thrift-cluster/target/generated-sources/thrift`", "`thrift-influxdb/target/generated-sources/thrift`" diff --git a/README_ZH.md b/README_ZH.md index 8b56048fc96e9..cf25fccd901ca 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -172,7 +172,7 @@ git checkout rel/x.x ### 编译其他模块 -通过添加 `-P compile-cpp` 可以进行c++客户端API的编译。 +通过添加 `-P with-cpp` 可以进行c++客户端API的编译。 **注意："`thrift/target/generated-sources/thrift`", "`thrift-sync/target/generated-sources/thrift`","`thrift-cluster/target/generated-sources/thrift`","`thrift-influxdb/target/generated-sources/thrift`" 和 "`antlr/target/generated-sources/antlr4`" 目录需要添加到源代码根中，以免在 IDE 中产生编译错误。** diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index 6cc03a0e67601..359af0cba5cc2 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -19,6 +19,369 @@ --> +# Apache IoTDB 1.3.3 + +## Features & Improvements + +- Storage Engine: Added new data types String, Blob, Date, and Timestamp. +- Storage Engine: Added a rate-limiting mechanism to multi-level storage. +- Storage Engine: New merge target file splitting feature, with additional configuration parameters and improved memory control in the merge module. +- Data Query: Filter performance optimization, speeding up aggregate queries and WHERE-condition queries. +- Data Query: New load-balancing optimization for client query requests. +- Data Query: New active metadata statistics query added. +- Data Query: Optimized memory control strategy during the query planning phase. +- Data Synchronization: The sender supports transferring files to a specified directory, and the receiver automatically loads them into IoTDB. +- Data Synchronization: The receiver has a new automatic data-type conversion mechanism for incoming requests. +- Data Synchronization: Enhanced observability on the receiver side, supporting ops/latency statistics for multiple internal interfaces, consolidated into a single pipeTransfer display. +- Data Loading: DataNode actively listens for and loads TsFiles, with additional observability metrics. +- Stream Processing Module: New data subscription capability, supporting subscription to database data in the form of data points or TsFile files (see the consumer sketch below). +- Stream Processing Module: Alter Pipe supports altering the source. +- System Management: Optimized configuration files, with the original three configuration files merged into one, reducing user operational costs. +- System Management: Optimized restart recovery performance, reducing startup time. +- System Management: Added internal monitoring items such as device count, estimated remaining time for data synchronization, size of data to be synchronized, and synchronization speed. +- Scripts and Tools: The import-tsfile script is expanded to support running on a server different from the IoTDB server. +- Scripts and Tools: New metadata import and export scripts added. +- Scripts and Tools: New support for Kubernetes Helm added. +- AINode: AINode module added. + ... + +## Bugs + +- Fixed the issue of NullPointerException (NPE) when merging chunks with modifications and empty pages in the sequential space.
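The subscription capability listed above is driven from the Java client. The following is a minimal sketch of a pull consumer that drains one topic; the import paths, builder class, connection parameters, IDs, and topic name are assumptions for illustration, while open(), subscribe(), poll(), and the renamed allTopicMessagesHaveBeenConsumed() are taken from the SubscriptionSessionExample changes later in this patch.

```java
// Sketch only: drain a single subscription topic with a pull consumer.
// Import paths, builder parameters, and the topic name are assumptions;
// the consumer calls mirror SubscriptionSessionExample in this patch.
import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer;
import org.apache.iotdb.session.subscription.payload.SubscriptionMessage;

public class SubscriptionSketch {
  public static void main(String[] args) throws Exception {
    try (SubscriptionPullConsumer consumer =
        new SubscriptionPullConsumer.Builder()
            .host("127.0.0.1")
            .port(6667)
            .consumerId("c1")
            .consumerGroupId("cg1")
            .buildPullConsumer()) {
      consumer.open();
      consumer.subscribe("topic1");
      // Poll until every message published to the topic has been consumed.
      while (!consumer.allTopicMessagesHaveBeenConsumed()) {
        for (SubscriptionMessage message : consumer.poll(10_000L)) {
          System.out.println(message); // handle data points or TsFile payloads
        }
      }
    }
  }
}
```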
+- Fixed the issue where the wrong parent file was used when reassigning the file position for skipped files during merge, leading to failure in creating hard links. +- Fixed the issue where the newly added four data types had null values written, and the TsFile handling of the STRING type was incorrect, causing a BufferUnderflowException: null. +- Fixed the issue in the high availability scenario where stopping the DataNode resulted in a PipeException: Failed to start consensus pipe. +- Fixed the issue in Stream mode where the first batch of written data points might require a flush to be synchronized. +- Fixed the compatibility issue with pipe plugin upgrades. +- Fixed the issue where the `ORDER BY` clause became ineffective when used in combination with `LIMIT` in the last query. + ... + +# Apache IoTDB 1.3.2 + +## Features & Improvements + +- Storage Module: Performance improvement in the insertRecords interface for writing +- Query Module: New Explain Analyze statement added (monitoring the time spent on each stage of a single SQL execution) +- Query Module: New UDAF (User-Defined Aggregate Function) framework added +- Query Module: New MaxBy/MinBy functions added, supporting the retrieval of maximum/minimum values along with the corresponding timestamps +- Query Module: Performance improvement in value filtering queries +- Data Synchronization: Path matching supports path patterns +- Data Synchronization: Supports metadata synchronization (including time series and related attributes, permissions, etc.) +- Stream Processing: Added Alter Pipe statement, supporting hot updates of plugins for Pipe tasks +- System Module: System data point count statistics now include statistics for data imported by loading TsFiles +- Scripts and Tools: New local upgrade backup tool added (backing up original data through hard links) +- Scripts and Tools: New export-data/import-data scripts added, supporting data export in CSV or TsFile format, or as SQL statements +- Scripts and Tools: Windows environment now supports distinguishing ConfigNode, DataNode, and Cli by window name + ... + +## Bugs + +- Optimize the error message when a NullPointerException (NPE) occurs due to a timeout when dropping a database. +- Add logs for notifyLeaderReady, notifyLeaderChanged, and procedure worker. +- Add compatibility handling for existing erroneous data during file merging. +- Fix the deadlock issue caused by flushing empty files during querying. +- Fix the issue where Ratis becomes unresponsive during read, write, and delete operations. +- Fix the concurrent bug in load and merge operations. +- Fix the issue where the system's compression ratio is recorded as a negative number in the file for certain scenarios. +- Fix the ConcurrentModificationException issue during memory estimation for merge tasks. +- Fix potential deadlocks that may occur when writing, automatically creating, and deleting databases concurrently. + ... + +# Apache IoTDB 1.3.1 + +## Features & Improvements +- Add cluster script for one-click start-stop (start-all/stop-all.sh & start-all/stop-all.bat) +- Add script for one-click instance information collection (collect-info.sh & collect-info.bat) +- Add new statistical aggregators including stddev and variance +- Add a command to repair TsFile data +- Support setting a timeout threshold for the Fill clause: when the time gap exceeds the threshold, the value is not filled.
+- Simplify the time range specification for data synchronization, allowing start and end times to be set directly +- Improved system observability (adding dispersion monitoring of cluster nodes, observability of the distributed task scheduling framework) +- Optimized default log output strategy +- Enhance memory control for Load TsFile, covering the entire process +- REST API (Version 2) now returns column types. +- Improve the process of query execution +- Session automatically fetches all available DataNodes + ... + +# Apache IoTDB 1.3.0 + +## Bugs + +- Fix issue with abnormal behavior when time precision is not in milliseconds during grouping by month. +- Fix issue with abnormal behavior when duration contains multiple units during grouping by month. +- Fix bug where limit and offset cannot be pushed down when there is an order by clause. +- Fix abnormal behavior in combination scenario of grouping by month + align by device + limit. +- Fix deserialization errors during IoT protocol synchronization. +- Fix concurrent exceptions in deleting timeseries. +- Fix issue where group by level in view sequences does not execute correctly. +- Fix potential issue of metadata creation failure when increasing election timeout + +## Features & Improvements + +- Optimize the permission module and support timeseries permission control +- Optimize heap and off-heap memory configuration in startup script +- Computed-type view timeseries support LAST queries +- Add pipe-related monitoring metrics +- Pipe: rename 'Extractor' to 'Source' and 'Connector' to 'Sink' +- [IOTDB-6138] Support negative timestamps +- [IOTDB-6193] Reject node startup when loading the configuration file fails +- [IOTDB-6194] Rename target_config_node_list to seed_config_node +- [IOTDB-6200] Change schema template to device template +- [IOTDB-6207] Add Write Point Metrics for load function +- [IOTDB-6208] Node error detection through broken thrift pipe +- [IOTDB-6217] When the number of time series reaches the upper limit, the prompt message should be changed to prioritize using device templates +- [IOTDB-6218] Rename storage_query_schema_consensus_free_memory_proportion to datanode_memory_proportion +- [IOTDB-6219] Fix the display problem of explain where the printed result is not aligned +- [IOTDB-6220] Pipe: Add check logic to avoid self-transmission +- [IOTDB-6222] Optimize the performance of Python client +- [IOTDB-6230] Add HEAPDUMP configuration in datanode-env.sh +- [IOTDB-6231] SchemaCache supports precise eviction +- [IOTDB-6232] Adding SSL function to dn_rpc_port + +# Apache IoTDB 1.2.2 + +## Bugs + +- [IOTDB-6160] While using ` in target path, select into will throw an error +- [IOTDB-6167] DataNode can't register to cluster when fetching system configuration throws NPE +- [IOTDB-6168] ConfigNode register retry logic does not work +- [IOTDB-6171] NPE will be thrown while printing FI with debug on +- [IOTDB-6184] Merge Sort finishes one iterator too long +- [IOTDB-6191] Fix group by year not considering leap years +- [IOTDB-6226] Fix the problem of inaccurate GC monitor detection at the beginning and adjust the alert threshold +- [IOTDB-6239] Show regions displays an incorrect create time + +## Features & Improvements + +- [IOTDB-6029] Implementing flink-sql-iotdb-connector +- [IOTDB-6084] Pipe: support node-urls in connector-v1 +- [IOTDB-6103] Adding count_time aggregation feature +- [IOTDB-6112] Limit & Offset push down doesn't take effect while there exists a time filter +- [IOTDB-6115] Limit & Offset push down doesn't take effect while there exist null values +- 
[IOTDB-6120] Push down limit/offset in query with group by time +- [IOTDB-6129] ConfigNode restarts without relying on Seed-ConfigNode +- [IOTDB-6131] IoTDB REST service supports the insertRecords function +- [IOTDB-6151] Move DataNode's system.properties to the upper dir +- [IOTDB-6173] Change default encoder of INT32 and INT64 from RLE to TS_2DIFF +- Adjust the default thrift timeout parameter to 60s +- Accelerate the deletion execution + +## Bugs + +- [IOTDB-6064] Pipe: Fix deadlock in rolling back procedures concurrently +- [IOTDB-6081] Pipe: use HybridExtractor instead of LogExtractor when realtime mode is set to log to avoid OOM under heavy insertion load +- [IOTDB-6145] Pipe: cannot release TsFile or WAL resource after pipe is dropped +- [IOTDB-6146] Pipe: cannot transfer data after creating and dropping 1000+ pipes +- [IOTDB-6082] Improve disk space metrics +- [IOTDB-6104] tmp directory won't be cleaned after UDF query ends +- [IOTDB-6119] Add ConfigNode leader service check +- [IOTDB-6125] Fix DataPartition allocation bug when inserting big batch data +- [IOTDB-6127] Pipe: buffered events in processor stage cannot be consumed by connector +- [IOTDB-6132] CrossSpaceCompaction: The estimated memory size is too large for cross space compaction task +- [IOTDB-6133] NullPointerException occurs in unsequence InnerSpaceCompactionTask +- [IOTDB-6148] Pipe: Fixed the bug that some uncommitted progress may be reported +- [IOTDB-6156] Fixed TConfiguration not taking effect in Thrift AsyncServer for IoTConsensus +- [IOTDB-6164] Can create illegal paths through the REST API +- Fix DataNode status becoming ReadOnly when the disk is full +- Fix DataPartition allocation bug when inserting a big batch +- Fix flush point statistics +- Fix SchemaFileSketchTool not found +- Refactor DeleteOutdatedFileTask in WalNode +- Add compression and encoding type check for FastCompactionPerformer +- Add lazy page reader for aligned page reader to avoid huge memory cost when reading rows of aligned timeseries +- Pipe: use PipeTaskCoordinatorLock instead of ReentrantLock for multi-thread sync +- Pipe: fix pipe procedure stuck because of a DataNode async request forever waiting for a response +- Pipe: fix NPE when HybridProgressIndex.updateToMinimumIsAfterProgressIndex after system reboot (DR: SimpleConsensus) +- Pipe: fix pipe coordinator deadlock causing CN election timeout +- Pipe: Improve performance for 10000+ pipes +- RATIS-1873.
Remove RetryCache assertion that doesn't hold + +# Apache IoTDB 1.2.1 + +## Features & Improvements + +- [IOTDB-5557] The metadata query results are inconsistent +- [IOTDB-5997] Improve efficiency of ConfigNode PartitionInfo loadSnapshot +- [IOTDB-6019] Fix concurrent update of last query +- [IOTDB-6036] The mods file is too large, causing very slow queries and even OOM problems +- [IOTDB-6055] Enable auto restart of the pipes stopped by ConfigNode because of critical exception +- [IOTDB-6066] Add ConfigNode timeslot metric +- [IOTDB-6073] Add ClientManager metrics +- [IOTDB-6077] Add force stop +- [IOTDB-6079] Cluster computing resource balance +- [IOTDB-6082] Improve disk space metrics +- [IOTDB-6087] Implement stream interface of Mods read +- [IOTDB-6090] Add memory estimator on inner space compaction +- [IOTDB-6092] Factor mods files into memory estimates for cross-space compaction tasks +- [IOTDB-6093] Add multiple validation methods after compaction +- [IOTDB-6106] Fixed the timeout parameter not working in thrift asyncClient +- [IOTDB-6108] AlignedTVList memory calculation is imprecise + +## Bugs + +- [IOTDB-5855] DataRegion leader distribution is the same as DataRegion distribution +- [IOTDB-5860] Total number of files is wrong +- [IOTDB-5996] Incorrect time display of show queries +- [IOTDB-6057] Resolve the compatibility from 1.1.x to 1.2.0 +- [IOTDB-6065] Considering LastCacheContainer in the memory estimation of SchemaCacheEntry +- [IOTDB-6074] Ignore error message when TagManager createSnapshot +- [IOTDB-6075] Pipe: File resource races when different tsfile load operations concurrently modify the same tsfile at receiver +- [IOTDB-6076] Add duplicate checking when upsert alias +- [IOTDB-6078] Fix timeChunk default compressType +- [IOTDB-6089] Improve the lock behaviour of the pipe heartbeat +- [IOTDB-6091] Add compression and encoding type check for FastCompactionPerformer +- [IOTDB-6094] Load: Fix TsFileResource construction bug +- [IOTDB-6095] TsFiles in sequence space may overlap with each other due to LastFlushTime bug +- [IOTDB-6096] M4 will output zero while meeting null +- [IOTDB-6097] Pipe subscription running with the pattern option may cause OOM +- [IOTDB-6098] Flush error when writing aligned timeseries +- [IOTDB-6100] Pipe: Fix running in hybrid mode causing WAL that cannot be deleted & some pipe data lost due to wrong ProducerType of Disruptor +- [IOTDB-6105] Load: NPE when analyzing tsfile + +# Apache IoTDB 1.2.0 + +## New Feature + +* [IOTDB-5567] Add SQL for querying seriesslotid and timeslotid +* [IOTDB-5631] Add a built-in aggregation function named time_duration +* [IOTDB-5636] Add round as built-in scalar function +* [IOTDB-5637] Add substr as built-in scalar function +* [IOTDB-5638] Support case when syntax in IoTDB +* [IOTDB-5643] Add REPLACE as a built-in scalar function +* [IOTDB-5683] Support aggregation function Mode for query +* [IOTDB-5711] Python API should support connecting multiple nodes +* [IOTDB-5752] Python Client supports write redirection +* [IOTDB-5765] Support Order By Expression +* [IOTDB-5771] Add SPRINTZ and RLBE encoders and the LZMA2 compressor +* [IOTDB-5924] Add SessionPool deletion API +* [IOTDB-5950] Support Dynamic Schema Template +* [IOTDB-5951] Support show timeseries/device with specific string contained in path +* [IOTDB-5955] Support create timeseries using schema template in Session API + +## Improvements + +* [IOTDB-5630] Make function cast a built-in function +* [IOTDB-5689] Close ISink when ISourceHandle is closed +* [IOTDB-5715] Improve
the performance of query order by time desc +* [IOTDB-5763] Optimize the memory estimate for INTO operations +* [IOTDB-5887] Optimize the construction performance of PathPatternTree without wildcards +* [IOTDB-5888] TTL logs didn't consider timestamp precision +* [IOTDB-5896] Failed to execute delete statement +* [IOTDB-5908] Add more query metrics +* [IOTDB-5911] print-iotdb-data-dir tool cannot work +* [IOTDB-5914] Remove redundant debug log in Session +* [IOTDB-5919] show variables adds a variable timestamp_precision +* [IOTDB-5926] Optimize metric implementation +* [IOTDB-5929] Enable DataPartition inherit policy +* [IOTDB-5943] Avoid rpc invoking for SimpleQueryTerminator when endpoint is local address +* [IOTDB-5944] Follower doesn't need to update last cache when using IoT_consensus +* [IOTDB-5945] Add a cache to avoid initializing duplicated device id objects in the write process +* [IOTDB-5946] Optimize the implementation of tablet in the Go client +* [IOTDB-5949] Support show timeseries with datatype filter +* [IOTDB-5952] Support FIFO strategy in DataNodeSchemaCache +* [IOTDB-6022] The WAL piles up when multi-replica iotconsensus is written at high concurrency + + +## Bug Fixes + +* [IOTDB-5604] NPE when executing Agg + align by device query without assigned DataRegion +* [IOTDB-5619] group by tags query NPE +* [IOTDB-5644] Unexpected result when there are no select expressions after analyzed in query +* [IOTDB-5657] Limit does not take effect in last query +* [IOTDB-5700] UDF query did not clean temp files after the query finished +* [IOTDB-5716] Wrong dependency when pipeline consumeOneByOneOperator +* [IOTDB-5717] Incorrect result when querying with limit push-downing & order by time desc +* [IOTDB-5722] Wrong default execution branch in PlanVisitor +* [IOTDB-5735] The result of adding the distinct function to the align by device is incorrect +* [IOTDB-5755] Fix the problem that 123w cannot be used in Identifier +* [IOTDB-5756] NPE when where predicate is NotEqualExpression and one of the subExpressions does not exist +* [IOTDB-5757] Not Supported Exception when using 's3 || false' in where even though the type of s3 is Boolean +* [IOTDB-5760] Query is blocked because of no memory +* [IOTDB-5764] Cannot specify alias successfully when the FROM clause contains multiple path suffixes +* [IOTDB-5769] Offset doesn't take effect in some special cases +* [IOTDB-5774] The syntax that path nodes start or end with a wildcard to fuzzy match is not supported +* [IOTDB-5784] Incorrect result when querying with offset push-down and time filter +* [IOTDB-5815] NPE when using UDF to query +* [IOTDB-5837] Exceptions for select into using placeholders +* [IOTDB-5851] Using limit clause in show devices query will throw NPE +* [IOTDB-5858] Metric doesn't display the schemaCache hit ratio +* [IOTDB-5861] Last query is incomplete +* [IOTDB-5889] TTL Cannot delete expired tsfiles +* [IOTDB-5897] NullPointerException in compaction +* [IOTDB-5905] Some aligned timeseries data points lost after flush +* [IOTDB-5934] Optimize cluster partition policy +* [IOTDB-5953] LastCache memory control param does not take effect +* [IOTDB-5963] Sometimes we may get out-of-order query result +* [IOTDB-6016] Release file num cost after cross compaction task + + +# Apache IoTDB 1.1.2 +## New Feature + +* [IOTDB-5919] show variables adds a variable timestamp_precision +* Add Python SessionPool + +## Improvement/Bugfix + +* [IOTDB-5901] Load: loading a TsFile without data will throw NPE +* [IOTDB-5903] Fix cannot select any inner space compaction task
when there is only unsequence data +* [IOTDB-5878] Allow ratis-client retry when gRPC IO Unavailable +* [IOTDB-5939] Correct Flushing Task Timeout Detect Thread +* [IOTDB-5905] Fix aligned timeseries data points lost after flush in some scenarios +* [IOTDB-5963] Make sure that TsBlock blocked on memory is added to the queue before the next TsBlock is returned by the root operator +* [IOTDB-5819] Fix NPE when booting net metrics +* [IOTDB-6023] Pipe: Fix load TsFile error while handling empty value chunks +* [IOTDB-5971] Fix potential QUOTE problem in iotdb reporter +* [IOTDB-5993] ConfigNode leader change causes missing DataPartition allocation results in the response of the getOrCreateDataPartition method +* [IOTDB-5910] Fix compaction scheduler thread pool not being shut down when aborting compaction +* [IOTDB-6056] Pipe: Failed to load tsfile with empty pages (NPE occurs when loading) +* [IOTDB-5916] Fix exception when file is deleted during compaction selection +* [IOTDB-5896] Fix the NPE issue when taking snapshot in WAL combined with Aligned Binary +* [IOTDB-5929] Enable DataPartition inherit policy +* [IOTDB-5934] Optimize cluster partition policy +* [IOTDB-5926] Remove Useless Rater in Timer +* [IOTDB-6030] Improve efficiency of ConfigNode PartitionInfo takeSnapshot +* [IOTDB-5997] Improve efficiency of ConfigNode PartitionInfo loadSnapshot +* Fix potential deadlock when freeing memory in MemoryPool +* Release resources of FI after all drivers have been closed +* Set default degree of parallelism back to the number of CPUs +* Allow SequenceStrategy and MaxDiskUsableSpaceFirstStrategy in cluster mode +* Fix NPE caused by invalid values in the metric module +* Fix CQ not taking effect in ns time_precision +* Fix storage engine memory config initialization +* Fix template-related schema query +* Add default charset setting in start-datanode.bat and print the default charset when starting +* Fix TsFileResource error after deleting a device in the sequence working memtable +* Load TsFile bugs: not checking whether the TsFile data loaded locally is in the same time partition during the loading process & LoadTsFilePieceNode error when loading a TsFile with empty value chunks +* Fix alias query failure after restarting DataNode + +# Apache IoTDB 1.1.1 +## New Feature + +* [IOTDB-2569] ZSTD compression + +## Improvement/Bugfix + +* [IOTDB-5781] Change the default strategy to SequenceStrategy +* [IOTDB-5780] Let users know a node was successfully removed and data is recovered +* [IOTDB-5735] The result of adding the distinct function to the align by device is incorrect +* [IOTDB-5777] When writing data using non-root users, the permission authentication module takes too long +* [IOTDB-5835] Fix WAL accumulation caused by DataNode restart +* [IOTDB-5828] Optimize the implementation of some metric items in the metric module to prevent Prometheus pull timeouts +* [IOTDB-5813] ConfigNode restart error due to installSnapshot failure +* [IOTDB-5657] Limit does not take effect in last query +* [IOTDB-5717] Incorrect result when querying with limit push-downing & order by time desc +* [IOTDB-5722] Wrong default execution branch in PlanVisitor +* [IOTDB-5784] Incorrect result when querying with offset push-down and time filter +* [IOTDB-5815] NPE when using UDF to query +* [IOTDB-5829] Query with limit clause will cause other concurrent queries to break down +* [IOTDB-5824] show devices with * cannot display satisfied devices +* [IOTDB-5831] Drop database won't completely delete files on disk +* [IOTDB-5818] Cross-space compaction of
aligned timeseries is stuck +- [IOTDB-5859] Compaction error when using Version as first sort dimension +- [IOTDB-5869] Fix loading overlapping sequence TsFiles + # Apache IoTDB 1.1.0 ## New Features diff --git a/code-coverage/pom.xml b/code-coverage/pom.xml index aa1bd56ff3f6c..c9d7e78ef8820 100644 --- a/code-coverage/pom.xml +++ b/code-coverage/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-parent - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT iotdb-code-coverage pom diff --git a/distribution/pom.xml b/distribution/pom.xml index 1f112d7fc2840..01fbf6b40a1e1 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-parent - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT iotdb-distribution pom @@ -33,25 +33,25 @@ org.apache.iotdb iotdb-server - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT zip org.apache.iotdb iotdb-cli - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT zip org.apache.iotdb iotdb-confignode - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT zip org.apache.iotdb library-udf - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT @@ -168,5 +168,63 @@ + + with-ainode + + + org.apache.iotdb + iotdb-ainode + 1.3.4-SNAPSHOT + + + + + + org.apache.maven.plugins + maven-assembly-plugin + + + + all-bin + + single + + package + + + src/assembly/ainode.xml + + + + + + + + net.nicoulaj.maven.plugins + checksum-maven-plugin + + + sign-source-release + + files + + package + + + + + apache-iotdb-${project.version}-ainode-bin.zip + + + + + + + + + + diff --git a/distribution/src/assembly/ainode.xml b/distribution/src/assembly/ainode.xml new file mode 100644 index 0000000000000..c4bfca25c52e0 --- /dev/null +++ b/distribution/src/assembly/ainode.xml @@ -0,0 +1,46 @@ + + + + ainode-bin + + dir + zip + + apache-iotdb-${project.version}-ainode-bin + + + ${project.basedir}/../iotdb-core/ainode/target/apache-iotdb-ainode-${project.version}/apache-iotdb-ainode-${project.version}/conf + ${file.separator}/conf + + + ${project.basedir}/../iotdb-core/ainode/target/apache-iotdb-ainode-${project.version}/apache-iotdb-ainode-${project.version}/sbin + ${file.separator}/sbin + + + ${project.basedir}/../iotdb-core/ainode/target/apache-iotdb-ainode-${project.version}/apache-iotdb-ainode-${project.version}/lib + ${file.separator}/lib + + + + common-files.xml + + diff --git a/docker/src/main/DockerCompose/replace-conf-from-env.sh b/docker/src/main/DockerCompose/replace-conf-from-env.sh index 887faf1bdcd63..dc3d6cf2cf11e 100755 --- a/docker/src/main/DockerCompose/replace-conf-from-env.sh +++ b/docker/src/main/DockerCompose/replace-conf-from-env.sh @@ -21,33 +21,36 @@ conf_path=${IOTDB_HOME}/conf target_files="iotdb-system.properties" -function process_single(){ - local key_value="$1" - local filename=$2 - local key=$(echo $key_value|cut -d = -f1) - local line=$(grep -ni "${key}=" ${filename}) - #echo "line=$line" - if [[ -n "${line}" ]]; then +function process_single() { + local key_value="$1" + local filename=$2 + local key=$(echo $key_value | cut -d = -f1) + local line=$(grep -ni "${key}=" ${filename}) + #echo "line=$line" + if [[ -n "${line}" ]]; then echo "update $key $filename" - local line_no=$(echo $line|cut -d : -f1) - local content=$(echo $line|cut -d : -f2) - if [[ "${content:0:1}" != "#" ]]; then + local line_no=$(echo $line | cut -d : -f1) + local content=$(echo $line | cut -d : -f2) + if [[ "${content:0:1}" != "#" ]]; then sed -i "${line_no}d" ${filename} fi sed -i "${line_no} i${key_value}" ${filename} - fi + else + echo "append $key to $filename" + + echo "${key_value}" >>"${filename}" + fi } -function replace_configs(){ +function replace_configs() {
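# For every environment variable that looks like a configuration entry
# (contains '=' and '_', excluding JAVA_*), apply it to each file listed in
# $target_files via process_single, which now updates an existing key in
# place and appends the key when it is missing.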
for v in $(env); do if [[ "${v}" =~ "=" && "${v}" =~ "_" && ! "${v}" =~ "JAVA_" ]]; then -# echo "###### $v ####" + # echo "###### $v ####" for f in ${target_files}; do - process_single $v ${conf_path}/$f + process_single $v ${conf_path}/$f done fi done } replace_configs - diff --git a/example/client-cpp-example/pom.xml b/example/client-cpp-example/pom.xml index 29229f7078136..0b077b5ea6ee3 100644 --- a/example/client-cpp-example/pom.xml +++ b/example/client-cpp-example/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT client-cpp-example IoTDB: Example: CPP Client diff --git a/example/jdbc/pom.xml b/example/jdbc/pom.xml index 2eb25e0633e68..2db6528c23fa3 100644 --- a/example/jdbc/pom.xml +++ b/example/jdbc/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT jdbc-example IoTDB: Example: JDBC diff --git a/example/mqtt-customize/pom.xml b/example/mqtt-customize/pom.xml index b67be1f441318..9e119409fb952 100644 --- a/example/mqtt-customize/pom.xml +++ b/example/mqtt-customize/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT customize-mqtt-example IoTDB: Example: Customized MQTT diff --git a/example/mqtt-customize/src/main/java/org/apache/iotdb/mqtt/server/CustomizedJsonPayloadFormatter.java b/example/mqtt-customize/src/main/java/org/apache/iotdb/mqtt/server/CustomizedJsonPayloadFormatter.java index 5b708f1bcd57d..b0a7adae9ae84 100644 --- a/example/mqtt-customize/src/main/java/org/apache/iotdb/mqtt/server/CustomizedJsonPayloadFormatter.java +++ b/example/mqtt-customize/src/main/java/org/apache/iotdb/mqtt/server/CustomizedJsonPayloadFormatter.java @@ -23,6 +23,7 @@ import org.apache.iotdb.db.protocol.mqtt.PayloadFormatter; import io.netty.buffer.ByteBuf; +import org.apache.commons.lang3.NotImplementedException; import java.util.ArrayList; import java.util.Arrays; @@ -32,7 +33,7 @@ public class CustomizedJsonPayloadFormatter implements PayloadFormatter { @Override - public List format(ByteBuf payload) { + public List format(String topic, ByteBuf payload) { // Suppose the payload is a json format if (payload == null) { return Collections.emptyList(); @@ -53,6 +54,12 @@ public List format(ByteBuf payload) { return ret; } + @Override + @Deprecated + public List format(ByteBuf payload) { + throw new NotImplementedException(); + } + @Override public String getName() { // set the value of mqtt_payload_formatter in iotdb-common.properties as the following string: diff --git a/example/mqtt/pom.xml b/example/mqtt/pom.xml index 62619735c0fc5..9b03993591fcf 100644 --- a/example/mqtt/pom.xml +++ b/example/mqtt/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT mqtt-example IoTDB: Example: MQTT diff --git a/example/pipe-count-point-processor/pom.xml b/example/pipe-count-point-processor/pom.xml index 9b486cd08bd54..0877ab9df2940 100644 --- a/example/pipe-count-point-processor/pom.xml +++ b/example/pipe-count-point-processor/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT pipe-count-point-processor-example IoTDB: Example: Pipe: Count Point Processor diff --git a/example/pipe-opc-ua-sink/pom.xml b/example/pipe-opc-ua-sink/pom.xml index 37107b08de5e4..12c2e8b7cdaab 100644 --- a/example/pipe-opc-ua-sink/pom.xml +++ b/example/pipe-opc-ua-sink/pom.xml @@ -23,7 +23,7 @@ org.apache.iotdb iotdb-examples - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT 4.0.0 pipe-opc-ua-sink-example
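Implementers of custom MQTT formatters now target the topic-aware overload shown above. Below is a minimal sketch of such a formatter; the Message payload type, its package location, and the UTF-8 JSON handling are assumptions, while the two format(...) signatures, the @Deprecated single-argument overload, and getName() mirror the CustomizedJsonPayloadFormatter change.

```java
// Sketch only: a custom formatter against the new topic-aware API.
// The Message type (assumed to live beside PayloadFormatter) and the JSON
// handling are illustrative; the actual parsing is left as a placeholder.
package org.apache.iotdb.mqtt.server;

import org.apache.iotdb.db.protocol.mqtt.Message;
import org.apache.iotdb.db.protocol.mqtt.PayloadFormatter;

import io.netty.buffer.ByteBuf;
import org.apache.commons.lang3.NotImplementedException;

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.List;

public class TopicAwareJsonPayloadFormatter implements PayloadFormatter {

  @Override
  public List<Message> format(String topic, ByteBuf payload) {
    if (payload == null) {
      return Collections.emptyList();
    }
    // The topic is now available, e.g. for mapping MQTT topics to device paths.
    String json = payload.toString(StandardCharsets.UTF_8);
    // ... parse json and build Message objects here ...
    return Collections.emptyList();
  }

  @Override
  @Deprecated
  public List<Message> format(ByteBuf payload) {
    // Deprecated by this patch in favor of the topic-aware overload.
    throw new NotImplementedException();
  }

  @Override
  public String getName() {
    // Referenced by the mqtt_payload_formatter property.
    return "TopicAwareJson";
  }
}
```

diff --git a/example/pom.xml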
index 548f7004d0d44..3e8687344a364 100644 --- a/example/pom.xml +++ b/example/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-parent - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT iotdb-examples pom diff --git a/example/rest-java-example/pom.xml b/example/rest-java-example/pom.xml index 970fe626dd9e5..3be62375eb37c 100644 --- a/example/rest-java-example/pom.xml +++ b/example/rest-java-example/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT rest-java-example IoTDB: Example: Java Rest diff --git a/example/schema/pom.xml b/example/schema/pom.xml index 54246f9c8064a..b8463f379e5fd 100644 --- a/example/schema/pom.xml +++ b/example/schema/pom.xml @@ -24,7 +24,7 @@ iotdb-examples org.apache.iotdb - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT schema-example IoTDB: Example: Schema diff --git a/example/session/pom.xml b/example/session/pom.xml index 818cebd1ccc0a..2ad38f4f3c405 100644 --- a/example/session/pom.xml +++ b/example/session/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT client-example IoTDB: Example: Session Client diff --git a/example/session/src/main/java/org/apache/iotdb/SubscriptionSessionExample.java b/example/session/src/main/java/org/apache/iotdb/SubscriptionSessionExample.java index 1ec74a34a29ca..c01797982e009 100644 --- a/example/session/src/main/java/org/apache/iotdb/SubscriptionSessionExample.java +++ b/example/session/src/main/java/org/apache/iotdb/SubscriptionSessionExample.java @@ -268,7 +268,7 @@ private static void dataSubscription3() throws Exception { .buildPushConsumer()) { consumer3.open(); consumer3.subscribe(TOPIC_3); - while (!consumer3.allSnapshotTopicMessagesHaveBeenConsumed()) { + while (!consumer3.allTopicMessagesHaveBeenConsumed()) { LockSupport.parkNanos(SLEEP_NS); // wait some time } } @@ -309,7 +309,7 @@ private static void dataSubscription4() throws Exception { .buildPullConsumer()) { consumer4.open(); consumer4.subscribe(TOPIC_4); - while (!consumer4.allSnapshotTopicMessagesHaveBeenConsumed()) { + while (!consumer4.allTopicMessagesHaveBeenConsumed()) { for (final SubscriptionMessage message : consumer4.poll(POLL_TIMEOUT_MS)) { final SubscriptionTsFileHandler handler = message.getTsFileHandler(); handler.moveFile( diff --git a/example/session/src/main/java/org/apache/iotdb/TabletExample.java b/example/session/src/main/java/org/apache/iotdb/TabletExample.java index 2b2d595e2e844..6ffa64af59652 100644 --- a/example/session/src/main/java/org/apache/iotdb/TabletExample.java +++ b/example/session/src/main/java/org/apache/iotdb/TabletExample.java @@ -89,9 +89,11 @@ private static Map> loadCSVData( case BOOLEAN: ret.get(measurement).add(Boolean.parseBoolean(items[idx])); break; + case DATE: case INT32: ret.get(measurement).add(Integer.parseInt(items[idx])); break; + case TIMESTAMP: case INT64: ret.get(measurement).add(Long.parseLong(items[idx])); break; @@ -101,6 +103,8 @@ private static Map> loadCSVData( case DOUBLE: ret.get(measurement).add(Double.parseDouble(items[idx])); break; + case STRING: + case BLOB: case TEXT: ret.get(measurement).add(BytesUtils.valueOf(items[idx])); break; diff --git a/example/trigger/pom.xml b/example/trigger/pom.xml index 1970972306f4f..caf264c63b532 100644 --- a/example/trigger/pom.xml +++ b/example/trigger/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT trigger-example IoTDB: Example: Trigger diff --git a/example/udf/pom.xml b/example/udf/pom.xml index db0cd00884594..14e722ac384ac 100644 --- a/example/udf/pom.xml +++ 
b/example/udf/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT udf-example IoTDB: Example: UDF diff --git a/example/udf/src/main/java/org/apache/iotdb/udf/UDAFExample.java b/example/udf/src/main/java/org/apache/iotdb/udf/UDAFExample.java index b2b9bec5f1bab..540f6bcef5cce 100644 --- a/example/udf/src/main/java/org/apache/iotdb/udf/UDAFExample.java +++ b/example/udf/src/main/java/org/apache/iotdb/udf/UDAFExample.java @@ -110,6 +110,10 @@ public void addInput(State state, Column[] columns, BitMap bitMap) { return; case TEXT: case BOOLEAN: + case TIMESTAMP: + case STRING: + case BLOB: + case DATE: default: throw new UnSupportedDataTypeException( String.format("Unsupported data type in aggregation AVG : %s", dataType)); diff --git a/integration-test/pom.xml b/integration-test/pom.xml index 896a9bfa94085..f0731eafbf25c 100644 --- a/integration-test/pom.xml +++ b/integration-test/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-parent - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT integration-test IoTDB: Integration-Test @@ -72,47 +72,47 @@ org.apache.iotdb iotdb-server - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-session - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-jdbc - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb trigger-api - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb isession - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb service-rpc - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-thrift-confignode - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb node-commons - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-thrift-commons - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.tsfile @@ -122,7 +122,7 @@ org.apache.iotdb udf-api - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb @@ -132,7 +132,7 @@ org.apache.iotdb iotdb-consensus - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.slf4j @@ -161,17 +161,17 @@ org.apache.iotdb iotdb-confignode - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-thrift - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-cli - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT commons-codec @@ -201,7 +201,7 @@ org.apache.iotdb iotdb-server - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT test-jar test @@ -415,7 +415,7 @@ - org.apache.iotdb.itbase.category.MultiClusterIT1,org.apache.iotdb.itbase.category.MultiClusterIT2AutoCreateSchema,org.apache.iotdb.itbase.category.MultiClusterIT2ManualCreateSchema,org.apache.iotdb.itbase.category.MultiClusterIT2Subscription,org.apache.iotdb.itbase.category.MultiClusterIT3 + org.apache.iotdb.itbase.category.MultiClusterIT1,org.apache.iotdb.itbase.category.MultiClusterIT2AutoCreateSchema,org.apache.iotdb.itbase.category.MultiClusterIT2ManualCreateSchema,org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionArchVerification,org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer,org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionMisc,org.apache.iotdb.itbase.category.MultiClusterIT3 false true true @@ -465,13 +465,41 @@ - MultiClusterIT2Subscription + MultiClusterIT2SubscriptionArchVerification false - org.apache.iotdb.itbase.category.MultiClusterIT2Subscription + org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionArchVerification + false + true + true + MultiCluster + + + + MultiClusterIT2SubscriptionRegressionConsumer + + false + + + + org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer + false + true + true + MultiCluster + + + + MultiClusterIT2SubscriptionRegressionMisc + + false + + + + 
org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionMisc false true true @@ -506,6 +534,20 @@ Cluster1 + + AIClusterIT + + false + + + + org.apache.iotdb.itbase.category.AIClusterIT + false + false + false + AI + + DailyIT diff --git a/integration-test/src/assembly/mpp-test.xml b/integration-test/src/assembly/mpp-test.xml index 3dc443c8d03f3..9a4a8727ea2e6 100644 --- a/integration-test/src/assembly/mpp-test.xml +++ b/integration-test/src/assembly/mpp-test.xml @@ -42,6 +42,10 @@ conf ${project.basedir}/../iotdb-core/metrics/interface/src/main/assembly/resources/conf + + conf + ${project.basedir}/../iotdb-core/ainode/resources/conf + sbin ${project.basedir}/../iotdb-core/datanode/src/assembly/resources/sbin @@ -52,6 +56,16 @@ ${project.basedir}/../iotdb-core/confignode/src/assembly/resources/sbin 0755 + + sbin + ${project.basedir}/../iotdb-core/ainode/resources/sbin + 0755 + + + venv + ${project.basedir}/../iotdb-core/ainode/venv + 0755 + tools ${project.basedir}/../iotdb-core/datanode/src/assembly/resources/tools @@ -67,6 +81,11 @@ ${project.basedir}/../iotdb-client/cli/src/assembly/resources/tools 0755 + + lib + ${project.basedir}/../iotdb-core/ainode/dist/ + 0755 + diff --git a/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/UDAFAvg.java b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/UDAFAvg.java index a17905e4b1518..ae34b8e5cb48d 100644 --- a/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/UDAFAvg.java +++ b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/UDAFAvg.java @@ -101,7 +101,11 @@ public void addInput(State state, Column[] columns, BitMap bitMap) { addDoubleInput(avgState, columns, bitMap); return; case TEXT: + case STRING: + case BLOB: case BOOLEAN: + case DATE: + case TIMESTAMP: default: throw new UnSupportedDataTypeException( String.format("Unsupported data type in aggregation AVG : %s", dataType)); diff --git a/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/UDAFSum.java b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/UDAFSum.java index 087dc655d8444..a829c9f2c477b 100644 --- a/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/UDAFSum.java +++ b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/UDAFSum.java @@ -101,7 +101,11 @@ public void addInput(State state, Column[] columns, BitMap bitMap) { addDoubleInput(sumState, columns, bitMap); return; case TEXT: + case STRING: + case BLOB: case BOOLEAN: + case TIMESTAMP: + case DATE: default: throw new UnSupportedDataTypeException( String.format("Unsupported data type in aggregation AVG : %s", dataType)); diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/EnvFactory.java b/integration-test/src/main/java/org/apache/iotdb/it/env/EnvFactory.java index 8ea62903e1f41..54799dbaa45d6 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/EnvFactory.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/EnvFactory.java @@ -19,6 +19,8 @@ package org.apache.iotdb.it.env; +import org.apache.iotdb.it.env.cluster.env.AIEnv; +import org.apache.iotdb.it.env.cluster.env.AbstractEnv; import org.apache.iotdb.it.env.cluster.env.Cluster1Env; import org.apache.iotdb.it.env.cluster.env.SimpleEnv; import org.apache.iotdb.it.env.remote.env.RemoteServerEnv; @@ -52,6 +54,9 @@ public static BaseEnv getEnv() { case Remote: env = new RemoteServerEnv(); break; + case AI: + env = new AIEnv(); + break; case MultiCluster: logger.warn( 
"EnvFactory only supports EnvType Simple, Cluster1 and Remote, please use MultiEnvFactory instead."); @@ -69,4 +74,8 @@ public static BaseEnv getEnv() { } return env; } + + public static AbstractEnv getAbstractEnv() { + return (AbstractEnv) getEnv(); + } } diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/EnvType.java b/integration-test/src/main/java/org/apache/iotdb/it/env/EnvType.java index 5be5da287c741..f22144014f5a7 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/EnvType.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/EnvType.java @@ -24,7 +24,7 @@ public enum EnvType { Simple, Cluster1, MultiCluster, - ; + AI; public static EnvType getSystemEnvType() { String envValue = System.getProperty("TestEnv", Simple.name()); diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/MultiEnvFactory.java b/integration-test/src/main/java/org/apache/iotdb/it/env/MultiEnvFactory.java index f8c28567be1cb..5832f1c485bc5 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/MultiEnvFactory.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/MultiEnvFactory.java @@ -38,24 +38,25 @@ private MultiEnvFactory() { // Empty constructor } - public static void setTestMethodName(String testMethodName) { + public static void setTestMethodName(final String testMethodName) { currentMethodName = testMethodName; + envList.forEach(baseEnv -> baseEnv.setTestMethodName(testMethodName)); } /** Get an environment with the specific index. */ - public static BaseEnv getEnv(int index) throws IndexOutOfBoundsException { + public static BaseEnv getEnv(final int index) throws IndexOutOfBoundsException { return envList.get(index); } /** Create several environments according to the specific number. 
*/ - public static void createEnv(int num) { + public static void createEnv(final int num) { // Not judge EnvType for individual test convenience - long startTime = System.currentTimeMillis(); + final long startTime = System.currentTimeMillis(); for (int i = 0; i < num; ++i) { try { Class.forName(Config.JDBC_DRIVER_NAME); envList.add(new MultiClusterEnv(startTime, i, currentMethodName)); - } catch (ClassNotFoundException e) { + } catch (final ClassNotFoundException e) { logger.error("Create env error", e); System.exit(-1); } diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/ClusterConstant.java b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/ClusterConstant.java index 12af0d025f75c..147b57f2f67a8 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/ClusterConstant.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/ClusterConstant.java @@ -115,9 +115,6 @@ public class ClusterConstant { "strongConsistencyClusterMode.dataRegionReplicaNumber"; // Property file names - public static final String CONFIG_NODE_PROPERTIES_FILE = "iotdb-confignode.properties"; - public static final String DATA_NODE_PROPERTIES_FILE = "iotdb-datanode.properties"; - public static final String COMMON_PROPERTIES_FILE = "iotdb-common.properties"; public static final String IOTDB_SYSTEM_PROPERTIES_FILE = "iotdb-system.properties"; // Properties' keys @@ -142,10 +139,7 @@ public class ClusterConstant { // ConfigNode public static final String CN_SYSTEM_DIR = "cn_system_dir"; public static final String CN_CONSENSUS_DIR = "cn_consensus_dir"; - public static final String CN_METRIC_PROMETHEUS_REPORTER_PORT = - "cn_metric_prometheus_reporter_port"; public static final String CN_METRIC_IOTDB_REPORTER_HOST = "cn_metric_iotdb_reporter_host"; - public static final String CN_METRIC_IOTDB_REPORTER_PORT = "cn_metric_iotdb_reporter_port"; public static final String CN_CONNECTION_TIMEOUT_MS = "cn_connection_timeout_ms"; @@ -157,13 +151,10 @@ public class ClusterConstant { public static final String DN_TRACING_DIR = "dn_tracing_dir"; public static final String DN_SYNC_DIR = "dn_sync_dir"; public static final String DN_METRIC_IOTDB_REPORTER_HOST = "dn_metric_iotdb_reporter_host"; - public static final String DN_METRIC_PROMETHEUS_REPORTER_PORT = - "dn_metric_prometheus_reporter_port"; public static final String DN_MPP_DATA_EXCHANGE_PORT = "dn_mpp_data_exchange_port"; public static final String DN_DATA_REGION_CONSENSUS_PORT = "dn_data_region_consensus_port"; public static final String DN_SCHEMA_REGION_CONSENSUS_PORT = "dn_schema_region_consensus_port"; - public static final String PIPE_AIR_GAP_RECEIVER_ENABLED = "pipe_air_gap_receiver_enabled"; public static final String PIPE_AIR_GAP_RECEIVER_PORT = "pipe_air_gap_receiver_port"; public static final String MAX_TSBLOCK_SIZE_IN_BYTES = "max_tsblock_size_in_bytes"; public static final String PAGE_SIZE_IN_BYTE = "page_size_in_byte"; @@ -171,13 +162,23 @@ public class ClusterConstant { "dn_join_cluster_retry_interval_ms"; public static final String DN_CONNECTION_TIMEOUT_MS = "dn_connection_timeout_ms"; public static final String DN_METRIC_INTERNAL_REPORTER_TYPE = "dn_metric_internal_reporter_type"; + public static final String CONFIG_NODE_RATIS_LOG_APPENDER_BUFFER_SIZE_MAX = + "config_node_ratis_log_appender_buffer_size_max"; + public static final String WAL_BUFFER_SIZE_IN_BYTE = "wal_buffer_size_in_byte"; + public static final String SCHEMA_REGION_RATIS_LOG_APPENDER_BUFFER_SIZE_MAX = + 
"schema_region_ratis_log_appender_buffer_size_max"; + public static final String DATA_REGION_RATIS_LOG_APPENDER_BUFFER_SIZE_MAX = + "data_region_ratis_log_appender_buffer_size_max"; // Paths public static final String USER_DIR = "user.dir"; public static final String TARGET = "target"; + public static final String PYTHON_PATH = "venv/bin/python3"; public static final String DATA_NODE_NAME = "DataNode"; + public static final String AI_NODE_NAME = "AINode"; + public static final String LOCK_FILE_PATH = System.getProperty(USER_DIR) + File.separator + TARGET + File.separator + "lock-"; public static final String TEMPLATE_NODE_PATH = @@ -195,7 +196,6 @@ public class ClusterConstant { // Env Constant public static final int NODE_START_TIMEOUT = 100; - public static final int PROBE_TIMEOUT_MS = 2000; public static final int NODE_NETWORK_TIMEOUT_MS = 0; public static final String ZERO_TIME_ZONE = "GMT+0"; diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/EnvUtils.java b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/EnvUtils.java index f3c9527e5952f..9663fa371e95a 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/EnvUtils.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/EnvUtils.java @@ -70,22 +70,22 @@ public static int[] searchAvailablePorts() { while (true) { int randomPortStart = 1000 + (int) (Math.random() * (1999 - 1000)); randomPortStart = randomPortStart * (length + 1) + 1; - String lockFilePath = getLockFilePath(randomPortStart); - File lockFile = new File(lockFilePath); + final String lockFilePath = getLockFilePath(randomPortStart); + final File lockFile = new File(lockFilePath); try { // Lock the ports first to avoid to be occupied by other ForkedBooters during ports // available detecting if (!lockFile.createNewFile()) { continue; } - List requiredPorts = + final List requiredPorts = IntStream.rangeClosed(randomPortStart, randomPortStart + length) .boxed() .collect(Collectors.toList()); if (checkPortsAvailable(requiredPorts)) { return requiredPorts.stream().mapToInt(Integer::intValue).toArray(); } - } catch (IOException e) { + } catch (final IOException ignore) { // ignore } // Delete the lock file if the ports can't be used or some error happens @@ -95,39 +95,35 @@ public static int[] searchAvailablePorts() { } } - private static boolean checkPortsAvailable(List ports) { - String cmd = getSearchAvailablePortCmd(ports); + private static boolean checkPortsAvailable(final List ports) { + final String cmd = getSearchAvailablePortCmd(ports); try { - Process proc = Runtime.getRuntime().exec(cmd); - return proc.waitFor() == 1; - } catch (IOException e) { + return Runtime.getRuntime().exec(cmd).waitFor() == 1; + } catch (final IOException ignore) { // ignore - } catch (InterruptedException e) { + } catch (final InterruptedException e) { Thread.currentThread().interrupt(); } return false; } - private static String getSearchAvailablePortCmd(List ports) { - if (SystemUtils.IS_OS_WINDOWS) { - return getWindowsSearchPortCmd(ports); - } - return getUnixSearchPortCmd(ports); + private static String getSearchAvailablePortCmd(final List ports) { + return SystemUtils.IS_OS_WINDOWS ? 
getWindowsSearchPortCmd(ports) : getUnixSearchPortCmd(ports); } - private static String getWindowsSearchPortCmd(List ports) { - String cmd = "netstat -aon -p tcp | findStr "; - return cmd + private static String getWindowsSearchPortCmd(final List ports) { + return "netstat -aon -p tcp | findStr " + ports.stream().map(v -> "/C:'127.0.0.1:" + v + "'").collect(Collectors.joining(" ")); } - private static String getUnixSearchPortCmd(List ports) { - String cmd = "lsof -iTCP -sTCP:LISTEN -P -n | awk '{print $9}' | grep -E "; - return cmd + ports.stream().map(String::valueOf).collect(Collectors.joining("|")) + "\""; + private static String getUnixSearchPortCmd(final List ports) { + return "lsof -iTCP -sTCP:LISTEN -P -n | awk '{print $9}' | grep -E " + + ports.stream().map(String::valueOf).collect(Collectors.joining("|")) + + "\""; } - private static Pair getClusterNodesNum(int index) { - String valueStr = System.getProperty(CLUSTER_CONFIGURATIONS); + private static Pair getClusterNodesNum(final int index) { + final String valueStr = System.getProperty(CLUSTER_CONFIGURATIONS); if (valueStr == null) { return null; } @@ -154,17 +150,17 @@ private static Pair getClusterNodesNum(int index) { // Print nothing to avoid polluting test outputs return null; } - } catch (NumberFormatException ignore) { + } catch (final NumberFormatException ignore) { return null; } } - public static String getLockFilePath(int port) { + public static String getLockFilePath(final int port) { return LOCK_FILE_PATH + port; } public static Pair getNodeNum() { - Pair nodesNum = getClusterNodesNum(0); + final Pair nodesNum = getClusterNodesNum(0); if (nodesNum != null) { return nodesNum; } @@ -173,8 +169,8 @@ public static Pair getNodeNum() { getIntFromSysVar(DEFAULT_DATA_NODE_NUM, 3, 0)); } - public static Pair getNodeNum(int index) { - Pair nodesNum = getClusterNodesNum(index); + public static Pair getNodeNum(final int index) { + final Pair nodesNum = getClusterNodesNum(index); if (nodesNum != null) { return nodesNum; } @@ -183,38 +179,38 @@ public static Pair getNodeNum(int index) { getIntFromSysVar(DEFAULT_DATA_NODE_NUM, 3, index)); } - public static String getFilePathFromSysVar(String key, int index) { - String valueStr = System.getProperty(key); + public static String getFilePathFromSysVar(final String key, final int index) { + final String valueStr = System.getProperty(key); if (valueStr == null) { return null; } return System.getProperty(USER_DIR) + getValueOfIndex(valueStr, index); } - public static int getIntFromSysVar(String key, int defaultValue, int index) { - String valueStr = System.getProperty(key); + public static int getIntFromSysVar(final String key, final int defaultValue, final int index) { + final String valueStr = System.getProperty(key); if (valueStr == null) { return defaultValue; } - String value = getValueOfIndex(valueStr, index); + final String value = getValueOfIndex(valueStr, index); try { return Integer.parseInt(value); - } catch (NumberFormatException e) { + } catch (final NumberFormatException e) { throw new IllegalArgumentException("Invalid property value: " + value + " of key " + key); } } - public static String getValueOfIndex(String valueStr, int index) { - String[] values = valueStr.split(DELIMITER); + public static String getValueOfIndex(final String valueStr, final int index) { + final String[] values = valueStr.split(DELIMITER); return index <= values.length - 1 ? 
values[index] : values[values.length - 1]; } - public static String getTimeForLogDirectory(long startTime) { + public static String getTimeForLogDirectory(final long startTime) { return convertLongToDate(startTime, "ms").replace(":", DIR_TIME_REPLACEMENT); } - public static String fromConsensusFullNameToAbbr(String consensus) { + public static String fromConsensusFullNameToAbbr(final String consensus) { switch (consensus) { case SIMPLE_CONSENSUS: return SIMPLE_CONSENSUS_STR; @@ -233,7 +229,7 @@ public static String fromConsensusFullNameToAbbr(String consensus) { } } - public static String fromConsensusAbbrToFullName(String consensus) { + public static String fromConsensusAbbrToFullName(final String consensus) { switch (consensus) { case SIMPLE_CONSENSUS_STR: return SIMPLE_CONSENSUS; diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppCommonConfig.java b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppCommonConfig.java index 9a5e066d6419b..553890dafe7db 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppCommonConfig.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppCommonConfig.java @@ -124,11 +124,9 @@ public CommonConfig setEnableCrossSpaceCompaction(boolean enableCrossSpaceCompac } @Override - public CommonConfig setMaxInnerCompactionCandidateFileNum( - int maxInnerCompactionCandidateFileNum) { + public CommonConfig setInnerCompactionCandidateFileNum(int maxInnerCompactionCandidateFileNum) { setProperty( - "max_inner_compaction_candidate_file_num", - String.valueOf(maxInnerCompactionCandidateFileNum)); + "inner_compaction_candidate_file_num", String.valueOf(maxInnerCompactionCandidateFileNum)); return this; } @@ -151,8 +149,8 @@ public CommonConfig setPrimitiveArraySize(int primitiveArraySize) { } @Override - public CommonConfig setAvgSeriesPointNumberThreshold(int avgSeriesPointNumberThreshold) { - setProperty("avg_series_point_number_threshold", String.valueOf(avgSeriesPointNumberThreshold)); + public CommonConfig setTargetChunkPointNum(int targetChunkPointNum) { + setProperty("target_chunk_point_num", String.valueOf(targetChunkPointNum)); return this; } @@ -232,6 +230,12 @@ public CommonConfig setTimePartitionInterval(long timePartitionInterval) { return this; } + @Override + public CommonConfig setTTLCheckInterval(long ttlCheckInterval) { + setProperty("ttl_check_interval", String.valueOf(ttlCheckInterval)); + return this; + } + @Override public CommonConfig setTimePartitionOrigin(long timePartitionOrigin) { setProperty("time_partition_origin", String.valueOf(timePartitionOrigin)); @@ -359,6 +363,12 @@ public CommonConfig setSeriesSlotNum(int seriesSlotNum) { return this; } + @Override + public CommonConfig setSeriesPartitionExecutorClass(String seriesPartitionExecutorClass) { + setProperty("series_partition_executor_class", seriesPartitionExecutorClass); + return this; + } + @Override public CommonConfig setSchemaMemoryAllocate(String schemaMemoryAllocate) { setProperty("schema_memory_proportion", String.valueOf(schemaMemoryAllocate)); @@ -419,6 +429,18 @@ public CommonConfig setSchemaRegionPerDataNode(double schemaRegionPerDataNode) { return this; } + @Override + public CommonConfig setPipeMemoryManagementEnabled(boolean pipeMemoryManagementEnabled) { + setProperty("pipe_memory_management_enabled", String.valueOf(pipeMemoryManagementEnabled)); + return this; + } + + @Override + public CommonConfig setIsPipeEnableMemoryCheck(boolean isPipeEnableMemoryCheck) 
{ + setProperty("pipe_enable_memory_checked", String.valueOf(isPipeEnableMemoryCheck)); + return this; + } + @Override public CommonConfig setPipeAirGapReceiverEnabled(boolean isPipeAirGapReceiverEnabled) { setProperty("pipe_air_gap_receiver_enabled", String.valueOf(isPipeAirGapReceiverEnabled)); @@ -445,8 +467,8 @@ public CommonConfig setTagAttributeTotalSize(int tagAttributeTotalSize) { } @Override - public CommonConfig setCnConnectionTimeoutMs(int connectionTimeoutMs) { - setProperty("cn_connection_timeout_ms", String.valueOf(connectionTimeoutMs)); + public CommonConfig setDnConnectionTimeoutMs(int connectionTimeoutMs) { + setProperty("dn_connection_timeout_ms", String.valueOf(connectionTimeoutMs)); return this; } @@ -476,6 +498,57 @@ public CommonConfig setPipeMetaSyncerSyncIntervalMinutes(long pipeMetaSyncerSync return this; } + @Override + public CommonConfig setPipeConnectorRequestSliceThresholdBytes( + int pipeConnectorRequestSliceThresholdBytes) { + setProperty( + "pipe_connector_request_slice_threshold_bytes", + String.valueOf(pipeConnectorRequestSliceThresholdBytes)); + + return this; + } + + @Override + public CommonConfig setQueryMemoryProportion(String queryMemoryProportion) { + setProperty("chunk_timeseriesmeta_free_memory_proportion", queryMemoryProportion); + return this; + } + + @Override + public CommonConfig setSubscriptionPrefetchTsFileBatchMaxDelayInMs( + int subscriptionPrefetchTsFileBatchMaxDelayInMs) { + setProperty( + "subscription_prefetch_ts_file_batch_max_delay_in_ms", + String.valueOf(subscriptionPrefetchTsFileBatchMaxDelayInMs)); + return this; + } + + @Override + public CommonConfig setSubscriptionPrefetchTsFileBatchMaxSizeInBytes( + int subscriptionPrefetchTsFileBatchMaxSizeInBytes) { + setProperty( + "subscription_prefetch_ts_file_batch_max_size_in_bytes", + String.valueOf(subscriptionPrefetchTsFileBatchMaxSizeInBytes)); + return this; + } + + public CommonConfig setSubscriptionEnabled(boolean subscriptionEnabled) { + setProperty("subscription_enabled", String.valueOf(subscriptionEnabled)); + return this; + } + + @Override + public CommonConfig setDefaultStorageGroupLevel(int defaultStorageGroupLevel) { + setProperty("default_storage_group_level", String.valueOf(defaultStorageGroupLevel)); + return this; + } + + @Override + public CommonConfig setDatanodeMemoryProportion(String datanodeMemoryProportion) { + setProperty("datanode_memory_proportion", datanodeMemoryProportion); + return this; + } + // For part of the log directory public String getClusterConfigStr() { return fromConsensusFullNameToAbbr(properties.getProperty(CONFIG_NODE_CONSENSUS_PROTOCOL_CLASS)) diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppConfigNodeConfig.java b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppConfigNodeConfig.java index 62ccbb0aa4ff6..8e4a6def36570 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppConfigNodeConfig.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppConfigNodeConfig.java @@ -55,4 +55,16 @@ public ConfigNodeConfig setMetricReporterType(List metricReporterTypes) properties.setProperty("cn_metric_reporter_list", String.join(",", metricReporterTypes)); return this; } + + @Override + public ConfigNodeConfig setMetricPrometheusReporterUsername(String username) { + properties.setProperty("metric_prometheus_reporter_username", username); + return this; + } + + @Override + public ConfigNodeConfig 
setMetricPrometheusReporterPassword(String password) { + properties.setProperty("metric_prometheus_reporter_password", password); + return this; + } } diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppDataNodeConfig.java b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppDataNodeConfig.java index 0f22e0d428699..01636b7bf0b95 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppDataNodeConfig.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppDataNodeConfig.java @@ -56,6 +56,18 @@ public DataNodeConfig setMetricReporterType(List metricReporterTypes) { return this; } + @Override + public DataNodeConfig setMetricPrometheusReporterUsername(String username) { + properties.setProperty("metric_prometheus_reporter_username", username); + return this; + } + + @Override + public DataNodeConfig setMetricPrometheusReporterPassword(String password) { + properties.setProperty("metric_prometheus_reporter_password", password); + return this; + } + @Override public DataNodeConfig setEnableRestService(boolean enableRestService) { properties.setProperty("enable_rest_service", String.valueOf(enableRestService)); @@ -76,4 +88,16 @@ public DataNodeConfig setLoadTsFileAnalyzeSchemaMemorySizeInBytes( String.valueOf(loadTsFileAnalyzeSchemaMemorySizeInBytes)); return this; } + + @Override + public DataNodeConfig setLoadLastCacheStrategy(String strategyName) { + setProperty("last_cache_operation_on_load", strategyName); + return this; + } + + @Override + public DataNodeConfig setCacheLastValuesForLoad(boolean cacheLastValuesForLoad) { + setProperty("cache_last_values_for_load", String.valueOf(cacheLastValuesForLoad)); + return this; + } } diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppSharedCommonConfig.java b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppSharedCommonConfig.java index 2d93a2e0dafc0..03516a2e07c62 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppSharedCommonConfig.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppSharedCommonConfig.java @@ -118,10 +118,9 @@ public CommonConfig setEnableCrossSpaceCompaction(boolean enableCrossSpaceCompac } @Override - public CommonConfig setMaxInnerCompactionCandidateFileNum( - int maxInnerCompactionCandidateFileNum) { - cnConfig.setMaxInnerCompactionCandidateFileNum(maxInnerCompactionCandidateFileNum); - dnConfig.setMaxInnerCompactionCandidateFileNum(maxInnerCompactionCandidateFileNum); + public CommonConfig setInnerCompactionCandidateFileNum(int maxInnerCompactionCandidateFileNum) { + cnConfig.setInnerCompactionCandidateFileNum(maxInnerCompactionCandidateFileNum); + dnConfig.setInnerCompactionCandidateFileNum(maxInnerCompactionCandidateFileNum); return this; } @@ -147,9 +146,9 @@ public CommonConfig setPrimitiveArraySize(int primitiveArraySize) { } @Override - public CommonConfig setAvgSeriesPointNumberThreshold(int avgSeriesPointNumberThreshold) { - cnConfig.setAvgSeriesPointNumberThreshold(avgSeriesPointNumberThreshold); - dnConfig.setAvgSeriesPointNumberThreshold(avgSeriesPointNumberThreshold); + public CommonConfig setTargetChunkPointNum(int targetChunkPointNum) { + cnConfig.setTargetChunkPointNum(targetChunkPointNum); + dnConfig.setTargetChunkPointNum(targetChunkPointNum); return this; } @@ -231,6 +230,13 @@ public CommonConfig setTimePartitionInterval(long timePartitionInterval) { return this; } + 
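The Prometheus reporter credential setters added above (for both MppConfigNodeConfig and MppDataNodeConfig) only write properties into the node's config file; they pair with the getMetricPrometheusReporterContents(String authHeader) overload appearing later in this patch, which forwards the header when scraping each node's /metrics endpoint. A minimal sketch of the intended round trip, assuming the usual EnvFactory/ClusterConfig accessors; the credentials are illustrative values, not defaults from this diff:

    // Illustrative IT fragment (imports assumed: java.util.Base64,
    // java.nio.charset.StandardCharsets, java.util.List).
    // The Prometheus reporter itself must also be enabled, e.g. via setMetricReporterType.
    BaseEnv env = EnvFactory.getEnv();
    env.getConfig().getConfigNodeConfig().setMetricPrometheusReporterUsername("it-user");
    env.getConfig().getConfigNodeConfig().setMetricPrometheusReporterPassword("it-pass");
    env.getConfig().getDataNodeConfig().setMetricPrometheusReporterUsername("it-user");
    env.getConfig().getDataNodeConfig().setMetricPrometheusReporterPassword("it-pass");
    env.initClusterEnvironment();
    String authHeader =
        "Basic "
            + Base64.getEncoder()
                .encodeToString("it-user:it-pass".getBytes(StandardCharsets.UTF_8));
    List<String> metrics = env.getMetricPrometheusReporterContents(authHeader);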
@Override + public CommonConfig setTTLCheckInterval(long ttlCheckInterval) { + cnConfig.setTTLCheckInterval(ttlCheckInterval); + dnConfig.setTTLCheckInterval(ttlCheckInterval); + return this; + } + @Override public CommonConfig setTimePartitionOrigin(long timePartitionOrigin) { cnConfig.setTimePartitionOrigin(timePartitionOrigin); @@ -355,6 +361,13 @@ public CommonConfig setSeriesSlotNum(int seriesSlotNum) { return this; } + @Override + public CommonConfig setSeriesPartitionExecutorClass(String seriesPartitionExecutorClass) { + cnConfig.setSeriesPartitionExecutorClass(seriesPartitionExecutorClass); + dnConfig.setSeriesPartitionExecutorClass(seriesPartitionExecutorClass); + return this; + } + @Override public CommonConfig setSchemaMemoryAllocate(String schemaMemoryAllocate) { dnConfig.setSchemaMemoryAllocate(schemaMemoryAllocate); @@ -425,6 +438,20 @@ public CommonConfig setSchemaRegionPerDataNode(double schemaRegionPerDataNode) { return this; } + @Override + public CommonConfig setPipeMemoryManagementEnabled(boolean pipeMemoryManagementEnabled) { + dnConfig.setPipeMemoryManagementEnabled(pipeMemoryManagementEnabled); + cnConfig.setPipeMemoryManagementEnabled(pipeMemoryManagementEnabled); + return this; + } + + @Override + public CommonConfig setIsPipeEnableMemoryCheck(boolean isPipeEnableMemoryCheck) { + dnConfig.setIsPipeEnableMemoryCheck(isPipeEnableMemoryCheck); + cnConfig.setIsPipeEnableMemoryCheck(isPipeEnableMemoryCheck); + return this; + } + @Override public CommonConfig setPipeAirGapReceiverEnabled(boolean isPipeAirGapReceiverEnabled) { dnConfig.setPipeAirGapReceiverEnabled(isPipeAirGapReceiverEnabled); @@ -454,9 +481,9 @@ public CommonConfig setTagAttributeTotalSize(int tagAttributeTotalSize) { } @Override - public CommonConfig setCnConnectionTimeoutMs(int connectionTimeoutMs) { - dnConfig.setCnConnectionTimeoutMs(connectionTimeoutMs); - cnConfig.setCnConnectionTimeoutMs(connectionTimeoutMs); + public CommonConfig setDnConnectionTimeoutMs(int connectionTimeoutMs) { + dnConfig.setDnConnectionTimeoutMs(connectionTimeoutMs); + cnConfig.setDnConnectionTimeoutMs(connectionTimeoutMs); return this; } @@ -484,4 +511,58 @@ public CommonConfig setPipeMetaSyncerSyncIntervalMinutes(long pipeMetaSyncerSync cnConfig.setPipeMetaSyncerSyncIntervalMinutes(pipeMetaSyncerSyncIntervalMinutes); return this; } + + @Override + public CommonConfig setPipeConnectorRequestSliceThresholdBytes( + int pipeConnectorRequestSliceThresholdBytes) { + dnConfig.setPipeConnectorRequestSliceThresholdBytes(pipeConnectorRequestSliceThresholdBytes); + cnConfig.setPipeConnectorRequestSliceThresholdBytes(pipeConnectorRequestSliceThresholdBytes); + return this; + } + + @Override + public CommonConfig setQueryMemoryProportion(String queryMemoryProportion) { + dnConfig.setQueryMemoryProportion(queryMemoryProportion); + cnConfig.setQueryMemoryProportion(queryMemoryProportion); + return this; + } + + @Override + public CommonConfig setSubscriptionPrefetchTsFileBatchMaxDelayInMs( + int subscriptionPrefetchTsFileBatchMaxDelayInMs) { + dnConfig.setSubscriptionPrefetchTsFileBatchMaxDelayInMs( + subscriptionPrefetchTsFileBatchMaxDelayInMs); + cnConfig.setSubscriptionPrefetchTsFileBatchMaxDelayInMs( + subscriptionPrefetchTsFileBatchMaxDelayInMs); + return this; + } + + @Override + public CommonConfig setSubscriptionPrefetchTsFileBatchMaxSizeInBytes( + int subscriptionPrefetchTsFileBatchMaxSizeInBytes) { + dnConfig.setSubscriptionPrefetchTsFileBatchMaxSizeInBytes( + subscriptionPrefetchTsFileBatchMaxSizeInBytes); + 
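MppSharedCommonConfig fans every CommonConfig call out to both the ConfigNode and the DataNode property files, so a single call in a test configures both roles consistently, and because each setter returns this the new knobs chain fluently. A sketch of typical call-site usage, assuming the standard EnvFactory bootstrap (values illustrative):

    EnvFactory.getEnv()
        .getConfig()
        .getCommonConfig()
        .setTTLCheckInterval(10_000)        // new setter: ttl_check_interval
        .setTargetChunkPointNum(100)        // renamed from setAvgSeriesPointNumberThreshold
        .setDnConnectionTimeoutMs(60_000);  // renamed from setCnConnectionTimeoutMs
    EnvFactory.getEnv().initClusterEnvironment();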
cnConfig.setSubscriptionPrefetchTsFileBatchMaxSizeInBytes( + subscriptionPrefetchTsFileBatchMaxSizeInBytes); + return this; + } + + public CommonConfig setSubscriptionEnabled(boolean subscriptionEnabled) { + dnConfig.setSubscriptionEnabled(subscriptionEnabled); + cnConfig.setSubscriptionEnabled(subscriptionEnabled); + return this; + } + + @Override + public CommonConfig setDefaultStorageGroupLevel(int defaultStorageGroupLevel) { + dnConfig.setDefaultStorageGroupLevel(defaultStorageGroupLevel); + cnConfig.setDefaultStorageGroupLevel(defaultStorageGroupLevel); + return this; + } + + public CommonConfig setDatanodeMemoryProportion(String datanodeMemoryProportion) { + dnConfig.setDatanodeMemoryProportion(datanodeMemoryProportion); + cnConfig.setDatanodeMemoryProportion(datanodeMemoryProportion); + return this; + } } diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/env/AIEnv.java b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/env/AIEnv.java new file mode 100644 index 0000000000000..916a2ee8cf6cf --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/env/AIEnv.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.it.env.cluster.env; + +public class AIEnv extends AbstractEnv { + @Override + public void initClusterEnvironment() { + initClusterEnvironment(1, 1); + } + + @Override + public void initClusterEnvironment(int configNodesNum, int dataNodesNum) { + super.initEnvironment(configNodesNum, dataNodesNum, 1000, true); + } + + @Override + public void initClusterEnvironment( + int configNodesNum, int dataNodesNum, int testWorkingRetryCount) { + super.initEnvironment(configNodesNum, dataNodesNum, testWorkingRetryCount, true); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/env/AbstractEnv.java b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/env/AbstractEnv.java index 54fedec8de0f6..7358de706215a 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/env/AbstractEnv.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/env/AbstractEnv.java @@ -19,9 +19,7 @@ package org.apache.iotdb.it.env.cluster.env; -import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; -import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.commons.client.ClientPoolFactory; import org.apache.iotdb.commons.client.IClientManager; @@ -41,6 +39,7 @@ import org.apache.iotdb.it.env.EnvFactory; import org.apache.iotdb.it.env.cluster.EnvUtils; import org.apache.iotdb.it.env.cluster.config.*; +import org.apache.iotdb.it.env.cluster.node.AINodeWrapper; import org.apache.iotdb.it.env.cluster.node.AbstractNodeWrapper; import org.apache.iotdb.it.env.cluster.node.ConfigNodeWrapper; import org.apache.iotdb.it.env.cluster.node.DataNodeWrapper; @@ -65,6 +64,7 @@ import java.sql.Connection; import java.sql.DriverManager; import java.sql.SQLException; +import java.time.ZoneId; import java.util.*; import java.util.concurrent.TimeUnit; import java.util.function.Predicate; @@ -80,6 +80,7 @@ public abstract class AbstractEnv implements BaseEnv { private final Random rand = new Random(); protected List configNodeWrapperList = Collections.emptyList(); protected List dataNodeWrapperList = Collections.emptyList(); + protected List aiNodeWrapperList = Collections.emptyList(); protected String testMethodName = null; protected int index = 0; protected long startTime; @@ -101,7 +102,7 @@ protected AbstractEnv() { } // For multiple environment ITs, time must be consistent across environments. 
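The comment just above matters in practice: getTimeForLogDirectory(startTime), changed earlier in this patch, is baked into every node's log directory name, so two environments participating in the same multi-cluster IT must share one timestamp or their logs scatter across differently named directories. A sketch of how the startTime constructor threads through MultiClusterEnv (shown later in this patch); the test method name is illustrative:

    // Both clusters share one start time, hence one timestamped log directory.
    long startTime = System.currentTimeMillis();
    AbstractEnv senderEnv = new MultiClusterEnv(startTime, 0, "testPipeMetaSync");
    AbstractEnv receiverEnv = new MultiClusterEnv(startTime, 1, "testPipeMetaSync");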
- protected AbstractEnv(long startTime) { + protected AbstractEnv(final long startTime) { this.startTime = startTime; this.clusterConfig = new MppClusterConfig(); } @@ -112,38 +113,49 @@ public ClusterConfig getConfig() { } @Override - public List getMetricPrometheusReporterContents() { - List result = new ArrayList<>(); + public List getMetricPrometheusReporterContents(String authHeader) { + final List result = new ArrayList<>(); // get all report content of confignodes - for (ConfigNodeWrapper configNode : this.configNodeWrapperList) { - String configNodeMetricContent = + for (final ConfigNodeWrapper configNode : this.configNodeWrapperList) { + final String configNodeMetricContent = getUrlContent( Config.IOTDB_HTTP_URL_PREFIX + configNode.getIp() + ":" + configNode.getMetricPort() - + "/metrics"); + + "/metrics", + authHeader); result.add(configNodeMetricContent); } // get all report content of datanodes - for (DataNodeWrapper dataNode : this.dataNodeWrapperList) { - String dataNodeMetricContent = + for (final DataNodeWrapper dataNode : this.dataNodeWrapperList) { + final String dataNodeMetricContent = getUrlContent( Config.IOTDB_HTTP_URL_PREFIX + dataNode.getIp() + ":" + dataNode.getMetricPort() - + "/metrics"); + + "/metrics", + authHeader); result.add(dataNodeMetricContent); } return result; } - protected void initEnvironment(int configNodesNum, int dataNodesNum) { + protected void initEnvironment(final int configNodesNum, final int dataNodesNum) { initEnvironment(configNodesNum, dataNodesNum, retryCount); } - protected void initEnvironment(int configNodesNum, int dataNodesNum, int retryCount) { + protected void initEnvironment( + final int configNodesNum, final int dataNodesNum, final int testWorkingRetryCount) { + initEnvironment(configNodesNum, dataNodesNum, testWorkingRetryCount, false); + } + + protected void initEnvironment( + final int configNodesNum, + final int dataNodesNum, + final int retryCount, + final boolean addAINode) { this.retryCount = retryCount; this.configNodeWrapperList = new ArrayList<>(); this.dataNodeWrapperList = new ArrayList<>(); @@ -154,7 +166,7 @@ protected void initEnvironment(int configNodesNum, int dataNodesNum, int retryCo final String testClassName = getTestClassName(); - ConfigNodeWrapper seedConfigNodeWrapper = + final ConfigNodeWrapper seedConfigNodeWrapper = new ConfigNodeWrapper( true, "", @@ -172,22 +184,23 @@ protected void initEnvironment(int configNodesNum, int dataNodesNum, int retryCo seedConfigNodeWrapper.createLogDir(); seedConfigNodeWrapper.setKillPoints(configNodeKillPoints); seedConfigNodeWrapper.start(); - String seedConfigNode = seedConfigNodeWrapper.getIpAndPortString(); + final String seedConfigNode = seedConfigNodeWrapper.getIpAndPortString(); this.configNodeWrapperList.add(seedConfigNodeWrapper); // Check if the Seed-ConfigNode started successfully - try (SyncConfigNodeIServiceClient ignored = + try (final SyncConfigNodeIServiceClient ignored = (SyncConfigNodeIServiceClient) getLeaderConfigNodeConnection()) { // Do nothing logger.info("The Seed-ConfigNode started successfully!"); - } catch (Exception e) { + } catch (final Exception e) { logger.error("Failed to get connection to the Seed-ConfigNode", e); } - List configNodeEndpoints = new ArrayList<>(); - RequestDelegate configNodesDelegate = new SerialRequestDelegate<>(configNodeEndpoints); + final List configNodeEndpoints = new ArrayList<>(); + final RequestDelegate configNodesDelegate = + new SerialRequestDelegate<>(configNodeEndpoints); for (int i = 1; i < configNodesNum; 
i++) { - ConfigNodeWrapper configNodeWrapper = + final ConfigNodeWrapper configNodeWrapper = new ConfigNodeWrapper( false, seedConfigNode, @@ -214,16 +227,16 @@ protected void initEnvironment(int configNodesNum, int dataNodesNum, int retryCo } try { configNodesDelegate.requestAll(); - } catch (SQLException e) { + } catch (final SQLException e) { logger.error("Start configNodes failed", e); throw new AssertionError(); } - List dataNodeEndpoints = new ArrayList<>(); - RequestDelegate dataNodesDelegate = + final List dataNodeEndpoints = new ArrayList<>(); + final RequestDelegate dataNodesDelegate = new ParallelRequestDelegate<>(dataNodeEndpoints, NODE_START_TIMEOUT); for (int i = 0; i < dataNodesNum; i++) { - DataNodeWrapper dataNodeWrapper = + final DataNodeWrapper dataNodeWrapper = new DataNodeWrapper( seedConfigNode, testClassName, @@ -250,20 +263,56 @@ protected void initEnvironment(int configNodesNum, int dataNodesNum, int retryCo try { dataNodesDelegate.requestAll(); - } catch (SQLException e) { + } catch (final SQLException e) { logger.error("Start dataNodes failed", e); throw new AssertionError(); } + if (addAINode) { + this.aiNodeWrapperList = new ArrayList<>(); + startAINode(seedConfigNode, testClassName); + } + checkClusterStatusWithoutUnknown(); } + private void startAINode(final String seedConfigNode, final String testClassName) { + final String aiNodeEndPoint; + final AINodeWrapper aiNodeWrapper = + new AINodeWrapper( + seedConfigNode, + testClassName, + testMethodName, + index, + EnvUtils.searchAvailablePorts(), + startTime); + aiNodeWrapperList.add(aiNodeWrapper); + aiNodeEndPoint = aiNodeWrapper.getIpAndPortString(); + aiNodeWrapper.createNodeDir(); + aiNodeWrapper.createLogDir(); + final RequestDelegate aiNodesDelegate = + new ParallelRequestDelegate<>( + Collections.singletonList(aiNodeEndPoint), NODE_START_TIMEOUT); + + aiNodesDelegate.addRequest( + () -> { + aiNodeWrapper.start(); + return null; + }); + + try { + aiNodesDelegate.requestAll(); + } catch (final SQLException e) { + logger.error("Start aiNodes failed", e); + } + } + public String getTestClassName() { - StackTraceElement[] stack = Thread.currentThread().getStackTrace(); - for (StackTraceElement stackTraceElement : stack) { - String className = stackTraceElement.getClassName(); + final StackTraceElement[] stack = Thread.currentThread().getStackTrace(); + for (final StackTraceElement stackTraceElement : stack) { + final String className = stackTraceElement.getClassName(); if (className.endsWith("IT")) { - String result = className.substring(className.lastIndexOf(".") + 1); + final String result = className.substring(className.lastIndexOf(".") + 1); if (!result.startsWith("Abstract")) { return result; } @@ -272,12 +321,16 @@ public String getTestClassName() { return "UNKNOWN-IT"; } - private Map countNodeStatus(Map nodeStatus) { - Map result = new HashMap<>(); + private Map countNodeStatus(final Map nodeStatus) { + final Map result = new HashMap<>(); nodeStatus.values().forEach(status -> result.put(status, result.getOrDefault(status, 0) + 1)); return result; } + public void checkNodeInStatus(int nodeId, NodeStatus expectation) { + checkClusterStatus(nodeStatusMap -> expectation.getStatus().equals(nodeStatusMap.get(nodeId))); + } + public void checkClusterStatusWithoutUnknown() { checkClusterStatus( nodeStatusMap -> nodeStatusMap.values().stream().noneMatch("Unknown"::equals)); @@ -300,13 +353,13 @@ public void checkClusterStatusOneUnknownOtherRunning() { * * @param statusCheck the predicate to test the status of 
nodes */ - public void checkClusterStatus(Predicate> statusCheck) { + public void checkClusterStatus(final Predicate> statusCheck) { logger.info("Testing cluster environment..."); TShowClusterResp showClusterResp; Exception lastException = null; boolean flag; for (int i = 0; i < retryCount; i++) { - try (SyncConfigNodeIServiceClient client = + try (final SyncConfigNodeIServiceClient client = (SyncConfigNodeIServiceClient) getLeaderConfigNodeConnection()) { flag = true; showClusterResp = client.showCluster(); @@ -318,26 +371,27 @@ public void checkClusterStatus(Predicate> statusCheck) { // Check the number of nodes if (showClusterResp.getNodeStatus().size() - != configNodeWrapperList.size() + dataNodeWrapperList.size()) { + != configNodeWrapperList.size() + + dataNodeWrapperList.size() + + aiNodeWrapperList.size()) { flag = false; } // Check the status of nodes if (flag) { - Map nodeStatus = showClusterResp.getNodeStatus(); - flag = statusCheck.test(nodeStatus); + flag = statusCheck.test(showClusterResp.getNodeStatus()); } if (flag) { logger.info("The cluster is now ready for testing!"); return; } - } catch (Exception e) { + } catch (final Exception e) { lastException = e; } try { TimeUnit.SECONDS.sleep(1L); - } catch (InterruptedException e) { + } catch (final InterruptedException e) { lastException = e; Thread.currentThread().interrupt(); } @@ -354,17 +408,19 @@ public void checkClusterStatus(Predicate> statusCheck) { @Override public void cleanClusterEnvironment() { - List allNodeWrappers = - Stream.concat(this.dataNodeWrapperList.stream(), this.configNodeWrapperList.stream()) + final List allNodeWrappers = + Stream.concat( + dataNodeWrapperList.stream(), + Stream.concat(configNodeWrapperList.stream(), aiNodeWrapperList.stream())) .collect(Collectors.toList()); allNodeWrappers.stream() .findAny() .ifPresent( nodeWrapper -> logger.info("You can find logs at {}", nodeWrapper.getLogDirPath())); - for (AbstractNodeWrapper nodeWrapper : allNodeWrappers) { + for (final AbstractNodeWrapper nodeWrapper : allNodeWrappers) { nodeWrapper.stopForcibly(); nodeWrapper.destroyDir(); - String lockPath = EnvUtils.getLockFilePath(nodeWrapper.getPort()); + final String lockPath = EnvUtils.getLockFilePath(nodeWrapper.getPort()); if (!new File(lockPath).delete()) { logger.error("Delete lock file {} failed", lockPath); } @@ -382,9 +438,19 @@ public Connection getConnection(String username, String password) throws SQLExce getWriteConnection(null, username, password), getReadConnections(null, username, password)); } + @Override + public Connection getConnection( + final DataNodeWrapper dataNodeWrapper, final String username, final String password) + throws SQLException { + return new ClusterTestConnection( + getWriteConnectionWithSpecifiedDataNode(dataNodeWrapper, null, username, password), + getReadConnections(null, dataNodeWrapper, username, password)); + } + @Override public Connection getWriteOnlyConnectionWithSpecifiedDataNode( - DataNodeWrapper dataNode, String username, String password) throws SQLException { + final DataNodeWrapper dataNode, final String username, final String password) + throws SQLException { return new ClusterTestConnection( getWriteConnectionWithSpecifiedDataNode(dataNode, null, username, password), Collections.emptyList()); @@ -392,7 +458,8 @@ public Connection getWriteOnlyConnectionWithSpecifiedDataNode( @Override public Connection getConnectionWithSpecifiedDataNode( - DataNodeWrapper dataNode, String username, String password) throws SQLException { + final DataNodeWrapper dataNode, 
final String username, final String password) + throws SQLException { return new ClusterTestConnection( getWriteConnectionWithSpecifiedDataNode(dataNode, null, username, password), getReadConnections(null, username, password)); @@ -419,10 +486,25 @@ public ISession getSessionConnection() throws IoTDBConnectionException { return session; } + @Override + public ISession getSessionConnection(ZoneId zoneId) throws IoTDBConnectionException { + DataNodeWrapper dataNode = + this.dataNodeWrapperList.get(rand.nextInt(this.dataNodeWrapperList.size())); + Session session = + new Session( + dataNode.getIp(), + dataNode.getPort(), + SessionConfig.DEFAULT_USER, + SessionConfig.DEFAULT_PASSWORD, + zoneId); + session.open(); + return session; + } + @Override public ISession getSessionConnection(String userName, String password) throws IoTDBConnectionException { - DataNodeWrapper dataNode = + final DataNodeWrapper dataNode = this.dataNodeWrapperList.get(rand.nextInt(this.dataNodeWrapperList.size())); Session session = new Session(dataNode.getIp(), dataNode.getPort(), userName, password); session.open(); @@ -476,8 +558,8 @@ protected NodeConnection getWriteConnection( protected NodeConnection getWriteConnectionWithSpecifiedDataNode( DataNodeWrapper dataNode, Constant.Version version, String username, String password) throws SQLException { - String endpoint = dataNode.getIp() + ":" + dataNode.getPort(); - Connection writeConnection = + final String endpoint = dataNode.getIp() + ":" + dataNode.getPort(); + final Connection writeConnection = DriverManager.getConnection( Config.IOTDB_URL_PREFIX + endpoint @@ -497,10 +579,10 @@ protected NodeConnection getWriteConnectionFromDataNodeList( String username, String password) throws SQLException { - List dataNodeWrapperListCopy = new ArrayList<>(dataNodeList); + final List dataNodeWrapperListCopy = new ArrayList<>(dataNodeList); Collections.shuffle(dataNodeWrapperListCopy); SQLException lastException = null; - for (DataNodeWrapper dataNode : dataNodeWrapperListCopy) { + for (final DataNodeWrapper dataNode : dataNodeWrapperListCopy) { try { return getWriteConnectionWithSpecifiedDataNode(dataNode, version, username, password); } catch (SQLException e) { @@ -538,6 +620,36 @@ protected List getReadConnections( return readConnRequestDelegate.requestAll(); } + protected List getReadConnections( + final Constant.Version version, + final DataNodeWrapper dataNode, + final String username, + final String password) + throws SQLException { + final List endpoints = new ArrayList<>(); + final ParallelRequestDelegate readConnRequestDelegate = + new ParallelRequestDelegate<>(endpoints, NODE_START_TIMEOUT); + + endpoints.add(dataNode.getIpAndPortString()); + readConnRequestDelegate.addRequest( + () -> { + Connection readConnection = + DriverManager.getConnection( + Config.IOTDB_URL_PREFIX + + dataNode.getIpAndPortString() + + getParam(version, NODE_NETWORK_TIMEOUT_MS, ZERO_TIME_ZONE), + System.getProperty("User", username), + System.getProperty("Password", password)); + return new NodeConnection( + dataNode.getIpAndPortString(), + NodeConnection.NodeRole.DATA_NODE, + NodeConnection.ConnectionRole.READ, + readConnection); + }); + + return readConnRequestDelegate.requestAll(); + } + // use this to avoid some runtimeExceptions when try to get jdbc connections. // because it is hard to add retry and handle exception when getting jdbc connections in // getWriteConnectionWithSpecifiedDataNode and getReadConnections. 
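The getConnection(DataNodeWrapper, ...) overload and the single-node getReadConnections variant added above pin both the write and the read side of a ClusterTestConnection to one specific DataNode, which matters for tests that must observe a particular replica rather than a randomly chosen one; note that the read path still honors the -DUser/-DPassword system-property overrides. A sketch of usage, assuming a running cluster (java.sql imports assumed, credentials illustrative):

    // All statements on this connection hit dataNodeWrapperList.get(0), reads included.
    DataNodeWrapper node = EnvFactory.getEnv().getDataNodeWrapper(0);
    try (Connection conn = EnvFactory.getEnv().getConnection(node, "root", "root");
        Statement stmt = conn.createStatement()) {
      stmt.execute("SHOW CLUSTER");
    }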
@@ -546,19 +658,19 @@ protected List getReadConnections( // AssertionError. protected void testJDBCConnection() { logger.info("Testing JDBC connection..."); - List endpoints = + final List endpoints = dataNodeWrapperList.stream() .map(DataNodeWrapper::getIpAndPortString) .collect(Collectors.toList()); - RequestDelegate testDelegate = + final RequestDelegate testDelegate = new ParallelRequestDelegate<>(endpoints, NODE_START_TIMEOUT); - for (DataNodeWrapper dataNode : dataNodeWrapperList) { + for (final DataNodeWrapper dataNode : dataNodeWrapperList) { final String dataNodeEndpoint = dataNode.getIpAndPortString(); testDelegate.addRequest( () -> { Exception lastException = null; for (int i = 0; i < retryCount; i++) { - try (IoTDBConnection ignored = + try (final IoTDBConnection ignored = (IoTDBConnection) DriverManager.getConnection( Config.IOTDB_URL_PREFIX @@ -568,7 +680,7 @@ protected void testJDBCConnection() { System.getProperty("Password", "root"))) { logger.info("Successfully connecting to DataNode: {}.", dataNodeEndpoint); return null; - } catch (Exception e) { + } catch (final Exception e) { lastException = e; TimeUnit.SECONDS.sleep(1L); } @@ -581,15 +693,16 @@ protected void testJDBCConnection() { } try { testDelegate.requestAll(); - } catch (Exception e) { + } catch (final Exception e) { logger.error("exception in test Cluster with RPC, message: {}", e.getMessage(), e); throw new AssertionError( String.format("After %d times retry, the cluster can't work!", retryCount)); } } - private String getParam(Constant.Version version, int timeout, String timeZone) { - StringBuilder sb = new StringBuilder("?"); + private String getParam( + final Constant.Version version, final int timeout, final String timeZone) { + final StringBuilder sb = new StringBuilder("?"); sb.append(Config.NETWORK_TIMEOUT).append("=").append(timeout); if (version != null) { sb.append("&").append(VERSION).append("=").append(version); @@ -605,23 +718,20 @@ public String getTestMethodName() { } @Override - public void setTestMethodName(String testMethodName) { + public void setTestMethodName(final String testMethodName) { this.testMethodName = testMethodName; } @Override public void dumpTestJVMSnapshot() { - for (ConfigNodeWrapper configNodeWrapper : configNodeWrapperList) { - configNodeWrapper.executeJstack(testMethodName); - } - for (DataNodeWrapper dataNodeWrapper : dataNodeWrapperList) { - dataNodeWrapper.executeJstack(testMethodName); - } + configNodeWrapperList.forEach( + configNodeWrapper -> configNodeWrapper.executeJstack(testMethodName)); + dataNodeWrapperList.forEach(dataNodeWrapper -> dataNodeWrapper.executeJstack(testMethodName)); } @Override public List getNodeWrapperList() { - List result = new ArrayList<>(configNodeWrapperList); + final List result = new ArrayList<>(configNodeWrapperList); result.addAll(dataNodeWrapperList); return result; } @@ -650,13 +760,13 @@ public IConfigNodeRPCService.Iface getLeaderConfigNodeConnection() Exception lastException = null; ConfigNodeWrapper lastErrorNode = null; for (int i = 0; i < retryCount; i++) { - for (ConfigNodeWrapper configNodeWrapper : configNodeWrapperList) { + for (final ConfigNodeWrapper configNodeWrapper : configNodeWrapperList) { try { lastErrorNode = configNodeWrapper; - SyncConfigNodeIServiceClient client = + final SyncConfigNodeIServiceClient client = clientManager.borrowClient( new TEndPoint(configNodeWrapper.getIp(), configNodeWrapper.getPort())); - TShowClusterResp resp = client.showCluster(); + final TShowClusterResp resp = client.showCluster(); if 
(resp.getStatus().getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { // Only the ConfigNodeClient who connects to the ConfigNode-leader @@ -671,7 +781,7 @@ public IConfigNodeRPCService.Iface getLeaderConfigNodeConnection() + " message: " + resp.getStatus().getMessage()); } - } catch (Exception e) { + } catch (final Exception e) { lastException = e; } @@ -692,12 +802,12 @@ public IConfigNodeRPCService.Iface getLeaderConfigNodeConnection() @Override public IConfigNodeRPCService.Iface getConfigNodeConnection(int index) throws Exception { Exception lastException = null; - ConfigNodeWrapper configNodeWrapper = configNodeWrapperList.get(index); + final ConfigNodeWrapper configNodeWrapper = configNodeWrapperList.get(index); for (int i = 0; i < 30; i++) { try { return clientManager.borrowClient( new TEndPoint(configNodeWrapper.getIp(), configNodeWrapper.getPort())); - } catch (Exception e) { + } catch (final Exception e) { lastException = e; } // Sleep 1s before next retry @@ -713,9 +823,9 @@ public int getFirstLeaderSchemaRegionDataNodeIndex() throws IOException, Interru ConfigNodeWrapper lastErrorNode = null; for (int retry = 0; retry < 30; retry++) { for (int configNodeId = 0; configNodeId < configNodeWrapperList.size(); configNodeId++) { - ConfigNodeWrapper configNodeWrapper = configNodeWrapperList.get(configNodeId); + final ConfigNodeWrapper configNodeWrapper = configNodeWrapperList.get(configNodeId); lastErrorNode = configNodeWrapper; - try (SyncConfigNodeIServiceClient client = + try (final SyncConfigNodeIServiceClient client = clientManager.borrowClient( new TEndPoint(configNodeWrapper.getIp(), configNodeWrapper.getPort()))) { TShowRegionResp resp = @@ -728,12 +838,12 @@ public int getFirstLeaderSchemaRegionDataNodeIndex() throws IOException, Interru int port; if (resp.getStatus().getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - for (TRegionInfo tRegionInfo : resp.getRegionInfoList()) { + for (final TRegionInfo tRegionInfo : resp.getRegionInfoList()) { if (tRegionInfo.getRoleType().equals("Leader")) { ip = tRegionInfo.getClientRpcIp(); port = tRegionInfo.getClientRpcPort(); for (int dataNodeId = 0; dataNodeId < dataNodeWrapperList.size(); ++dataNodeId) { - DataNodeWrapper dataNodeWrapper = dataNodeWrapperList.get(dataNodeId); + final DataNodeWrapper dataNodeWrapper = dataNodeWrapperList.get(dataNodeId); if (dataNodeWrapper.getIp().equals(ip) && dataNodeWrapper.getPort() == port) { return dataNodeId; } @@ -749,7 +859,7 @@ public int getFirstLeaderSchemaRegionDataNodeIndex() throws IOException, Interru + " message: " + resp.getStatus().getMessage()); } - } catch (Exception e) { + } catch (final Exception e) { lastException = e; } @@ -773,12 +883,12 @@ public int getLeaderConfigNodeIndex() throws IOException, InterruptedException { ConfigNodeWrapper lastErrorNode = null; for (int retry = 0; retry < retryCount; retry++) { for (int configNodeId = 0; configNodeId < configNodeWrapperList.size(); configNodeId++) { - ConfigNodeWrapper configNodeWrapper = configNodeWrapperList.get(configNodeId); + final ConfigNodeWrapper configNodeWrapper = configNodeWrapperList.get(configNodeId); lastErrorNode = configNodeWrapper; - try (SyncConfigNodeIServiceClient client = + try (final SyncConfigNodeIServiceClient client = clientManager.borrowClient( new TEndPoint(configNodeWrapper.getIp(), configNodeWrapper.getPort()))) { - TShowClusterResp resp = client.showCluster(); + final TShowClusterResp resp = client.showCluster(); // Only the ConfigNodeClient who connects to the ConfigNode-leader // 
will respond the SUCCESS_STATUS if (resp.getStatus().getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { @@ -790,7 +900,7 @@ public int getLeaderConfigNodeIndex() throws IOException, InterruptedException { + " message: " + resp.getStatus().getMessage()); } - } catch (Exception e) { + } catch (final Exception e) { lastException = e; } @@ -810,15 +920,13 @@ public int getLeaderConfigNodeIndex() throws IOException, InterruptedException { } @Override - public void startConfigNode(int index) { + public void startConfigNode(final int index) { configNodeWrapperList.get(index).start(); } @Override public void startAllConfigNodes() { - for (ConfigNodeWrapper configNodeWrapper : configNodeWrapperList) { - configNodeWrapper.start(); - } + configNodeWrapperList.forEach(AbstractNodeWrapper::start); } @Override @@ -828,24 +936,27 @@ public void shutdownConfigNode(int index) { @Override public void shutdownAllConfigNodes() { - for (ConfigNodeWrapper configNodeWrapper : configNodeWrapperList) { - configNodeWrapper.stop(); - } + configNodeWrapperList.forEach(AbstractNodeWrapper::stop); + } + + @Override + public void shutdownForciblyAllConfigNodes() { + configNodeWrapperList.forEach(AbstractNodeWrapper::stopForcibly); } @Override - public ConfigNodeWrapper getConfigNodeWrapper(int index) { + public ConfigNodeWrapper getConfigNodeWrapper(final int index) { return configNodeWrapperList.get(index); } @Override - public DataNodeWrapper getDataNodeWrapper(int index) { + public DataNodeWrapper getDataNodeWrapper(final int index) { return dataNodeWrapperList.get(index); } @Override public ConfigNodeWrapper generateRandomConfigNodeWrapper() { - ConfigNodeWrapper newConfigNodeWrapper = + final ConfigNodeWrapper newConfigNodeWrapper = new ConfigNodeWrapper( false, configNodeWrapperList.get(0).getIpAndPortString(), @@ -867,7 +978,7 @@ public ConfigNodeWrapper generateRandomConfigNodeWrapper() { @Override public DataNodeWrapper generateRandomDataNodeWrapper() { - DataNodeWrapper newDataNodeWrapper = + final DataNodeWrapper newDataNodeWrapper = new DataNodeWrapper( configNodeWrapperList.get(0).getIpAndPortString(), getTestClassName(), @@ -887,19 +998,20 @@ public DataNodeWrapper generateRandomDataNodeWrapper() { } @Override - public void registerNewDataNode(boolean isNeedVerify) { + public void registerNewDataNode(final boolean isNeedVerify) { registerNewDataNode(generateRandomDataNodeWrapper(), isNeedVerify); } @Override - public void registerNewConfigNode(boolean isNeedVerify) { + public void registerNewConfigNode(final boolean isNeedVerify) { registerNewConfigNode(generateRandomConfigNodeWrapper(), isNeedVerify); } @Override - public void registerNewConfigNode(ConfigNodeWrapper newConfigNodeWrapper, boolean isNeedVerify) { + public void registerNewConfigNode( + final ConfigNodeWrapper newConfigNodeWrapper, final boolean isNeedVerify) { // Start new ConfigNode - RequestDelegate configNodeDelegate = + final RequestDelegate configNodeDelegate = new ParallelRequestDelegate<>( Collections.singletonList(newConfigNodeWrapper.getIpAndPortString()), NODE_START_TIMEOUT); @@ -911,7 +1023,7 @@ public void registerNewConfigNode(ConfigNodeWrapper newConfigNodeWrapper, boolea try { configNodeDelegate.requestAll(); - } catch (SQLException e) { + } catch (final SQLException e) { logger.error("Start configNode failed", e); throw new AssertionError(); } @@ -923,11 +1035,12 @@ public void registerNewConfigNode(ConfigNodeWrapper newConfigNodeWrapper, boolea } @Override - public void registerNewDataNode(DataNodeWrapper 
newDataNodeWrapper, boolean isNeedVerify) { + public void registerNewDataNode( + final DataNodeWrapper newDataNodeWrapper, final boolean isNeedVerify) { // Start new DataNode - List dataNodeEndpoints = + final List dataNodeEndpoints = Collections.singletonList(newDataNodeWrapper.getIpAndPortString()); - RequestDelegate dataNodesDelegate = + final RequestDelegate dataNodesDelegate = new ParallelRequestDelegate<>(dataNodeEndpoints, NODE_START_TIMEOUT); dataNodesDelegate.addRequest( () -> { @@ -936,7 +1049,7 @@ public void registerNewDataNode(DataNodeWrapper newDataNodeWrapper, boolean isNe }); try { dataNodesDelegate.requestAll(); - } catch (SQLException e) { + } catch (final SQLException e) { logger.error("Start dataNodes failed", e); throw new AssertionError(); } @@ -948,58 +1061,68 @@ public void registerNewDataNode(DataNodeWrapper newDataNodeWrapper, boolean isNe } @Override - public void startDataNode(int index) { + public void startDataNode(final int index) { dataNodeWrapperList.get(index).start(); } @Override public void startAllDataNodes() { - for (DataNodeWrapper dataNodeWrapper : dataNodeWrapperList) { - dataNodeWrapper.start(); - } + dataNodeWrapperList.forEach(AbstractNodeWrapper::start); } @Override - public void shutdownDataNode(int index) { + public void shutdownDataNode(final int index) { dataNodeWrapperList.get(index).stop(); } @Override public void shutdownAllDataNodes() { - for (DataNodeWrapper dataNodeWrapper : dataNodeWrapperList) { - dataNodeWrapper.stop(); - } + dataNodeWrapperList.forEach(AbstractNodeWrapper::stop); + } + + @Override + public void shutdownForciblyAllDataNodes() { + dataNodeWrapperList.forEach(AbstractNodeWrapper::stopForcibly); } @Override - public void ensureNodeStatus(List nodes, List targetStatus) + public void ensureNodeStatus( + final List nodes, final List targetStatus) throws IllegalStateException { Throwable lastException = null; for (int i = 0; i < retryCount; i++) { - try (SyncConfigNodeIServiceClient client = + try (final SyncConfigNodeIServiceClient client = (SyncConfigNodeIServiceClient) EnvFactory.getEnv().getLeaderConfigNodeConnection()) { - List errorMessages = new ArrayList<>(nodes.size()); - Map nodeIds = new HashMap<>(nodes.size()); - TShowClusterResp showClusterResp = client.showCluster(); - for (TConfigNodeLocation node : showClusterResp.getConfigNodeList()) { - nodeIds.put( - node.getInternalEndPoint().getIp() + ":" + node.getInternalEndPoint().getPort(), - node.getConfigNodeId()); - } - for (TDataNodeLocation node : showClusterResp.getDataNodeList()) { - nodeIds.put( - node.getClientRpcEndPoint().getIp() + ":" + node.getClientRpcEndPoint().getPort(), - node.getDataNodeId()); - } + final List errorMessages = new ArrayList<>(nodes.size()); + final Map nodeIds = new HashMap<>(nodes.size()); + final TShowClusterResp showClusterResp = client.showCluster(); + showClusterResp + .getConfigNodeList() + .forEach( + node -> + nodeIds.put( + node.getInternalEndPoint().getIp() + + ":" + + node.getInternalEndPoint().getPort(), + node.getConfigNodeId())); + showClusterResp + .getDataNodeList() + .forEach( + node -> + nodeIds.put( + node.getClientRpcEndPoint().getIp() + + ":" + + node.getClientRpcEndPoint().getPort(), + node.getDataNodeId())); for (int j = 0; j < nodes.size(); j++) { - String endpoint = nodes.get(j).getIpAndPortString(); + final String endpoint = nodes.get(j).getIpAndPortString(); if (!nodeIds.containsKey(endpoint)) { // Node not exist // Notice: Never modify this line, since the NodeLocation might be modified in IT 
errorMessages.add("The node " + nodes.get(j).getIpAndPortString() + " is not found!"); continue; } - String status = showClusterResp.getNodeStatus().get(nodeIds.get(endpoint)); + final String status = showClusterResp.getNodeStatus().get(nodeIds.get(endpoint)); if (!targetStatus.get(j).getStatus().equals(status)) { // Error status errorMessages.add( @@ -1013,12 +1136,12 @@ public void ensureNodeStatus(List nodes, List targe } else { lastException = new IllegalStateException(String.join(". ", errorMessages)); } - } catch (TException | ClientManagerException | IOException | InterruptedException e) { + } catch (final TException | ClientManagerException | IOException | InterruptedException e) { lastException = e; } try { TimeUnit.SECONDS.sleep(1); - } catch (InterruptedException e) { + } catch (final InterruptedException e) { throw new RuntimeException(e); } } @@ -1027,8 +1150,9 @@ public void ensureNodeStatus(List nodes, List targe @Override public int getMqttPort() { - int randomIndex = new Random(System.currentTimeMillis()).nextInt(dataNodeWrapperList.size()); - return dataNodeWrapperList.get(randomIndex).getMqttPort(); + return dataNodeWrapperList + .get(new Random(System.currentTimeMillis()).nextInt(dataNodeWrapperList.size())) + .getMqttPort(); } @Override @@ -1057,11 +1181,11 @@ public String getLibPath() { } @Override - public Optional dataNodeIdToWrapper(int nodeId) { - try (SyncConfigNodeIServiceClient leaderClient = + public Optional dataNodeIdToWrapper(final int nodeId) { + try (final SyncConfigNodeIServiceClient leaderClient = (SyncConfigNodeIServiceClient) getLeaderConfigNodeConnection()) { - TShowDataNodesResp resp = leaderClient.showDataNodes(); - for (TDataNodeInfo dataNodeInfo : resp.getDataNodesInfoList()) { + final TShowDataNodesResp resp = leaderClient.showDataNodes(); + for (final TDataNodeInfo dataNodeInfo : resp.getDataNodesInfoList()) { if (dataNodeInfo.getDataNodeId() == nodeId) { return dataNodeWrapperList.stream() .filter(dataNodeWrapper -> dataNodeWrapper.getPort() == dataNodeInfo.getRpcPort()) @@ -1069,18 +1193,18 @@ public Optional dataNodeIdToWrapper(int nodeId) { } } return Optional.empty(); - } catch (Exception e) { + } catch (final Exception e) { return Optional.empty(); } } @Override - public void registerConfigNodeKillPoints(List killPoints) { + public void registerConfigNodeKillPoints(final List killPoints) { this.configNodeKillPoints = killPoints; } @Override - public void registerDataNodeKillPoints(List killPoints) { + public void registerDataNodeKillPoints(final List killPoints) { this.dataNodeKillPoints = killPoints; } } diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/env/MultiClusterEnv.java b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/env/MultiClusterEnv.java index 1eb05cb315f41..d462b5a668b3b 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/env/MultiClusterEnv.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/env/MultiClusterEnv.java @@ -25,7 +25,7 @@ public class MultiClusterEnv extends AbstractEnv { - public MultiClusterEnv(long startTime, int index, String currentMethodName) { + public MultiClusterEnv(final long startTime, final int index, final String currentMethodName) { super(startTime); this.index = index; this.testMethodName = currentMethodName; @@ -33,18 +33,18 @@ public MultiClusterEnv(long startTime, int index, String currentMethodName) { @Override public void initClusterEnvironment() { - Pair nodeNum = EnvUtils.getNodeNum(index); + final 
Pair nodeNum = EnvUtils.getNodeNum(index); super.initEnvironment(nodeNum.getLeft(), nodeNum.getRight()); } @Override - public void initClusterEnvironment(int configNodesNum, int dataNodesNum) { + public void initClusterEnvironment(final int configNodesNum, final int dataNodesNum) { super.initEnvironment(configNodesNum, dataNodesNum); } @Override public void initClusterEnvironment( - int configNodesNum, int dataNodesNum, int testWorkingRetryCount) { + final int configNodesNum, final int dataNodesNum, final int testWorkingRetryCount) { super.initEnvironment(configNodesNum, dataNodesNum, testWorkingRetryCount); } } diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/node/AINodeWrapper.java b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/node/AINodeWrapper.java new file mode 100644 index 0000000000000..8da2437aed687 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/node/AINodeWrapper.java @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.it.env.cluster.node; + +import org.apache.iotdb.it.env.cluster.config.MppJVMConfig; +import org.apache.iotdb.it.framework.IoTDBTestLogger; + +import org.slf4j.Logger; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; + +import static org.apache.iotdb.it.env.cluster.ClusterConstant.AI_NODE_NAME; +import static org.apache.iotdb.it.env.cluster.ClusterConstant.PYTHON_PATH; +import static org.apache.iotdb.it.env.cluster.ClusterConstant.TARGET; +import static org.apache.iotdb.it.env.cluster.ClusterConstant.USER_DIR; +import static org.apache.iotdb.it.env.cluster.EnvUtils.getTimeForLogDirectory; + +public class AINodeWrapper extends AbstractNodeWrapper { + + private static final Logger logger = IoTDBTestLogger.logger; + private final long startTime; + private final String seedConfigNode; + + private static final String SCRIPT_FILE = "start-ainode.sh"; + + private static final String SHELL_COMMAND = "bash"; + + private static final String PROPERTIES_FILE = "iotdb-ainode.properties"; + public static final String CONFIG_PATH = "conf"; + public static final String SCRIPT_PATH = "sbin"; + + private void replaceAttribute(String[] keys, String[] values, String filePath) { + try (BufferedWriter writer = new BufferedWriter(new FileWriter(filePath, true))) { + for (int i = 0; i < keys.length; i++) { + String line = keys[i] + "=" + values[i]; + writer.newLine(); + writer.write(line); + } + } catch (IOException e) { + logger.error( + "Failed to set attribute for AINode in file: {} because {}", filePath, e.getMessage()); + } + } + + public AINodeWrapper( + String seedConfigNode, + String testClassName, + String testMethodName, + int clusterIndex, + int[] port, + long startTime) { + super(testClassName, testMethodName, port, clusterIndex, false, startTime); + this.seedConfigNode = seedConfigNode; + this.startTime = startTime; + } + + @Override + public String getLogDirPath() { + return System.getProperty(USER_DIR) + + File.separator + + TARGET + + File.separator + + "ainode-logs" + + File.separator + + getTestLogDirName() + + File.separator + + getTimeForLogDirectory(startTime); + } + + @Override + String getNodeType() { + return "ainode"; + } + + @Override + public void start() { + try { + File stdoutFile = new File(getLogPath()); + String filePrefix = + System.getProperty(USER_DIR) + + File.separator + + TARGET + + File.separator + + AI_NODE_NAME + + getPort(); + String propertiesFile = + filePrefix + File.separator + CONFIG_PATH + File.separator + PROPERTIES_FILE; + + // set attribute + replaceAttribute( + new String[] {"ain_seed_config_node", "ain_inference_rpc_port"}, + new String[] {this.seedConfigNode, Integer.toString(getPort())}, + propertiesFile); + + // start AINode + List startCommand = new ArrayList<>(); + startCommand.add(SHELL_COMMAND); + startCommand.add(filePrefix + File.separator + SCRIPT_PATH + File.separator + SCRIPT_FILE); + startCommand.add("-i"); + startCommand.add(filePrefix + File.separator + PYTHON_PATH); + startCommand.add("-r"); + + ProcessBuilder processBuilder = + new ProcessBuilder(startCommand) + .redirectOutput(ProcessBuilder.Redirect.appendTo(stdoutFile)) + .redirectError(ProcessBuilder.Redirect.appendTo(stdoutFile)); + this.instance = processBuilder.start(); + logger.info("In test {} {} started.", getTestLogDirName(), getId()); + } catch (Exception e) { + throw new AssertionError("Start AI Node 
failed. " + e + Paths.get("")); + } + } + + @Override + public int getMetricPort() { + // no metric currently + return -1; + } + + @Override + public String getId() { + return AI_NODE_NAME + getPort(); + } + + /* Abstract methods, which must be implemented in ConfigNode and DataNode. */ + public void reloadMutableFields() {} + ; + + public void renameFile() {} + ; + + public String getSystemConfigPath() { + return ""; + } + ; + + /** Return the node config file path specified through system variable */ + public String getDefaultNodeConfigPath() { + return ""; + } + ; + + /** Return the common config file path specified through system variable */ + public String getDefaultCommonConfigPath() { + return ""; + } + ; + + public void addStartCmdParams(List params) {} + ; + + public String getSystemPropertiesPath() { + return ""; + } + ; + + public MppJVMConfig initVMConfig() { + return null; + } + ; +} diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/node/AbstractNodeWrapper.java b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/node/AbstractNodeWrapper.java index 942b6a9e01242..f1e25f9e1d663 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/node/AbstractNodeWrapper.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/node/AbstractNodeWrapper.java @@ -125,11 +125,11 @@ public abstract class AbstractNodeWrapper implements BaseNodeWrapper { protected final MppJVMConfig jvmConfig; protected final int clusterIndex; protected final boolean isMultiCluster; - private Process instance; + protected Process instance; private final String nodeAddress; private int nodePort; - private int metricPort; - private long startTime; + private final int metricPort; + private final long startTime; private List killPoints = new ArrayList<>(); /** @@ -523,7 +523,7 @@ public final int getPort() { } @Override - public final int getMetricPort() { + public int getMetricPort() { return this.metricPort; } @@ -540,7 +540,7 @@ protected String workDirFilePath(String dirName, String fileName) { return getNodePath() + File.separator + dirName + File.separator + fileName; } - private String getLogPath() { + protected String getLogPath() { return getLogDirPath() + File.separator + getId() + ".log"; } @@ -570,6 +570,18 @@ public String getNodePath() { return System.getProperty(USER_DIR) + File.separator + TARGET + File.separator + getId(); } + public String getDataPath() { + return getNodePath() + + File.separator + + IoTDBConstant.DATA_FOLDER_NAME + + File.separator + + getNodeType() + + File.separator + + IoTDBConstant.DATA_FOLDER_NAME; + } + + abstract String getNodeType(); + public void dumpJVMSnapshot(String testCaseName) { JMXServiceURL url; try { @@ -645,11 +657,11 @@ private void dumpThread(PrintWriter output, ThreadInfo ti) { output.print(sb); } - private String getTestLogDirName() { + protected String getTestLogDirName() { if (testMethodName == null) { return testClassName; } - return testClassName + "_" + testMethodName; + return testClassName + File.separator + testMethodName; } public void setKillPoints(List killPoints) { diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/node/ConfigNodeWrapper.java b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/node/ConfigNodeWrapper.java index a93c54c52b82a..aafb3641fc1d7 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/node/ConfigNodeWrapper.java +++ 
b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/node/ConfigNodeWrapper.java @@ -38,6 +38,7 @@ import static org.apache.iotdb.it.env.cluster.ClusterConstant.CONFIG_NODE_INIT_HEAP_SIZE; import static org.apache.iotdb.it.env.cluster.ClusterConstant.CONFIG_NODE_MAX_DIRECT_MEMORY_SIZE; import static org.apache.iotdb.it.env.cluster.ClusterConstant.CONFIG_NODE_MAX_HEAP_SIZE; +import static org.apache.iotdb.it.env.cluster.ClusterConstant.CONFIG_NODE_RATIS_LOG_APPENDER_BUFFER_SIZE_MAX; import static org.apache.iotdb.it.env.cluster.ClusterConstant.DATA_REGION_CONSENSUS_PROTOCOL_CLASS; import static org.apache.iotdb.it.env.cluster.ClusterConstant.DATA_REPLICATION_FACTOR; import static org.apache.iotdb.it.env.cluster.ClusterConstant.DEFAULT_CONFIG_NODE_COMMON_PROPERTIES; @@ -56,23 +57,17 @@ public class ConfigNodeWrapper extends AbstractNodeWrapper { private final String defaultCommonPropertiesFile; public ConfigNodeWrapper( - boolean isSeed, - String targetCNs, - String testClassName, - String testMethodName, - int[] portList, - int clusterIndex, - boolean isMultiCluster, - long startTime) { + final boolean isSeed, + final String targetCNs, + final String testClassName, + final String testMethodName, + final int[] portList, + final int clusterIndex, + final boolean isMultiCluster, + final long startTime) { super(testClassName, testMethodName, portList, clusterIndex, isMultiCluster, startTime); this.consensusPort = portList[1]; this.isSeed = isSeed; - String seedConfigNodes; - if (isSeed) { - seedConfigNodes = getIpAndPortString(); - } else { - seedConfigNodes = targetCNs; - } this.defaultNodePropertiesFile = EnvUtils.getFilePathFromSysVar(DEFAULT_CONFIG_NODE_PROPERTIES, clusterIndex); this.defaultCommonPropertiesFile = @@ -82,7 +77,8 @@ public ConfigNodeWrapper( reloadMutableFields(); // initialize immutable properties - immutableNodeProperties.setProperty(IoTDBConstant.CN_SEED_CONFIG_NODE, seedConfigNodes); + immutableNodeProperties.setProperty( + IoTDBConstant.CN_SEED_CONFIG_NODE, isSeed ? getIpAndPortString() : targetCNs); immutableNodeProperties.setProperty(CN_SYSTEM_DIR, MppBaseConfig.NULL_VALUE); immutableNodeProperties.setProperty(CN_CONSENSUS_DIR, MppBaseConfig.NULL_VALUE); immutableNodeProperties.setProperty(CN_METRIC_IOTDB_REPORTER_HOST, MppBaseConfig.NULL_VALUE); @@ -128,7 +124,7 @@ public final String getId() { } @Override - protected void addStartCmdParams(List params) { + protected void addStartCmdParams(final List params) { final String workDir = getNodePath(); final String confDir = workDir + File.separator + "conf"; params.addAll( @@ -143,6 +139,11 @@ protected void addStartCmdParams(List params) { "-s")); } + @Override + String getNodeType() { + return "confignode"; + } + @Override protected void reloadMutableFields() { mutableCommonProperties.setProperty(CONFIG_NODE_CONSENSUS_PROTOCOL_CLASS, SIMPLE_CONSENSUS); @@ -160,18 +161,19 @@ protected void reloadMutableFields() { IoTDBConstant.CN_CONSENSUS_PORT, String.valueOf(this.consensusPort)); mutableNodeProperties.setProperty( IoTDBConstant.CN_METRIC_PROMETHEUS_REPORTER_PORT, String.valueOf(super.getMetricPort())); + mutableNodeProperties.setProperty(CONFIG_NODE_RATIS_LOG_APPENDER_BUFFER_SIZE_MAX, "8388608"); } @Override protected void renameFile() { - String configNodeName = isSeed ? "SeedConfigNode" : "ConfigNode"; + final String configNodeName = isSeed ? 
"SeedConfigNode" : "ConfigNode"; // rename log file - File oldLogFile = + final File oldLogFile = new File(getLogDirPath() + File.separator + configNodeName + portList[0] + ".log"); oldLogFile.renameTo(new File(getLogDirPath() + File.separator + getId() + ".log")); // rename node dir - File oldNodeDir = + final File oldNodeDir = new File( System.getProperty(USER_DIR) + File.separator @@ -182,7 +184,7 @@ protected void renameFile() { oldNodeDir.renameTo(new File(getNodePath())); } - public void setConsensusPort(int consensusPort) { + public void setConsensusPort(final int consensusPort) { this.consensusPort = consensusPort; } diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/node/DataNodeWrapper.java b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/node/DataNodeWrapper.java index 647bd9b9fa003..3df21cb42a697 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/node/DataNodeWrapper.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/node/DataNodeWrapper.java @@ -36,6 +36,7 @@ import static org.apache.iotdb.it.env.cluster.ClusterConstant.DATANODE_MAX_HEAP_SIZE; import static org.apache.iotdb.it.env.cluster.ClusterConstant.DATA_NODE_NAME; import static org.apache.iotdb.it.env.cluster.ClusterConstant.DATA_REGION_CONSENSUS_PROTOCOL_CLASS; +import static org.apache.iotdb.it.env.cluster.ClusterConstant.DATA_REGION_RATIS_LOG_APPENDER_BUFFER_SIZE_MAX; import static org.apache.iotdb.it.env.cluster.ClusterConstant.DATA_REPLICATION_FACTOR; import static org.apache.iotdb.it.env.cluster.ClusterConstant.DEFAULT_DATA_NODE_COMMON_PROPERTIES; import static org.apache.iotdb.it.env.cluster.ClusterConstant.DEFAULT_DATA_NODE_PROPERTIES; @@ -61,9 +62,11 @@ import static org.apache.iotdb.it.env.cluster.ClusterConstant.PIPE_AIR_GAP_RECEIVER_PORT; import static org.apache.iotdb.it.env.cluster.ClusterConstant.REST_SERVICE_PORT; import static org.apache.iotdb.it.env.cluster.ClusterConstant.SCHEMA_REGION_CONSENSUS_PROTOCOL_CLASS; +import static org.apache.iotdb.it.env.cluster.ClusterConstant.SCHEMA_REGION_RATIS_LOG_APPENDER_BUFFER_SIZE_MAX; import static org.apache.iotdb.it.env.cluster.ClusterConstant.SCHEMA_REPLICATION_FACTOR; import static org.apache.iotdb.it.env.cluster.ClusterConstant.TARGET; import static org.apache.iotdb.it.env.cluster.ClusterConstant.USER_DIR; +import static org.apache.iotdb.it.env.cluster.ClusterConstant.WAL_BUFFER_SIZE_IN_BYTE; public class DataNodeWrapper extends AbstractNodeWrapper { private int mppDataExchangePort; @@ -80,13 +83,13 @@ public class DataNodeWrapper extends AbstractNodeWrapper { private final String defaultCommonPropertiesFile; public DataNodeWrapper( - String seedConfigNode, - String testClassName, - String testMethodName, - int[] portList, - int clusterIndex, - boolean isMultiCluster, - long startTime) { + final String seedConfigNode, + final String testClassName, + final String testMethodName, + final int[] portList, + final int clusterIndex, + final boolean isMultiCluster, + final long startTime) { super(testClassName, testMethodName, portList, clusterIndex, isMultiCluster, startTime); this.internalAddress = super.getIp(); this.mppDataExchangePort = portList[1]; @@ -158,7 +161,7 @@ public final String getId() { } @Override - protected void addStartCmdParams(List params) { + protected void addStartCmdParams(final List params) { final String workDir = getNodePath(); final String confDir = workDir + File.separator + "conf"; params.addAll( @@ -172,6 +175,11 @@ protected void 
diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteCommonConfig.java b/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteCommonConfig.java index dd93479ecf434..a79a56852f16d 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteCommonConfig.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteCommonConfig.java @@ -85,8 +85,7 @@ public CommonConfig setEnableCrossSpaceCompaction(boolean enableCrossSpaceCompac} { return this; } @Override - public CommonConfig setMaxInnerCompactionCandidateFileNum( - int maxInnerCompactionCandidateFileNum) { + public CommonConfig setInnerCompactionCandidateFileNum(int maxInnerCompactionCandidateFileNum) { return this; } @@ -106,7 +105,7 @@ public CommonConfig setPrimitiveArraySize(int primitiveArraySize) { return this; } @Override - public CommonConfig setAvgSeriesPointNumberThreshold(int avgSeriesPointNumberThreshold) { + public CommonConfig setTargetChunkPointNum(int targetChunkPointNum) { return this; } @@ -166,6 +165,11 @@ public CommonConfig setTimePartitionInterval(long timePartitionInterval) { return this; } + @Override + public CommonConfig setTTLCheckInterval(long ttlCheckInterval) { + return this; + } + @Override public CommonConfig setTimePartitionOrigin(long timePartitionOrigin) { return this; @@ -250,6 +254,11 @@
public CommonConfig setSeriesSlotNum(int seriesSlotNum) { return this; } + @Override + public CommonConfig setSeriesPartitionExecutorClass(String seriesPartitionExecutorClass) { + return this; + } + @Override public CommonConfig setSchemaMemoryAllocate(String schemaMemoryAllocate) { return this; @@ -299,6 +308,16 @@ public CommonConfig setSchemaRegionPerDataNode(double schemaRegionPerDataNode) { return this; } + @Override + public CommonConfig setPipeMemoryManagementEnabled(boolean pipeMemoryManagementEnabled) { + return this; + } + + @Override + public CommonConfig setIsPipeEnableMemoryCheck(boolean isPipeEnableMemoryCheck) { + return this; + } + @Override public CommonConfig setPipeAirGapReceiverEnabled(boolean isPipeAirGapReceiverEnabled) { return this; @@ -320,7 +339,7 @@ public CommonConfig setTagAttributeTotalSize(int tagAttributeTotalSize) { } @Override - public CommonConfig setCnConnectionTimeoutMs(int connectionTimeoutMs) { + public CommonConfig setDnConnectionTimeoutMs(int connectionTimeoutMs) { return this; } @@ -340,4 +359,36 @@ public CommonConfig setPipeMetaSyncerInitialSyncDelayMinutes( public CommonConfig setPipeMetaSyncerSyncIntervalMinutes(long pipeMetaSyncerSyncIntervalMinutes) { return this; } + + @Override + public CommonConfig setPipeConnectorRequestSliceThresholdBytes( + int pipeConnectorRequestSliceThresholdBytes) { + return this; + } + + @Override + public CommonConfig setQueryMemoryProportion(String queryMemoryProportion) { + return this; + } + + @Override + public CommonConfig setSubscriptionPrefetchTsFileBatchMaxDelayInMs( + int subscriptionPrefetchTsFileBatchMaxDelayInMs) { + return this; + } + + @Override + public CommonConfig setSubscriptionPrefetchTsFileBatchMaxSizeInBytes( + int subscriptionPrefetchTsFileBatchMaxSizeInBytes) { + return this; + } + + public CommonConfig setSubscriptionEnabled(boolean subscriptionEnabled) { + return this; + } + + @Override + public CommonConfig setDatanodeMemoryProportion(String datanodeMemoryProportion) { + return this; + } } diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteConfigNodeConfig.java b/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteConfigNodeConfig.java index 33a6bc48afd72..ae8645eff524e 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteConfigNodeConfig.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteConfigNodeConfig.java @@ -28,4 +28,14 @@ public ConfigNodeConfig setMetricReporterType(List<String> metricReporterTypes) { return this; } + + @Override + public ConfigNodeConfig setMetricPrometheusReporterUsername(String username) { + return this; + } + + @Override + public ConfigNodeConfig setMetricPrometheusReporterPassword(String password) { + return this; + } } diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteDataNodeConfig.java b/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteDataNodeConfig.java index fe89997bc41f8..80d9d98d23df0 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteDataNodeConfig.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteDataNodeConfig.java @@ -28,6 +28,16 @@ public DataNodeConfig setMetricReporterType(List<String> metricReporterTypes) { return this; } + @Override + public DataNodeConfig setMetricPrometheusReporterUsername(String username) { + return this; + } +
+ @Override + public DataNodeConfig setMetricPrometheusReporterPassword(String password) { + return this; + } + @Override public DataNodeConfig setEnableRestService(boolean enableRestService) { return this; @@ -43,4 +53,14 @@ public DataNodeConfig setLoadTsFileAnalyzeSchemaMemorySizeInBytes( long loadTsFileAnalyzeSchemaMemorySizeInBytes) { return this; } + + @Override + public DataNodeConfig setLoadLastCacheStrategy(String strategyName) { + return this; + } + + @Override + public DataNodeConfig setCacheLastValuesForLoad(boolean cacheLastValuesForLoad) { + return this; + } } diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/remote/env/RemoteServerEnv.java b/integration-test/src/main/java/org/apache/iotdb/it/env/remote/env/RemoteServerEnv.java index b8b29544520dc..efd883e41a9d3 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/remote/env/RemoteServerEnv.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/remote/env/RemoteServerEnv.java @@ -47,6 +47,7 @@ import java.sql.DriverManager; import java.sql.SQLException; import java.sql.Statement; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -72,7 +73,7 @@ public void initClusterEnvironment() { try (Connection connection = EnvFactory.getEnv().getConnection(); Statement statement = connection.createStatement()) { statement.execute("CREATE DATABASE root.init;"); - statement.execute("DELETE DATABASE root;"); + statement.execute("DELETE DATABASE root.init;"); } catch (Exception e) { e.printStackTrace(); throw new AssertionError(e.getMessage()); } @@ -107,14 +108,16 @@ public ClusterConfig getConfig() { } @Override - public List<String> getMetricPrometheusReporterContents() { + public List<String> getMetricPrometheusReporterContents(String authHeader) { List<String> result = new ArrayList<>(); result.add( getUrlContent( - Config.IOTDB_HTTP_URL_PREFIX + ip_addr + ":" + configNodeMetricPort + "/metrics")); + Config.IOTDB_HTTP_URL_PREFIX + ip_addr + ":" + configNodeMetricPort + "/metrics", + authHeader)); result.add( getUrlContent( - Config.IOTDB_HTTP_URL_PREFIX + ip_addr + ":" + dataNodeMetricPort + "/metrics")); + Config.IOTDB_HTTP_URL_PREFIX + ip_addr + ":" + dataNodeMetricPort + "/metrics", + authHeader)); return result; } @@ -170,6 +173,12 @@ public Connection getConnection(Constant.Version version, String username, Strin return connection; } + @Override + public Connection getConnection(DataNodeWrapper dataNodeWrapper, String username, String password) + throws SQLException { + throw new UnsupportedOperationException(); + } + public void setTestMethodName(String testCaseName) { // Do nothing } @@ -225,6 +234,19 @@ public ISession getSessionConnection() throws IoTDBConnectionException { return session; } + @Override + public ISession getSessionConnection(ZoneId zoneId) throws IoTDBConnectionException { + Session session = + new Session( + ip_addr, + Integer.parseInt(port), + SessionConfig.DEFAULT_USER, + SessionConfig.DEFAULT_PASSWORD, + zoneId); + session.open(); + return session; + } + public ISession getSessionConnection(String userName, String password) throws IoTDBConnectionException { Session session = new Session(ip_addr, Integer.parseInt(port), userName, password); @@ -279,6 +301,11 @@ public void shutdownAllConfigNodes() { throw new UnsupportedOperationException(); } + @Override + public void shutdownForciblyAllConfigNodes() { + throw new UnsupportedOperationException(); + } + @Override public void ensureNodeStatus(List<BaseNodeWrapper> nodes, List<NodeStatus> targetStatus) { throw
new UnsupportedOperationException(); @@ -344,6 +371,11 @@ public void shutdownAllDataNodes() { throw new UnsupportedOperationException(); } + @Override + public void shutdownForciblyAllDataNodes() { + throw new UnsupportedOperationException(); + } + @Override public int getMqttPort() { throw new UnsupportedOperationException(); diff --git a/integration-test/src/main/java/org/apache/iotdb/it/utils/TsFileGenerator.java b/integration-test/src/main/java/org/apache/iotdb/it/utils/TsFileGenerator.java index d30c0c36d5048..e71d788fc013c 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/utils/TsFileGenerator.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/utils/TsFileGenerator.java @@ -19,6 +19,7 @@ package org.apache.iotdb.it.utils; +import org.apache.iotdb.commons.conf.IoTDBConstant; import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.db.storageengine.dataregion.modification.Deletion; @@ -248,6 +249,36 @@ private void generateTEXT(final Object obj, final int row) { new Binary(String.format("test point %d", random.nextInt()), TSFileConfig.STRING_CHARSET); } + public void generateDeletion(final String device) throws IOException, IllegalPathException { + try (final ModificationFile modificationFile = + new ModificationFile(tsFile.getAbsolutePath() + ModificationFile.FILE_SUFFIX)) { + modificationFile.write( + new Deletion( + new PartialPath( + device + TsFileConstant.PATH_SEPARATOR + IoTDBConstant.ONE_LEVEL_PATH_WILDCARD), + tsFile.length(), + Long.MIN_VALUE, + Long.MAX_VALUE)); + device2TimeSet.remove(device); + device2MeasurementSchema.remove(device); + } + } + + public void generateDeletion(final String device, final MeasurementSchema measurement) + throws IOException, IllegalPathException { + try (final ModificationFile modificationFile = + new ModificationFile(tsFile.getAbsolutePath() + ModificationFile.FILE_SUFFIX)) { + modificationFile.write( + new Deletion( + new PartialPath( + device + TsFileConstant.PATH_SEPARATOR + measurement.getMeasurementId()), + tsFile.length(), + Long.MIN_VALUE, + Long.MAX_VALUE)); + device2MeasurementSchema.get(device).remove(measurement); + } + } + public void generateDeletion(final String device, final int number) throws IOException, IllegalPathException { try (final ModificationFile modificationFile = diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/category/MultiClusterIT2Subscription.java b/integration-test/src/main/java/org/apache/iotdb/itbase/category/AIClusterIT.java similarity index 94% rename from integration-test/src/main/java/org/apache/iotdb/itbase/category/MultiClusterIT2Subscription.java rename to integration-test/src/main/java/org/apache/iotdb/itbase/category/AIClusterIT.java index 34d870e4d6d1d..ab3c458cd79c1 100644 --- a/integration-test/src/main/java/org/apache/iotdb/itbase/category/MultiClusterIT2Subscription.java +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/category/AIClusterIT.java @@ -19,4 +19,4 @@ package org.apache.iotdb.itbase.category; -public interface MultiClusterIT2Subscription {} +public interface AIClusterIT {} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/category/MultiClusterIT2SubscriptionArchVerification.java b/integration-test/src/main/java/org/apache/iotdb/itbase/category/MultiClusterIT2SubscriptionArchVerification.java new file mode 100644 index 0000000000000..3f0c5afa50da6 --- /dev/null +++ 
b/integration-test/src/main/java/org/apache/iotdb/itbase/category/MultiClusterIT2SubscriptionArchVerification.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.itbase.category; + +public interface MultiClusterIT2SubscriptionArchVerification {} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/category/MultiClusterIT2SubscriptionRegressionConsumer.java b/integration-test/src/main/java/org/apache/iotdb/itbase/category/MultiClusterIT2SubscriptionRegressionConsumer.java new file mode 100644 index 0000000000000..d2d63318ef7a6 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/category/MultiClusterIT2SubscriptionRegressionConsumer.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.itbase.category; + +public interface MultiClusterIT2SubscriptionRegressionConsumer {} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/category/MultiClusterIT2SubscriptionRegressionMisc.java b/integration-test/src/main/java/org/apache/iotdb/itbase/category/MultiClusterIT2SubscriptionRegressionMisc.java new file mode 100644 index 0000000000000..30f1ef217f2ff --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/category/MultiClusterIT2SubscriptionRegressionMisc.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.itbase.category; + +public interface MultiClusterIT2SubscriptionRegressionMisc {} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/env/BaseEnv.java b/integration-test/src/main/java/org/apache/iotdb/itbase/env/BaseEnv.java index 3b9b27de8517c..c3dc9a3eb3222 100644 --- a/integration-test/src/main/java/org/apache/iotdb/itbase/env/BaseEnv.java +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/env/BaseEnv.java @@ -31,6 +31,8 @@ import org.apache.iotdb.jdbc.Constant; import org.apache.iotdb.rpc.IoTDBConnectionException; +import reactor.util.annotation.Nullable; + import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -39,6 +41,7 @@ import java.net.URL; import java.sql.Connection; import java.sql.SQLException; +import java.time.ZoneId; import java.util.List; import java.util.Optional; @@ -70,11 +73,14 @@ public interface BaseEnv { /** Return the {@link ClusterConfig} for developers to set values before test. */ ClusterConfig getConfig(); - default String getUrlContent(String urlStr) { + default String getUrlContent(String urlStr, @Nullable String authHeader) { StringBuilder sb = new StringBuilder(); try { URL url = new URL(urlStr); HttpURLConnection httpConnection = (HttpURLConnection) url.openConnection(); + if (authHeader != null) { + httpConnection.setRequestProperty("Authorization", authHeader); + } if (httpConnection.getResponseCode() == HttpURLConnection.HTTP_OK) { InputStream in = httpConnection.getInputStream(); InputStreamReader isr = new InputStreamReader(in); @@ -96,7 +102,7 @@ default String getUrlContent(String urlStr) { } /** Return the content of prometheus */ - List<String> getMetricPrometheusReporterContents(); + List<String> getMetricPrometheusReporterContents(String authHeader); default Connection getConnection() throws SQLException { return getConnection(SessionConfig.DEFAULT_USER, SessionConfig.DEFAULT_PASSWORD); @@ -111,6 +117,9 @@ Connection getConnection(Constant.Version version, String username, String passw Connection getConnection(String username, String password) throws SQLException; + Connection getConnection(DataNodeWrapper dataNodeWrapper, String username, String password) + throws SQLException; + default Connection getWriteOnlyConnectionWithSpecifiedDataNode(DataNodeWrapper dataNode) throws SQLException { return getWriteOnlyConnectionWithSpecifiedDataNode( @@ -147,6 +156,8 @@ IConfigNodeRPCService.Iface getLeaderConfigNodeConnection() ISession getSessionConnection() throws IoTDBConnectionException; + ISession getSessionConnection(ZoneId zoneId) throws IoTDBConnectionException; + ISession getSessionConnection(String userName, String password) throws IoTDBConnectionException; ISession getSessionConnection(List<String> nodeUrls) throws IoTDBConnectionException; @@ -181,6 +192,8 @@ default IConfigNodeRPCService.Iface getConfigNodeConnection(int index) throws Ex /** Shutdown all existed ConfigNodes. */ void shutdownAllConfigNodes(); + void shutdownForciblyAllConfigNodes(); + /** * Ensure all the nodes being in the corresponding status.
* @@ -245,6 +258,9 @@ void ensureNodeStatus(List<BaseNodeWrapper> nodes, List<NodeStatus> targetStatus /** Shutdown all existed DataNodes. */ void shutdownAllDataNodes(); + /** Shutdown forcibly all existed DataNodes. */ + void shutdownForciblyAllDataNodes(); + int getMqttPort(); String getIP();
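With `getUrlContent(String urlStr, @Nullable String authHeader)` and `getMetricPrometheusReporterContents(String authHeader)` now taking an Authorization header, tests that enable the new Prometheus reporter credentials have to build that header themselves. A minimal sketch of a matching HTTP Basic value; the helper name and credentials are placeholders of mine, not code from this PR:

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

// Produces a value such as "Basic dXNlcjpwYXNzd29yZA==" suitable for the
// authHeader parameter; pass null instead when no reporter auth is configured.
static String basicAuthHeader(String username, String password) {
  String credentials = username + ":" + password;
  return "Basic "
      + Base64.getEncoder().encodeToString(credentials.getBytes(StandardCharsets.UTF_8));
}
```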
diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/env/CommonConfig.java b/integration-test/src/main/java/org/apache/iotdb/itbase/env/CommonConfig.java index 840b9effde401..7c9a2d00521fa 100644 --- a/integration-test/src/main/java/org/apache/iotdb/itbase/env/CommonConfig.java +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/env/CommonConfig.java @@ -48,7 +48,7 @@ public interface CommonConfig { CommonConfig setEnableCrossSpaceCompaction(boolean enableCrossSpaceCompaction); - CommonConfig setMaxInnerCompactionCandidateFileNum(int maxInnerCompactionCandidateFileNum); + CommonConfig setInnerCompactionCandidateFileNum(int maxInnerCompactionCandidateFileNum); CommonConfig setAutoCreateSchemaEnabled(boolean enableAutoCreateSchema); @@ -56,7 +56,7 @@ public interface CommonConfig { CommonConfig setPrimitiveArraySize(int primitiveArraySize); - CommonConfig setAvgSeriesPointNumberThreshold(int avgSeriesPointNumberThreshold); + CommonConfig setTargetChunkPointNum(int targetChunkPointNum); CommonConfig setMaxTsBlockLineNumber(int maxTsBlockLineNumber); @@ -80,6 +80,8 @@ public interface CommonConfig { CommonConfig setTimePartitionInterval(long timePartitionInterval); + CommonConfig setTTLCheckInterval(long ttlCheckInterval); + CommonConfig setTimePartitionOrigin(long timePartitionOrigin); CommonConfig setTimestampPrecision(String timestampPrecision); @@ -114,6 +116,8 @@ CommonConfig setEnableAutoLeaderBalanceForIoTConsensus( CommonConfig setSeriesSlotNum(int seriesSlotNum); + CommonConfig setSeriesPartitionExecutorClass(String seriesPartitionExecutorClass); + CommonConfig setSchemaMemoryAllocate(String schemaMemoryAllocate); CommonConfig setWriteMemoryProportion(String writeMemoryProportion); @@ -134,6 +138,10 @@ CommonConfig setEnableAutoLeaderBalanceForIoTConsensus( CommonConfig setSchemaRegionPerDataNode(double schemaRegionPerDataNode); + CommonConfig setPipeMemoryManagementEnabled(boolean pipeMemoryManagementEnabled); + + CommonConfig setIsPipeEnableMemoryCheck(boolean isPipeEnableMemoryCheck); + CommonConfig setPipeAirGapReceiverEnabled(boolean isPipeAirGapReceiverEnabled); CommonConfig setDriverTaskExecutionTimeSliceInMs(long driverTaskExecutionTimeSliceInMs); @@ -142,7 +150,7 @@ CommonConfig setEnableAutoLeaderBalanceForIoTConsensus( CommonConfig setTagAttributeTotalSize(int tagAttributeTotalSize); - CommonConfig setCnConnectionTimeoutMs(int connectionTimeoutMs); + CommonConfig setDnConnectionTimeoutMs(int connectionTimeoutMs); CommonConfig setPipeHeartbeatIntervalSecondsForCollectingPipeMeta( int pipeHeartbeatIntervalSecondsForCollectingPipeMeta); @@ -150,4 +158,23 @@ CommonConfig setPipeHeartbeatIntervalSecondsForCollectingPipeMeta( CommonConfig setPipeMetaSyncerInitialSyncDelayMinutes(long pipeMetaSyncerInitialSyncDelayMinutes); CommonConfig setPipeMetaSyncerSyncIntervalMinutes(long pipeMetaSyncerSyncIntervalMinutes); + + CommonConfig setPipeConnectorRequestSliceThresholdBytes( + int pipeConnectorRequestSliceThresholdBytes); + + CommonConfig setQueryMemoryProportion(String queryMemoryProportion); + + CommonConfig setSubscriptionPrefetchTsFileBatchMaxDelayInMs( + int subscriptionPrefetchTsFileBatchMaxDelayInMs); + + CommonConfig setSubscriptionPrefetchTsFileBatchMaxSizeInBytes( + int subscriptionPrefetchTsFileBatchMaxSizeInBytes); + + CommonConfig setSubscriptionEnabled(boolean subscriptionEnabled); + + default CommonConfig setDefaultStorageGroupLevel(int defaultStorageGroupLevel) { + return this; + } + + CommonConfig setDatanodeMemoryProportion(String datanodeMemoryProportion); } diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/env/ConfigNodeConfig.java b/integration-test/src/main/java/org/apache/iotdb/itbase/env/ConfigNodeConfig.java index bf7179ef70289..65a5a3271fc19 100644 --- a/integration-test/src/main/java/org/apache/iotdb/itbase/env/ConfigNodeConfig.java +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/env/ConfigNodeConfig.java @@ -23,5 +23,10 @@ /** This interface is used to handle properties in iotdb-confignode.properties. */ public interface ConfigNodeConfig { + ConfigNodeConfig setMetricReporterType(List<String> metricReporterTypes); + + ConfigNodeConfig setMetricPrometheusReporterUsername(String username); + + ConfigNodeConfig setMetricPrometheusReporterPassword(String password); } diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/env/DataNodeConfig.java b/integration-test/src/main/java/org/apache/iotdb/itbase/env/DataNodeConfig.java index 2887b0a987189..c6112a0e639c1 100644 --- a/integration-test/src/main/java/org/apache/iotdb/itbase/env/DataNodeConfig.java +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/env/DataNodeConfig.java @@ -25,10 +25,18 @@ public interface DataNodeConfig { DataNodeConfig setMetricReporterType(List<String> metricReporterTypes); + DataNodeConfig setMetricPrometheusReporterUsername(String username); + + DataNodeConfig setMetricPrometheusReporterPassword(String password); + DataNodeConfig setEnableRestService(boolean enableRestService); DataNodeConfig setConnectionTimeoutInMS(int connectionTimeoutInMS); DataNodeConfig setLoadTsFileAnalyzeSchemaMemorySizeInBytes( long loadTsFileAnalyzeSchemaMemorySizeInBytes); + + DataNodeConfig setLoadLastCacheStrategy(String strategyName); + + DataNodeConfig setCacheLastValuesForLoad(boolean cacheLastValuesForLoad); } diff --git a/integration-test/src/test/java/org/apache/iotdb/ainode/it/AINodeBasicIT.java b/integration-test/src/test/java/org/apache/iotdb/ainode/it/AINodeBasicIT.java new file mode 100644 index 0000000000000..07d29c0d224ed --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/ainode/it/AINodeBasicIT.java @@ -0,0 +1,254 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.apache.iotdb.ainode.it; + +import org.apache.iotdb.it.env.EnvFactory; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.AIClusterIT; + +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.File; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.apache.iotdb.db.it.utils.TestUtils.prepareData; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +@RunWith(IoTDBTestRunner.class) +@Category({AIClusterIT.class}) +public class AINodeBasicIT { + static final String MODEL_PATH = + System.getProperty("user.dir") + + File.separator + + "src" + + File.separator + + "test" + + File.separator + + "resources" + + File.separator + + "ainode-example"; + + static String[] sqls = + new String[] { + "set configuration \"trusted_uri_pattern\"='.*'", + "create model identity using uri \"" + MODEL_PATH + "\"", + "CREATE DATABASE root.AI.data", + "CREATE TIMESERIES root.AI.data.s0 WITH DATATYPE=FLOAT, ENCODING=RLE", + "CREATE TIMESERIES root.AI.data.s1 WITH DATATYPE=FLOAT, ENCODING=RLE", + "CREATE TIMESERIES root.AI.data.s2 WITH DATATYPE=FLOAT, ENCODING=RLE", + "CREATE TIMESERIES root.AI.data.s3 WITH DATATYPE=DOUBLE, ENCODING=RLE", + "insert into root.AI.data(timestamp,s0,s1,s2,s3) values(1,1.0,2.0,3.0,4.0)", + "insert into root.AI.data(timestamp,s0,s1,s2,s3) values(2,2.0,3.0,4.0,5.0)", + "insert into root.AI.data(timestamp,s0,s1,s2,s3) values(3,3.0,4.0,5.0,6.0)", + "insert into root.AI.data(timestamp,s0,s1,s2,s3) values(4,4.0,5.0,6.0,7.0)", + "insert into root.AI.data(timestamp,s0,s1,s2,s3) values(5,5.0,6.0,7.0,8.0)", + "insert into root.AI.data(timestamp,s0,s1,s2,s3) values(6,6.0,7.0,8.0,9.0)", + "insert into root.AI.data(timestamp,s0,s1,s2,s3) values(7,7.0,8.0,9.0,10.0)", + }; + + @BeforeClass + public static void setUp() throws Exception { + // Init 1C1D1M cluster environment + EnvFactory.getEnv().initClusterEnvironment(1, 1); + prepareData(sqls); + } + + @AfterClass + public static void tearDown() throws Exception { + EnvFactory.getEnv().cleanClusterEnvironment(); + } + + private static void checkHeader(ResultSetMetaData resultSetMetaData, String title) + throws SQLException { + String[] headers = title.split(","); + for (int i = 1; i <= resultSetMetaData.getColumnCount(); i++) { + assertEquals(headers[i - 1], resultSetMetaData.getColumnName(i)); + } + } + + private void errorTest(String sql, String errorMessage) { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + + try (ResultSet ignored = statement.executeQuery(sql)) { + fail("There should be an exception"); + } + } catch (SQLException e) { + assertEquals(errorMessage, e.getMessage()); + } + } + + @Test + public void aiNodeConnectionTest() { + String sql = "SHOW AINODES"; + String title = "NodeID,Status,RpcAddress,RpcPort"; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + + try (ResultSet resultSet = statement.executeQuery(sql)) { + ResultSetMetaData resultSetMetaData = resultSet.getMetaData(); + checkHeader(resultSetMetaData, title); + int count = 0; + while (resultSet.next()) { + assertEquals("2", resultSet.getString(1)); + assertEquals("Running", 
resultSet.getString(2)); + count++; + } + assertEquals(1, count); + } + } catch (SQLException e) { + e.printStackTrace(); + fail(e.getMessage()); + } + } + + @Test + public void ModelOperationTest() { + String registerSql = "create model operationTest using uri \"" + MODEL_PATH + "\""; + String showSql = "SHOW MODELS operationTest"; + String dropSql = "DROP MODEL operationTest"; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + statement.execute(registerSql); + boolean loading = true; + int count = 0; + while (loading) { + try (ResultSet resultSet = statement.executeQuery(showSql)) { + ResultSetMetaData resultSetMetaData = resultSet.getMetaData(); + checkHeader(resultSetMetaData, "ModelId,ModelType,State,Configs,Notes"); + while (resultSet.next()) { + String modelName = resultSet.getString(1); + String modelType = resultSet.getString(2); + String status = resultSet.getString(3); + + assertEquals("operationTest", modelName); + assertEquals("USER_DEFINED", modelType); + if (status.equals("ACTIVE")) { + loading = false; + count++; + } else if (status.equals("LOADING")) { + break; + } else { + fail("Unexpected status of model: " + status); + } + } + } + } + assertEquals(1, count); + statement.execute(dropSql); + try (ResultSet resultSet = statement.executeQuery(showSql)) { + ResultSetMetaData resultSetMetaData = resultSet.getMetaData(); + checkHeader(resultSetMetaData, "ModelId,ModelType,State,Configs,Notes"); + count = 0; + while (resultSet.next()) { + count++; + } + assertEquals(0, count); + } + } catch (SQLException e) { + fail(e.getMessage()); + } + } + + @Test + public void callInferenceTest() { + String sql = + "CALL INFERENCE(identity, \"select s0,s1,s2 from root.AI.data\", generateTime=true)"; + String sql2 = "CALL INFERENCE(identity, \"select s2,s0,s1 from root.AI.data\")"; + String sql3 = + "CALL INFERENCE(_NaiveForecaster, \"select s0 from root.AI.data\", predict_length=3, generateTime=true)"; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + + try (ResultSet resultSet = statement.executeQuery(sql)) { + ResultSetMetaData resultSetMetaData = resultSet.getMetaData(); + checkHeader(resultSetMetaData, "Time,output0,output1,output2"); + int count = 0; + while (resultSet.next()) { + float s0 = resultSet.getFloat(2); + float s1 = resultSet.getFloat(3); + float s2 = resultSet.getFloat(4); + + assertEquals(s0, count + 1.0, 0.0001); + assertEquals(s1, count + 2.0, 0.0001); + assertEquals(s2, count + 3.0, 0.0001); + count++; + } + assertEquals(7, count); + } + + try (ResultSet resultSet = statement.executeQuery(sql2)) { + ResultSetMetaData resultSetMetaData = resultSet.getMetaData(); + checkHeader(resultSetMetaData, "output0,output1,output2"); + int count = 0; + while (resultSet.next()) { + float s2 = resultSet.getFloat(1); + float s0 = resultSet.getFloat(2); + float s1 = resultSet.getFloat(3); + + assertEquals(s0, count + 1.0, 0.0001); + assertEquals(s1, count + 2.0, 0.0001); + assertEquals(s2, count + 3.0, 0.0001); + count++; + } + assertEquals(7, count); + } + + try (ResultSet resultSet = statement.executeQuery(sql3)) { + ResultSetMetaData resultSetMetaData = resultSet.getMetaData(); + checkHeader(resultSetMetaData, "Time,output0,output1,output2"); + int count = 0; + while (resultSet.next()) { + count++; + } + assertEquals(3, count); + } + + } catch (SQLException e) { + fail(e.getMessage()); + } + } + + @Test + public void errorTest() { + 
String sql = + "CALL INFERENCE(notFound404, \"select s0,s1,s2 from root.AI.data\", window=head(5))"; + errorTest(sql, "1505: model [notFound404] has not been created."); + sql = "CALL INFERENCE(identity, \"select s0,s1,s2 from root.AI.data\", window=head(2))"; + errorTest(sql, "701: Window output 2 is not equal to input size of model 7"); + sql = "CALL INFERENCE(identity, \"select s0,s1,s2 from root.AI.data limit 5\")"; + errorTest( + sql, + "301: The number of rows 5 in the input data does not match the model input 7. Try to use LIMIT in SQL or WINDOW in CALL INFERENCE"); + sql = "CREATE MODEL 中文 USING URI \"" + MODEL_PATH + "\""; + errorTest(sql, "701: ModelName can only contain letters, numbers, and underscores"); + sql = "DROP MODEL _GaussianHMM"; + errorTest(sql, "1502: Built-in model _GaussianHMM can't be removed"); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/cli/it/AbstractScript.java b/integration-test/src/test/java/org/apache/iotdb/cli/it/AbstractScriptIT.java similarity index 98% rename from integration-test/src/test/java/org/apache/iotdb/cli/it/AbstractScript.java rename to integration-test/src/test/java/org/apache/iotdb/cli/it/AbstractScriptIT.java index 53f51cb81a6ea..a0a25a2e12e0d 100644 --- a/integration-test/src/test/java/org/apache/iotdb/cli/it/AbstractScript.java +++ b/integration-test/src/test/java/org/apache/iotdb/cli/it/AbstractScriptIT.java @@ -34,7 +34,7 @@ import static org.junit.Assert.fail; @RunWith(IoTDBTestRunner.class) -public abstract class AbstractScript { +public abstract class AbstractScriptIT { protected void testOutput(ProcessBuilder builder, @Nullable String[] output, int statusCode) throws IOException { diff --git a/integration-test/src/test/java/org/apache/iotdb/cli/it/StartClientScriptIT.java b/integration-test/src/test/java/org/apache/iotdb/cli/it/StartClientScriptIT.java index 317ec218fc617..0385071e2ad6a 100644 --- a/integration-test/src/test/java/org/apache/iotdb/cli/it/StartClientScriptIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/cli/it/StartClientScriptIT.java @@ -35,7 +35,7 @@ @RunWith(IoTDBTestRunner.class) @Category({LocalStandaloneIT.class, ClusterIT.class}) -public class StartClientScriptIT extends AbstractScript { +public class StartClientScriptIT extends AbstractScriptIT { private static String ip; @@ -77,7 +77,7 @@ public void test() throws IOException { protected void testOnWindows() throws IOException { final String[] output = { - "Error: Connection Error, please check whether the network is available or the server has started. Host is 127.0.0.1, port is 6668." + "Error: Connection Error, please check whether the network is available or the server has started." }; ProcessBuilder builder = new ProcessBuilder( @@ -121,7 +121,7 @@ protected void testOnWindows() throws IOException { protected void testOnUnix() throws IOException { final String[] output = { - "Error: Connection Error, please check whether the network is available or the server has started. Host is 127.0.0.1, port is 6668." + "Error: Connection Error, please check whether the network is available or the server has started." 
}; ProcessBuilder builder = new ProcessBuilder( diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/IoTDBConfigNodeSnapshot2IT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/IoTDBConfigNodeSnapshot2IT.java new file mode 100644 index 0000000000000..c156adbe259fe --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/IoTDBConfigNodeSnapshot2IT.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.it; + +import org.apache.iotdb.consensus.ConsensusFactory; +import org.apache.iotdb.it.env.EnvFactory; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.ClusterIT; + +import org.junit.Before; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +@RunWith(IoTDBTestRunner.class) +@Category({ClusterIT.class}) +public class IoTDBConfigNodeSnapshot2IT extends IoTDBConfigNodeSnapshotIT { + @Before + public void setUp() throws Exception { + // Set the ConfigNode Ratis snapshot trigger threshold to 1 + EnvFactory.getEnv() + .getConfig() + .getCommonConfig() + .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setConfigNodeRatisSnapshotTriggerThreshold(1) + .setTimePartitionInterval(testTimePartitionInterval); + + // Init 2C2D cluster environment + EnvFactory.getEnv().initClusterEnvironment(2, 2); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/IoTDBConfigNodeSnapshotIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/IoTDBConfigNodeSnapshotIT.java index af7cb37669d18..9ff58305bf506 100644 --- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/IoTDBConfigNodeSnapshotIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/IoTDBConfigNodeSnapshotIT.java @@ -78,8 +78,7 @@ @RunWith(IoTDBTestRunner.class) @Category({ClusterIT.class}) public class IoTDBConfigNodeSnapshotIT { - private static final int testRatisSnapshotTriggerThreshold = 100; - private static final long testTimePartitionInterval = 86400; + protected final long testTimePartitionInterval = 86400; @Before public void setUp() throws Exception { @@ -87,7 +86,7 @@ public void setUp() throws Exception { .getConfig() .getCommonConfig() .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setConfigNodeRatisSnapshotTriggerThreshold(testRatisSnapshotTriggerThreshold) + .setConfigNodeRatisSnapshotTriggerThreshold(100) .setTimePartitionInterval(testTimePartitionInterval); // Init 2C2D cluster environment diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/cluster/IoTDBClusterNodeGetterIT.java
b/integration-test/src/test/java/org/apache/iotdb/confignode/it/cluster/IoTDBClusterNodeGetterIT.java index d31e221724950..2ca034497f095 100644 --- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/cluster/IoTDBClusterNodeGetterIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/cluster/IoTDBClusterNodeGetterIT.java @@ -162,12 +162,10 @@ public void showClusterAndNodesTest() throws Exception { clusterParameters.getSchemaReplicationFactor()); Assert.assertEquals( expectedParameters.getDataRegionPerDataNode(), - clusterParameters.getDataRegionPerDataNode(), - 0.01); + clusterParameters.getDataRegionPerDataNode()); Assert.assertEquals( expectedParameters.getSchemaRegionPerDataNode(), - clusterParameters.getSchemaRegionPerDataNode(), - 0.01); + clusterParameters.getSchemaRegionPerDataNode()); Assert.assertEquals( expectedParameters.getDiskSpaceWarningThreshold(), clusterParameters.getDiskSpaceWarningThreshold(), @@ -241,7 +239,7 @@ public void removeAndStopConfigNodeTest() throws Exception { } // Test stop ConfigNode - status = client.stopConfigNode(removedConfigNodeLocation); + status = client.stopAndClearConfigNode(removedConfigNodeLocation); assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); } } diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBAutoRegionGroupExtensionIT2.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBAutoRegionGroupExtension2IT.java similarity index 98% rename from integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBAutoRegionGroupExtensionIT2.java rename to integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBAutoRegionGroupExtension2IT.java index f091bafd542cb..5d00ef023ebc5 100644 --- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBAutoRegionGroupExtensionIT2.java +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBAutoRegionGroupExtension2IT.java @@ -56,10 +56,10 @@ @RunWith(IoTDBTestRunner.class) @Category({ClusterIT.class}) -public class IoTDBAutoRegionGroupExtensionIT2 { +public class IoTDBAutoRegionGroupExtension2IT { private static final Logger LOGGER = - LoggerFactory.getLogger(IoTDBAutoRegionGroupExtensionIT2.class); + LoggerFactory.getLogger(IoTDBAutoRegionGroupExtension2IT.class); private static final String testDataRegionGroupExtensionPolicy = "AUTO"; private static final String testConsensusProtocolClass = ConsensusFactory.IOT_CONSENSUS; diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBPartitionCreationIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBPartitionCreationIT.java index ed2423e304594..2a0ccffe60cd0 100644 --- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBPartitionCreationIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBPartitionCreationIT.java @@ -388,27 +388,48 @@ public void testPartitionAllocation() throws Exception { testTimePartitionInterval, dataPartitionTableResp.getDataPartitionTable()); - // Check Region count and status int runningCnt = 0; int unknownCnt = 0; int readOnlyCnt = 0; int removingCnt = 0; - TShowRegionResp showRegionResp = client.showRegion(new TShowRegionReq()); - Assert.assertEquals( - TSStatusCode.SUCCESS_STATUS.getStatusCode(), showRegionResp.getStatus().getCode()); - for (TRegionInfo regionInfo : 
showRegionResp.getRegionInfoList()) { - if (RegionStatus.Running.getStatus().equals(regionInfo.getStatus())) { - runningCnt += 1; - } else if (RegionStatus.Unknown.getStatus().equals(regionInfo.getStatus())) { - unknownCnt += 1; - } else if (RegionStatus.Removing.getStatus().equals(regionInfo.getStatus())) { - removingCnt += 1; - } else if (RegionStatus.ReadOnly.getStatus().equals(regionInfo.getStatus())) { - readOnlyCnt += 1; + TShowRegionResp showRegionResp; + + // Check Region count and status + for (int retry = 0; retry < 30; retry++) { + runningCnt = 0; + unknownCnt = 0; + readOnlyCnt = 0; + removingCnt = 0; + showRegionResp = client.showRegion(new TShowRegionReq()); + Assert.assertEquals( + TSStatusCode.SUCCESS_STATUS.getStatusCode(), showRegionResp.getStatus().getCode()); + for (TRegionInfo regionInfo : showRegionResp.getRegionInfoList()) { + if (RegionStatus.Running.getStatus().equals(regionInfo.getStatus())) { + runningCnt += 1; + } else if (RegionStatus.Unknown.getStatus().equals(regionInfo.getStatus())) { + unknownCnt += 1; + } else if (RegionStatus.Removing.getStatus().equals(regionInfo.getStatus())) { + removingCnt += 1; + } else if (RegionStatus.ReadOnly.getStatus().equals(regionInfo.getStatus())) { + readOnlyCnt += 1; + } + } + + if (runningCnt == 9 && removingCnt == 0 && readOnlyCnt == 1 && unknownCnt == 2) { + break; + } else { + LOGGER.info( + "Running: {}, Removing: {}, ReadOnly:{}, Unknown:{}", + runningCnt, + removingCnt, + readOnlyCnt, + unknownCnt); + TimeUnit.SECONDS.sleep(1); } } - Assert.assertEquals(8, runningCnt); - Assert.assertEquals(1, removingCnt); + + Assert.assertEquals(9, runningCnt); + Assert.assertEquals(0, removingCnt); Assert.assertEquals(1, readOnlyCnt); Assert.assertEquals(2, unknownCnt); @@ -455,9 +476,15 @@ public void testPartitionAllocation() throws Exception { readOnlyCnt += 1; } } - if (runningCnt == 10 && unknownCnt == 0 && readOnlyCnt == 1 && removingCnt == 1) { + if (runningCnt == 11 && unknownCnt == 0 && readOnlyCnt == 1 && removingCnt == 0) { return; } + LOGGER.info( + "Running: {}, Removing: {}, ReadOnly:{}, Unknown:{}", + runningCnt, + removingCnt, + readOnlyCnt, + unknownCnt); TimeUnit.SECONDS.sleep(1); } Assert.fail("Region status is not correct after 30s of recovery"); diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBPartitionDurableIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBPartitionDurableIT.java index 9e94d0ddabb0e..93780ec68dc0b 100644 --- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBPartitionDurableIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBPartitionDurableIT.java @@ -18,7 +18,6 @@ */ package org.apache.iotdb.confignode.it.partition; -import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TSStatus; @@ -32,8 +31,6 @@ import org.apache.iotdb.confignode.rpc.thrift.TDataPartitionTableResp; import org.apache.iotdb.confignode.rpc.thrift.TDatabaseSchema; import org.apache.iotdb.confignode.rpc.thrift.TRegionInfo; -import org.apache.iotdb.confignode.rpc.thrift.TSchemaPartitionReq; -import org.apache.iotdb.confignode.rpc.thrift.TSchemaPartitionTableResp; import org.apache.iotdb.confignode.rpc.thrift.TSetDataNodeStatusReq; import org.apache.iotdb.confignode.rpc.thrift.TShowClusterResp; import 
org.apache.iotdb.confignode.rpc.thrift.TShowDataNodesResp; @@ -122,133 +119,6 @@ public void tearDown() { EnvFactory.getEnv().cleanClusterEnvironment(); } - // TODO: Fix this when replica completion is supported - @Test - public void testRemovingDataNode() throws Exception { - try (SyncConfigNodeIServiceClient client = - (SyncConfigNodeIServiceClient) EnvFactory.getEnv().getLeaderConfigNodeConnection()) { - - /* Test getOrCreateSchemaPartition, ConfigNode should create SchemaPartition and return */ - TSchemaPartitionReq schemaPartitionReq = - new TSchemaPartitionReq() - .setPathPatternTree(ConfigNodeTestUtils.generatePatternTreeBuffer(new String[] {d0})); - TSchemaPartitionTableResp schemaPartitionTableResp = - client.getOrCreateSchemaPartitionTable(schemaPartitionReq); - Assert.assertEquals( - TSStatusCode.SUCCESS_STATUS.getStatusCode(), - schemaPartitionTableResp.getStatus().getCode()); - Map<String, Map<TSeriesPartitionSlot, TConsensusGroupId>> schemaPartitionTable = - schemaPartitionTableResp.getSchemaPartitionTable(); - // Successfully create a SchemaPartition - Assert.assertTrue(schemaPartitionTable.containsKey(sg)); - Assert.assertEquals(1, schemaPartitionTable.get(sg).size()); - - /* Check Region distribution */ - TShowRegionResp showRegionResp = client.showRegion(new TShowRegionReq()); - Assert.assertEquals( - TSStatusCode.SUCCESS_STATUS.getStatusCode(), showRegionResp.getStatus().getCode()); - // Create exactly one RegionGroup - Assert.assertEquals(3, showRegionResp.getRegionInfoListSize()); - // Each DataNode has exactly one Region - Set<Integer> dataNodeIdSet = new HashSet<>(); - showRegionResp - .getRegionInfoList() - .forEach(regionInfo -> dataNodeIdSet.add(regionInfo.getDataNodeId())); - Assert.assertEquals(3, dataNodeIdSet.size()); - - /* Change the NodeStatus of the test DataNode to Removing */ - TSetDataNodeStatusReq setDataNodeStatusReq = new TSetDataNodeStatusReq(); - DataNodeWrapper dataNodeWrapper = EnvFactory.getEnv().getDataNodeWrapper(testDataNodeId); - setDataNodeStatusReq.setTargetDataNode( - new TDataNodeLocation(defaultDataNode) - .setInternalEndPoint( - new TEndPoint() - .setIp(dataNodeWrapper.getInternalAddress()) - .setPort(dataNodeWrapper.getInternalPort()))); - setDataNodeStatusReq.setStatus(NodeStatus.Removing.getStatus()); - client.setDataNodeStatus(setDataNodeStatusReq); - // Waiting for heartbeat update - while (true) { - AtomicBoolean containRemoving = new AtomicBoolean(false); - TShowDataNodesResp showDataNodesResp = client.showDataNodes(); - showDataNodesResp - .getDataNodesInfoList() - .forEach( - dataNodeInfo -> { - if (NodeStatus.Removing.getStatus().equals(dataNodeInfo.getStatus())) { - containRemoving.set(true); - } - }); - - if (containRemoving.get()) { - break; - } - TimeUnit.SECONDS.sleep(1); - } - - /* Test getOrCreateSchemaPartition, the result should be NO_ENOUGH_DATANODE */ - schemaPartitionReq = - new TSchemaPartitionReq() - .setPathPatternTree(ConfigNodeTestUtils.generatePatternTreeBuffer(new String[] {d1})); - schemaPartitionTableResp = client.getOrCreateSchemaPartitionTable(schemaPartitionReq); - Assert.assertEquals( - TSStatusCode.NO_ENOUGH_DATANODE.getStatusCode(), - schemaPartitionTableResp.getStatus().getCode()); - - /* Register a new DataNode */ - EnvFactory.getEnv().registerNewDataNode(true); - - /* Test getOrCreateSchemaPartition, ConfigNode should create SchemaPartition and return */ - schemaPartitionReq = - new TSchemaPartitionReq() - .setPathPatternTree(ConfigNodeTestUtils.generatePatternTreeBuffer(new String[] {d1})); - schemaPartitionTableResp =
client.getOrCreateSchemaPartitionTable(schemaPartitionReq); - Assert.assertEquals( - TSStatusCode.SUCCESS_STATUS.getStatusCode(), - schemaPartitionTableResp.getStatus().getCode()); - schemaPartitionTable = schemaPartitionTableResp.getSchemaPartitionTable(); - // Successfully create a SchemaPartition - Assert.assertTrue(schemaPartitionTable.containsKey(sg)); - Assert.assertEquals(1, schemaPartitionTable.get(sg).size()); - - /* Check Region distribution */ - showRegionResp = client.showRegion(new TShowRegionReq()); - Assert.assertEquals( - TSStatusCode.SUCCESS_STATUS.getStatusCode(), showRegionResp.getStatus().getCode()); - // There should be 2 RegionGroups - Assert.assertEquals(6, showRegionResp.getRegionInfoListSize()); - // The new RegionGroup should keep away from the Removing DataNode - Map<Integer, AtomicInteger> regionCounter = new ConcurrentHashMap<>(); - showRegionResp - .getRegionInfoList() - .forEach( - regionInfo -> - regionCounter - .computeIfAbsent(regionInfo.getDataNodeId(), empty -> new AtomicInteger(0)) - .getAndIncrement()); - dataNodeIdSet.forEach(dataNodeId -> regionCounter.get(dataNodeId).getAndDecrement()); - TShowDataNodesResp showDataNodesResp = client.showDataNodes(); - Assert.assertEquals( - TSStatusCode.SUCCESS_STATUS.getStatusCode(), showDataNodesResp.getStatus().getCode()); - regionCounter.forEach( - (dataNodeId, regionCount) -> { - String nodeStatus = - showDataNodesResp.getDataNodesInfoList().stream() - .filter(dataNodeInfo -> dataNodeInfo.getDataNodeId() == dataNodeId) - .findFirst() - .orElse(new TDataNodeInfo().setStatus("ERROR")) - .getStatus(); - if (NodeStatus.Removing.getStatus().equals(nodeStatus)) { - Assert.assertEquals(0, regionCount.get()); - } else if (NodeStatus.Running.getStatus().equals(nodeStatus)) { - Assert.assertEquals(1, regionCount.get()); - } else { - Assert.fail(); - } - }); - } - } - // TODO: Fix this when replica completion is supported @Test public void testReadOnlyDataNode() throws Exception { diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBPartitionTableAutoCleanIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBPartitionTableAutoCleanIT.java new file mode 100644 index 0000000000000..a7addb87c1d6c --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBPartitionTableAutoCleanIT.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.apache.iotdb.confignode.it.partition; + +import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; +import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient; +import org.apache.iotdb.confignode.rpc.thrift.TDataPartitionReq; +import org.apache.iotdb.confignode.rpc.thrift.TDataPartitionTableResp; +import org.apache.iotdb.it.env.EnvFactory; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.ClusterIT; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.sql.Connection; +import java.sql.Statement; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; + +@RunWith(IoTDBTestRunner.class) +@Category({ClusterIT.class}) +public class IoTDBPartitionTableAutoCleanIT { + + private static final String TREE_DATABASE_PREFIX = "root.db.g_"; + + private static final int TEST_REPLICATION_FACTOR = 1; + private static final long TEST_TIME_PARTITION_INTERVAL = 604800000; + private static final long TEST_TTL_CHECK_INTERVAL = 5_000; + + private static final TTimePartitionSlot TEST_CURRENT_TIME_SLOT = + new TTimePartitionSlot() + .setStartTime( + System.currentTimeMillis() + / TEST_TIME_PARTITION_INTERVAL + * TEST_TIME_PARTITION_INTERVAL); + private static final long TEST_TTL = 7 * TEST_TIME_PARTITION_INTERVAL; + + @Before + public void setUp() throws Exception { + EnvFactory.getEnv() + .getConfig() + .getCommonConfig() + .setSchemaReplicationFactor(TEST_REPLICATION_FACTOR) + .setDataReplicationFactor(TEST_REPLICATION_FACTOR) + .setTimePartitionInterval(TEST_TIME_PARTITION_INTERVAL) + .setTTLCheckInterval(TEST_TTL_CHECK_INTERVAL); + + // Init 1C1D environment + EnvFactory.getEnv().initClusterEnvironment(1, 1); + } + + @After + public void tearDown() { + EnvFactory.getEnv().cleanClusterEnvironment(); + } + + @Test + public void testAutoCleanPartitionTableForTreeModel() throws Exception { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + // Create databases and insert test data + for (int i = 0; i < 3; i++) { + String databaseName = String.format("%s%d", TREE_DATABASE_PREFIX, i); + statement.execute(String.format("CREATE DATABASE %s", databaseName)); + statement.execute( + String.format( + "CREATE TIMESERIES %s.s WITH DATATYPE=INT64,ENCODING=PLAIN", databaseName)); + // Insert expired data + statement.execute( + String.format( + "INSERT INTO %s(timestamp, s) VALUES (%d, %d)", + databaseName, TEST_CURRENT_TIME_SLOT.getStartTime() - TEST_TTL * 2, -1)); + // Insert existing data + statement.execute( + String.format( + "INSERT INTO %s(timestamp, s) VALUES (%d, %d)", + databaseName, TEST_CURRENT_TIME_SLOT.getStartTime(), 1)); + } + // Let db0.TTL > device.TTL; the effective TTL should be the larger one + statement.execute(String.format("SET TTL TO %s0 %d", TREE_DATABASE_PREFIX, TEST_TTL)); + statement.execute(String.format("SET TTL TO %s0.s %d", TREE_DATABASE_PREFIX, 10)); + // Let db1.TTL < device.TTL; the effective TTL should be the larger one + statement.execute(String.format("SET TTL TO %s1 %d", TREE_DATABASE_PREFIX, 10)); + statement.execute(String.format("SET TTL TO %s1.s %d", TREE_DATABASE_PREFIX, TEST_TTL)); + // Set TTL to path db2.** + statement.execute(String.format("SET TTL TO %s2.** %d", TREE_DATABASE_PREFIX, TEST_TTL)); + }
TDataPartitionReq(); + for (int i = 0; i < 3; i++) { + req.putToPartitionSlotsMap(String.format("%s%d", TREE_DATABASE_PREFIX, i), new TreeMap<>()); + } + try (SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) EnvFactory.getEnv().getLeaderConfigNodeConnection()) { + for (int retry = 0; retry < 120; retry++) { + boolean partitionTableAutoCleaned = true; + TDataPartitionTableResp resp = client.getDataPartitionTable(req); + if (TSStatusCode.SUCCESS_STATUS.getStatusCode() == resp.getStatus().getCode()) { + partitionTableAutoCleaned = + resp.getDataPartitionTable().entrySet().stream() + .flatMap(e1 -> e1.getValue().entrySet().stream()) + .allMatch(e2 -> e2.getValue().size() == 1); + } + if (partitionTableAutoCleaned) { + return; + } + TimeUnit.SECONDS.sleep(1); + } + } + Assert.fail("The PartitionTable in the ConfigNode is not auto cleaned!"); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBPartitionTableAutoCleanUSIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBPartitionTableAutoCleanUSIT.java new file mode 100644 index 0000000000000..f8e94c28dcbdc --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/partition/IoTDBPartitionTableAutoCleanUSIT.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
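[Editor's aside, not part of the patch] Both AutoClean ITs in this hunk locate the start of the current time partition with the same flooring idiom: integer division truncates, so t / interval * interval rounds a timestamp down to a multiple of the partition interval. A minimal sketch (class name is made up; the constant mirrors TEST_TIME_PARTITION_INTERVAL):

public final class TimeSlotSketch {
  public static void main(String[] args) {
    long intervalMs = 604_800_000L; // one week, as in the ITs above
    long nowMs = System.currentTimeMillis();
    long slotStartMs = nowMs / intervalMs * intervalMs; // floor to slot start (ms precision)
    // The microsecond-precision variant first scales ms -> us, then floors the same way:
    long slotStartUs = nowMs * 1000L / intervalMs * intervalMs;
    System.out.println(slotStartMs + " / " + slotStartUs);
  }
}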
+ */ + +package org.apache.iotdb.confignode.it.partition; + +import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; +import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient; +import org.apache.iotdb.confignode.rpc.thrift.TDataPartitionReq; +import org.apache.iotdb.confignode.rpc.thrift.TDataPartitionTableResp; +import org.apache.iotdb.it.env.EnvFactory; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.ClusterIT; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.sql.Connection; +import java.sql.Statement; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; + +@RunWith(IoTDBTestRunner.class) +@Category({ClusterIT.class}) +public class IoTDBPartitionTableAutoCleanUSIT { + + private static final String TREE_DATABASE_PREFIX = "root.db.g_"; + + private static final int TEST_REPLICATION_FACTOR = 1; + private static final long TEST_TIME_PARTITION_INTERVAL_IN_MS = 604800_000; + private static final long TEST_TTL_CHECK_INTERVAL = 5_000; + + private static final TTimePartitionSlot TEST_CURRENT_TIME_SLOT = + new TTimePartitionSlot() + .setStartTime( + System.currentTimeMillis() + * 1000L + / TEST_TIME_PARTITION_INTERVAL_IN_MS + * TEST_TIME_PARTITION_INTERVAL_IN_MS); + private static final long TEST_TTL_IN_MS = 7 * TEST_TIME_PARTITION_INTERVAL_IN_MS; + + @Before + public void setUp() throws Exception { + EnvFactory.getEnv() + .getConfig() + .getCommonConfig() + .setSchemaReplicationFactor(TEST_REPLICATION_FACTOR) + .setDataReplicationFactor(TEST_REPLICATION_FACTOR) + .setTimePartitionInterval(TEST_TIME_PARTITION_INTERVAL_IN_MS) + .setTTLCheckInterval(TEST_TTL_CHECK_INTERVAL) + // Note that the time precision of IoTDB is us in this IT + .setTimestampPrecision("us"); + + // Init 1C1D environment + EnvFactory.getEnv().initClusterEnvironment(1, 1); + } + + @After + public void tearDown() { + EnvFactory.getEnv().cleanClusterEnvironment(); + } + + @Test + public void testAutoCleanPartitionTableForTreeModel() throws Exception { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + // Create databases and insert test data + for (int i = 0; i < 3; i++) { + String databaseName = String.format("%s%d", TREE_DATABASE_PREFIX, i); + statement.execute(String.format("CREATE DATABASE %s", databaseName)); + statement.execute( + String.format( + "CREATE TIMESERIES %s.s WITH DATATYPE=INT64,ENCODING=PLAIN", databaseName)); + // Insert expired data + statement.execute( + String.format( + "INSERT INTO %s(timestamp, s) VALUES (%d, %d)", + databaseName, TEST_CURRENT_TIME_SLOT.getStartTime() - TEST_TTL_IN_MS * 2000, -1)); + // Insert existed data + statement.execute( + String.format( + "INSERT INTO %s(timestamp, s) VALUES (%d, %d)", + databaseName, TEST_CURRENT_TIME_SLOT.getStartTime(), 1)); + } + // Let db0.TTL > device.TTL, the valid TTL should be the bigger one + statement.execute(String.format("SET TTL TO %s0 %d", TREE_DATABASE_PREFIX, TEST_TTL_IN_MS)); + statement.execute(String.format("SET TTL TO %s0.s %d", TREE_DATABASE_PREFIX, 10)); + // Let db1.TTL < device.TTL, the valid TTL should be the bigger one + statement.execute(String.format("SET TTL TO %s1 %d", TREE_DATABASE_PREFIX, 10)); + statement.execute(String.format("SET TTL TO %s1.s %d", TREE_DATABASE_PREFIX, 
TEST_TTL_IN_MS)); + // Set TTL to path db2.** + statement.execute( + String.format("SET TTL TO %s2.** %d", TREE_DATABASE_PREFIX, TEST_TTL_IN_MS)); + } + TDataPartitionReq req = new TDataPartitionReq(); + for (int i = 0; i < 3; i++) { + req.putToPartitionSlotsMap(String.format("%s%d", TREE_DATABASE_PREFIX, i), new TreeMap<>()); + } + try (SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) EnvFactory.getEnv().getLeaderConfigNodeConnection()) { + for (int retry = 0; retry < 120; retry++) { + boolean partitionTableAutoCleaned = true; + TDataPartitionTableResp resp = client.getDataPartitionTable(req); + if (TSStatusCode.SUCCESS_STATUS.getStatusCode() == resp.getStatus().getCode()) { + partitionTableAutoCleaned = + resp.getDataPartitionTable().entrySet().stream() + .flatMap(e1 -> e1.getValue().entrySet().stream()) + .allMatch(e2 -> e2.getValue().size() == 1); + } + if (partitionTableAutoCleaned) { + return; + } + TimeUnit.SECONDS.sleep(1); + } + } + Assert.fail("The PartitionTable in the ConfigNode is not auto cleaned!"); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/IoTDBRegionMigrateDataNodeCrashITFramework.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/IoTDBRegionMigrateDataNodeCrashITFramework.java index 4a363d196e799..e1003eede0ccd 100644 --- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/IoTDBRegionMigrateDataNodeCrashITFramework.java +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/IoTDBRegionMigrateDataNodeCrashITFramework.java @@ -22,7 +22,8 @@ import org.apache.iotdb.commons.utils.KillPoint.KillNode; public class IoTDBRegionMigrateDataNodeCrashITFramework - extends IoTDBRegionMigrateReliabilityITFramework { + extends IoTDBRegionOperationReliabilityITFramework { + @SafeVarargs public final > void success(T... 
dataNodeKillPoints) throws Exception { successTest(1, 1, 1, 2, noKillPoints(), buildSet(dataNodeKillPoints), KillNode.ALL_NODES); diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/IoTDBRegionMigrateReliabilityITFramework.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/IoTDBRegionOperationReliabilityITFramework.java similarity index 75% rename from integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/IoTDBRegionMigrateReliabilityITFramework.java rename to integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/IoTDBRegionOperationReliabilityITFramework.java index d52251b4a1a56..0dccc669eebff 100644 --- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/IoTDBRegionMigrateReliabilityITFramework.java +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/IoTDBRegionOperationReliabilityITFramework.java @@ -21,6 +21,7 @@ import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient; +import org.apache.iotdb.commons.cluster.RegionStatus; import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; import org.apache.iotdb.commons.conf.IoTDBConstant; import org.apache.iotdb.commons.utils.KillPoint.KillNode; @@ -29,17 +30,20 @@ import org.apache.iotdb.confignode.rpc.thrift.TShowRegionReq; import org.apache.iotdb.confignode.rpc.thrift.TShowRegionResp; import org.apache.iotdb.consensus.ConsensusFactory; -import org.apache.iotdb.consensus.iot.IoTConsensusServerImpl; import org.apache.iotdb.db.queryengine.common.header.ColumnHeaderConstant; +import org.apache.iotdb.isession.SessionDataSet; import org.apache.iotdb.it.env.EnvFactory; import org.apache.iotdb.it.env.cluster.env.AbstractEnv; import org.apache.iotdb.it.env.cluster.node.AbstractNodeWrapper; import org.apache.iotdb.it.env.cluster.node.ConfigNodeWrapper; import org.apache.iotdb.it.env.cluster.node.DataNodeWrapper; -import org.apache.iotdb.itbase.exception.InconsistentDataException; import org.apache.iotdb.metrics.utils.SystemType; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.Session; import org.apache.thrift.TException; +import org.apache.tsfile.read.common.Field; import org.awaitility.Awaitility; import org.awaitility.core.ConditionTimeoutException; import org.junit.After; @@ -52,9 +56,6 @@ import java.io.File; import java.io.IOException; import java.io.InputStreamReader; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Proxy; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; @@ -69,27 +70,30 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentHashMap.KeySetView; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import java.util.function.Predicate; import java.util.stream.Collectors; -public class IoTDBRegionMigrateReliabilityITFramework { +import static org.apache.iotdb.util.MagicUtils.makeItCloseQuietly; + +public class IoTDBRegionOperationReliabilityITFramework { private static final Logger LOGGER = - 
LoggerFactory.getLogger(IoTDBRegionMigrateReliabilityITFramework.class); - private static final String INSERTION1 = + LoggerFactory.getLogger(IoTDBRegionOperationReliabilityITFramework.class); + public static final String INSERTION1 = "INSERT INTO root.sg.d1(timestamp,speed,temperature) values(100, 1, 2)"; private static final String INSERTION2 = "INSERT INTO root.sg.d1(timestamp,speed,temperature) values(101, 3, 4)"; - private static final String FLUSH_COMMAND = "flush"; + public static final String FLUSH_COMMAND = "flush on cluster"; private static final String SHOW_REGIONS = "show regions"; private static final String SHOW_DATANODES = "show datanodes"; private static final String COUNT_TIMESERIES = "select count(*) from root.sg.**"; private static final String REGION_MIGRATE_COMMAND_FORMAT = "migrate region %d from %d to %d"; - private static final String CONFIGURATION_FILE_NAME = "configuration.dat"; ExecutorService executorService = IoTDBThreadPoolFactory.newCachedThreadPool("regionMigrateIT"); public static Consumer actionOfKillNode = @@ -208,34 +212,24 @@ public void generalTestWithAllOptions( EnvFactory.getEnv().registerDataNodeKillPoints(new ArrayList<>(dataNodeKeywords)); EnvFactory.getEnv().initClusterEnvironment(configNodeNum, dataNodeNum); - try (final Connection connection = closeQuietly(EnvFactory.getEnv().getConnection()); - final Statement statement = closeQuietly(connection.createStatement()); + try (final Connection connection = makeItCloseQuietly(EnvFactory.getEnv().getConnection()); + final Statement statement = makeItCloseQuietly(connection.createStatement()); SyncConfigNodeIServiceClient client = (SyncConfigNodeIServiceClient) EnvFactory.getEnv().getLeaderConfigNodeConnection()) { + // prepare data statement.execute(INSERTION1); + statement.execute(FLUSH_COMMAND); - ResultSet result = statement.executeQuery(SHOW_REGIONS); - Map> regionMap = getRegionMap(result); - - result = statement.executeQuery(SHOW_DATANODES); - Set allDataNodeId = new HashSet<>(); - while (result.next()) { - allDataNodeId.add(result.getInt(ColumnHeaderConstant.NODE_ID)); - } + // collect necessary information + Map> regionMap = getDataRegionMap(statement); + Set allDataNodeId = getAllDataNodes(statement); + // select region migration related DataNodes final int selectedRegion = selectRegion(regionMap); final int originalDataNode = selectOriginalDataNode(regionMap, selectedRegion); - final int destDataNode = selectDestDataNode(allDataNodeId, regionMap, selectedRegion); - + final int destDataNode = + selectDataNodeNotContainsRegion(allDataNodeId, regionMap, selectedRegion); checkRegionFileExist(originalDataNode); - checkPeersExist(regionMap.get(selectedRegion), originalDataNode, selectedRegion); - - try { - awaitUntilFlush(statement, originalDataNode); - } catch (ConditionTimeoutException e) { - LOGGER.error("Flush timeout:", e); - Assert.fail(); - } // set kill points if (killNode == KillNode.ORIGINAL_DATANODE) { @@ -270,8 +264,19 @@ public void generalTestWithAllOptions( statement.execute(buildRegionMigrateCommand(selectedRegion, originalDataNode, destDataNode)); boolean success = false; + Predicate migrateRegionPredicate = + tShowRegionResp -> { + Map> newRegionMap = + getRegionMap(tShowRegionResp.getRegionInfoList()); + Set dataNodes = newRegionMap.get(selectedRegion); + return !dataNodes.contains(originalDataNode) && dataNodes.contains(destDataNode); + }; try { - awaitUntilSuccess(client, selectedRegion, originalDataNode, destDataNode); + awaitUntilSuccess( + client, + migrateRegionPredicate, 
+ Optional.of(destDataNode), + Optional.of(originalDataNode)); success = true; } catch (ConditionTimeoutException e) { if (expectMigrateSuccess) { @@ -292,42 +297,23 @@ public void generalTestWithAllOptions( if (success) { checkRegionFileClearIfNodeAlive(originalDataNode); checkRegionFileExistIfNodeAlive(destDataNode); - checkPeersClearIfNodeAlive(allDataNodeId, originalDataNode, selectedRegion); checkClusterStillWritable(); } else { checkRegionFileClearIfNodeAlive(destDataNode); checkRegionFileExistIfNodeAlive(originalDataNode); - checkPeersClearIfNodeAlive(allDataNodeId, destDataNode, selectedRegion); } LOGGER.info("test pass"); - } catch (InconsistentDataException ignore) { - } } - private void restartDataNodes(List dataNodeWrappers) { - dataNodeWrappers.parallelStream() - .forEach( - nodeWrapper -> { - nodeWrapper.stop(); - Awaitility.await() - .atMost(1, TimeUnit.MINUTES) - .pollDelay(2, TimeUnit.SECONDS) - .until(() -> !nodeWrapper.isAlive()); - LOGGER.info("Node {} stopped.", nodeWrapper.getId()); - nodeWrapper.start(); - Awaitility.await() - .atMost(1, TimeUnit.MINUTES) - .pollDelay(2, TimeUnit.SECONDS) - .until(nodeWrapper::isAlive); - try { - TimeUnit.SECONDS.sleep(10); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - LOGGER.info("Node {} restarted.", nodeWrapper.getId()); - }); + protected Set getAllDataNodes(Statement statement) throws Exception { + ResultSet result = statement.executeQuery(SHOW_DATANODES); + Set allDataNodeId = new HashSet<>(); + while (result.next()) { + allDataNodeId.add(result.getInt(ColumnHeaderConstant.NODE_ID)); + } + return allDataNodeId; } private void setConfigNodeKillPoints( @@ -444,8 +430,8 @@ private static String buildRegionMigrateCommand(int who, int from, int to) { return result; } - private static Map> getRegionMap(ResultSet showRegionsResult) - throws SQLException { + public static Map> getDataRegionMap(Statement statement) throws Exception { + ResultSet showRegionsResult = statement.executeQuery(SHOW_REGIONS); Map> regionMap = new HashMap<>(); while (showRegionsResult.next()) { if (String.valueOf(TConsensusGroupType.DataRegion) @@ -458,7 +444,18 @@ private static Map> getRegionMap(ResultSet showRegionsResu return regionMap; } - private static Map> getRegionMap(List regionInfoList) { + public static Map> getAllRegionMap(Statement statement) throws Exception { + ResultSet showRegionsResult = statement.executeQuery(SHOW_REGIONS); + Map> regionMap = new HashMap<>(); + while (showRegionsResult.next()) { + int regionId = showRegionsResult.getInt(ColumnHeaderConstant.REGION_ID); + int dataNodeId = showRegionsResult.getInt(ColumnHeaderConstant.DATA_NODE_ID); + regionMap.computeIfAbsent(regionId, id -> new HashSet<>()).add(dataNodeId); + } + return regionMap; + } + + protected static Map> getRegionMap(List regionInfoList) { Map> regionMap = new HashMap<>(); regionInfoList.forEach( regionInfo -> { @@ -470,7 +467,22 @@ private static Map> getRegionMap(List regionI return regionMap; } - private static int selectRegion(Map> regionMap) { + protected static Map> getRunningRegionMap( + List regionInfoList) { + Map> regionMap = new HashMap<>(); + regionInfoList.stream() + .filter(regionInfo -> RegionStatus.Running.getStatus().equals(regionInfo.getStatus())) + .forEach( + regionInfo -> { + int regionId = regionInfo.getConsensusGroupId().getId(); + regionMap + .computeIfAbsent(regionId, regionId1 -> new HashSet<>()) + .add(regionInfo.getDataNodeId()); + }); + return regionMap; + } + + protected static int selectRegion(Map> 
regionMap) { return regionMap.keySet().stream().findAny().orElseThrow(() -> new RuntimeException("gg")); } @@ -481,7 +493,7 @@ private static int selectOriginalDataNode( .orElseThrow(() -> new RuntimeException("cannot find original DataNode")); } - private static int selectDestDataNode( + protected static int selectDataNodeNotContainsRegion( Set dataNodeSet, Map> regionMap, int selectedRegion) { return dataNodeSet.stream() .filter(dataNodeId -> !regionMap.get(selectedRegion).contains(dataNodeId)) @@ -489,7 +501,16 @@ private static int selectDestDataNode( .orElseThrow(() -> new RuntimeException("cannot find dest DataNode")); } - private static void awaitUntilFlush(Statement statement, int originalDataNode) { + protected static int selectDataNodeContainsRegion( + Set dataNodeSet, Map> regionMap, int selectedRegion) { + return dataNodeSet.stream() + .filter(dataNodeId -> regionMap.get(selectedRegion).contains(dataNodeId)) + .findAny() + .orElseThrow(() -> new RuntimeException("cannot find dest DataNode")); + } + + // I believe this function is not necessary, just keep it here in case it's necessary + private static void awaitUntilFlush(Statement statement, int originalDataNode) throws Exception { long startTime = System.currentTimeMillis(); File sequence = new File(buildDataPath(originalDataNode, true)); File unsequence = new File(buildDataPath(originalDataNode, false)); @@ -512,11 +533,11 @@ private static void awaitUntilFlush(Statement statement, int originalDataNode) { LOGGER.info("Flush cost time: {}ms", System.currentTimeMillis() - startTime); } - private static void awaitUntilSuccess( + protected static void awaitUntilSuccess( SyncConfigNodeIServiceClient client, - int selectedRegion, - int originalDataNode, - int destDataNode) { + Predicate predicate, + Optional dataNodeExpectInRegionGroup, + Optional dataNodeExpectNotInRegionGroup) { AtomicReference> lastTimeDataNodes = new AtomicReference<>(); AtomicReference lastException = new AtomicReference<>(); AtomicReference clientRef = new AtomicReference<>(client); @@ -528,10 +549,7 @@ private static void awaitUntilSuccess( () -> { try { TShowRegionResp resp = clientRef.get().showRegion(new TShowRegionReq()); - Map> newRegionMap = getRegionMap(resp.getRegionInfoList()); - Set dataNodes = newRegionMap.get(selectedRegion); - lastTimeDataNodes.set(dataNodes); - return !dataNodes.contains(originalDataNode) && dataNodes.contains(destDataNode); + return predicate.test(resp); } catch (TException e) { clientRef.set( (SyncConfigNodeIServiceClient) @@ -552,8 +570,8 @@ private static void awaitUntilSuccess( throw e; } String actualSetStr = lastTimeDataNodes.get().toString(); - lastTimeDataNodes.get().remove(originalDataNode); - lastTimeDataNodes.get().add(destDataNode); + dataNodeExpectNotInRegionGroup.ifPresent(x -> lastTimeDataNodes.get().remove(x)); + dataNodeExpectInRegionGroup.ifPresent(x -> lastTimeDataNodes.get().add(x)); String expectSetStr = lastTimeDataNodes.toString(); LOGGER.error("DataNode Set {} is unexpected, expect {}", actualSetStr, expectSetStr); if (lastException.get() == null) { @@ -600,63 +618,6 @@ private static void checkRegionFileClear(int dataNode) { LOGGER.info("Original DataNode {} region file clear", dataNode); } - private static void checkPeersExistIfNodeAlive( - Set dataNodes, int originalDataNode, int regionId) { - dataNodes.forEach( - targetDataNode -> checkPeerExistIfNodeAlive(targetDataNode, originalDataNode, regionId)); - } - - private static void checkPeersExist(Set dataNodes, int originalDataNode, int regionId) { - 
dataNodes.forEach(targetDataNode -> checkPeerExist(targetDataNode, originalDataNode, regionId)); - } - - private static void checkPeerExistIfNodeAlive( - int checkTargetDataNode, int originalDataNode, int regionId) { - if (EnvFactory.getEnv().dataNodeIdToWrapper(checkTargetDataNode).get().isAlive()) { - checkPeerExist(checkTargetDataNode, originalDataNode, regionId); - } - } - - private static void checkPeerExist(int checkTargetDataNode, int originalDataNode, int regionId) { - File expectExistedFile = - new File(buildConfigurationDataFilePath(checkTargetDataNode, originalDataNode, regionId)); - Assert.assertTrue( - "configuration file should exist, but it didn't: " + expectExistedFile.getPath(), - expectExistedFile.exists()); - } - - private static void checkPeersClearIfNodeAlive( - Set dataNodes, int originalDataNode, int regionId) { - dataNodes.stream() - .filter(dataNode -> dataNode != originalDataNode) - .forEach( - targetDataNode -> - checkPeerClearIfNodeAlive(targetDataNode, originalDataNode, regionId)); - } - - private static void checkPeersClear(Set dataNodes, int originalDataNode, int regionId) { - dataNodes.stream() - .filter(dataNode -> dataNode != originalDataNode) - .forEach(targetDataNode -> checkPeerClear(targetDataNode, originalDataNode, regionId)); - LOGGER.info("Peer clear"); - } - - private static void checkPeerClearIfNodeAlive( - int checkTargetDataNode, int originalDataNode, int regionId) { - if (EnvFactory.getEnv().dataNodeIdToWrapper(checkTargetDataNode).get().isAlive()) { - checkPeerClear(checkTargetDataNode, originalDataNode, regionId); - } - } - - private static void checkPeerClear(int checkTargetDataNode, int originalDataNode, int regionId) { - File expectDeletedFile = - new File(buildConfigurationDataFilePath(checkTargetDataNode, originalDataNode, regionId)); - Assert.assertFalse( - "configuration file should be deleted, but it didn't: " + expectDeletedFile.getPath(), - expectDeletedFile.exists()); - LOGGER.info("configuration file has been deleted: {}", expectDeletedFile.getPath()); - } - private void checkClusterStillWritable() { try (Connection connection = EnvFactory.getEnv().getConnection(); Statement statement = connection.createStatement()) { @@ -705,16 +666,6 @@ private static String buildDataPath(int dataNode, boolean isSequence) { + (isSequence ? IoTDBConstant.SEQUENCE_FOLDER_NAME : IoTDBConstant.UNSEQUENCE_FOLDER_NAME); } - private static String buildConfigurationDataFilePath( - int localDataNodeId, int remoteDataNodeId, int regionId) { - String configurationDatDirName = - buildRegionDirPath(localDataNodeId) + File.separator + "1_" + regionId; - String expectDeletedFileName = - IoTConsensusServerImpl.generateConfigurationDatFileName( - remoteDataNodeId, CONFIGURATION_FILE_NAME); - return configurationDatDirName + File.separator + expectDeletedFileName; - } - protected static KeySetView noKillPoints() { return ConcurrentHashMap.newKeySet(); } @@ -727,26 +678,21 @@ protected static > KeySetView buildSet(T... 
k return result; } - private static T closeQuietly(T t) { - InvocationHandler handler = - (proxy, method, args) -> { - try { - if (method.getName().equals("close")) { - try { - method.invoke(t, args); - } catch (Throwable e) { - LOGGER.warn("Exception happens during close(): ", e); - } - return null; - } else { - return method.invoke(t, args); - } - } catch (InvocationTargetException e) { - throw e.getTargetException(); - } - }; - return (T) - Proxy.newProxyInstance( - t.getClass().getClassLoader(), t.getClass().getInterfaces(), handler); + protected static Map getRegionStatusWithoutRunning(Session session) + throws IoTDBConnectionException, StatementExecutionException { + SessionDataSet dataSet = session.executeQueryStatement("show regions"); + final int regionIdIndex = dataSet.getColumnNames().indexOf("RegionId"); + final int regionStatusIndex = dataSet.getColumnNames().indexOf("Status"); + dataSet.setFetchSize(1024); + Map result = new TreeMap<>(); + while (dataSet.hasNext()) { + List fields = dataSet.next().getFields(); + final int regionId = fields.get(regionIdIndex).getIntV(); + final String regionStatus = fields.get(regionStatusIndex).toString(); + if (!"Running".equals(regionStatus)) { + result.putIfAbsent(regionId, regionStatus); + } + } + return result; } } diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateClusterCrashIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateClusterCrashIT.java index da6bf2dfdc6f1..23829ae31ee45 100644 --- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateClusterCrashIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateClusterCrashIT.java @@ -19,7 +19,7 @@ package org.apache.iotdb.confignode.it.regionmigration.pass; -import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionMigrateReliabilityITFramework; +import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework; import org.apache.iotdb.confignode.procedure.state.AddRegionPeerState; import org.apache.iotdb.confignode.procedure.state.RemoveRegionPeerState; import org.apache.iotdb.it.framework.IoTDBTestRunner; @@ -31,7 +31,7 @@ @Category({DailyIT.class}) @RunWith(IoTDBTestRunner.class) -public class IoTDBRegionMigrateClusterCrashIT extends IoTDBRegionMigrateReliabilityITFramework { +public class IoTDBRegionMigrateClusterCrashIT extends IoTDBRegionOperationReliabilityITFramework { @Test public void clusterCrash1() throws Exception { diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateConfigNodeCrashIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateConfigNodeCrashIT.java index f6916eff40282..72d8d129cfce6 100644 --- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateConfigNodeCrashIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateConfigNodeCrashIT.java @@ -21,7 +21,7 @@ import org.apache.iotdb.commons.utils.KillPoint.KillNode; import org.apache.iotdb.commons.utils.KillPoint.KillPoint; -import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionMigrateReliabilityITFramework; +import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework; import 
org.apache.iotdb.confignode.procedure.state.AddRegionPeerState; import org.apache.iotdb.confignode.procedure.state.RegionTransitionState; import org.apache.iotdb.confignode.procedure.state.RemoveRegionPeerState; @@ -39,7 +39,8 @@ @Category({DailyIT.class}) @RunWith(IoTDBTestRunner.class) -public class IoTDBRegionMigrateConfigNodeCrashIT extends IoTDBRegionMigrateReliabilityITFramework { +public class IoTDBRegionMigrateConfigNodeCrashIT + extends IoTDBRegionOperationReliabilityITFramework { @Test @Ignore public void cnCrashDuringPreCheckTest() throws Exception { diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateNormalIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateNormalIT.java index f56296b9cfaea..e2d6a9db57ba2 100644 --- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateNormalIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateNormalIT.java @@ -20,7 +20,7 @@ package org.apache.iotdb.confignode.it.regionmigration.pass; import org.apache.iotdb.commons.utils.KillPoint.KillNode; -import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionMigrateReliabilityITFramework; +import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework; import org.apache.iotdb.it.framework.IoTDBTestRunner; import org.apache.iotdb.itbase.category.ClusterIT; @@ -30,7 +30,7 @@ @Category({ClusterIT.class}) @RunWith(IoTDBTestRunner.class) -public class IoTDBRegionMigrateNormalIT extends IoTDBRegionMigrateReliabilityITFramework { +public class IoTDBRegionMigrateNormalIT extends IoTDBRegionOperationReliabilityITFramework { @Test public void normal1C2DTest() throws Exception { successTest(1, 1, 1, 2, noKillPoints(), noKillPoints(), KillNode.ALL_NODES); diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateOtherIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateOtherIT.java index f4ca461edd8b6..b5b2f7759e739 100644 --- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateOtherIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateOtherIT.java @@ -21,7 +21,7 @@ import org.apache.iotdb.commons.utils.KillPoint.KillNode; import org.apache.iotdb.commons.utils.KillPoint.NeverTriggeredKillPoint; -import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionMigrateReliabilityITFramework; +import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework; import org.apache.iotdb.it.framework.IoTDBTestRunner; import org.apache.iotdb.itbase.category.ClusterIT; @@ -32,7 +32,7 @@ @RunWith(IoTDBTestRunner.class) @Category({ClusterIT.class}) -public class IoTDBRegionMigrateOtherIT extends IoTDBRegionMigrateReliabilityITFramework { +public class IoTDBRegionMigrateOtherIT extends IoTDBRegionOperationReliabilityITFramework { @Test public void badKillPoint() throws Exception { try { diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/commit/IoTDBRegionGroupExpandAndShrinkForIoTV1IT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/commit/IoTDBRegionGroupExpandAndShrinkForIoTV1IT.java new file mode 100644 index 
0000000000000..3c4aa16a401e4 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/commit/IoTDBRegionGroupExpandAndShrinkForIoTV1IT.java @@ -0,0 +1,173 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.it.regionmigration.pass.commit; + +import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient; +import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework; +import org.apache.iotdb.confignode.rpc.thrift.TShowRegionResp; +import org.apache.iotdb.consensus.ConsensusFactory; +import org.apache.iotdb.it.env.EnvFactory; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.ClusterIT; + +import org.awaitility.Awaitility; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.Statement; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.function.Predicate; + +import static org.apache.iotdb.util.MagicUtils.makeItCloseQuietly; + +@Category({ClusterIT.class}) +@RunWith(IoTDBTestRunner.class) +public class IoTDBRegionGroupExpandAndShrinkForIoTV1IT + extends IoTDBRegionOperationReliabilityITFramework { + private static final String EXPAND_FORMAT = "extend region %d to %d"; + private static final String SHRINK_FORMAT = "remove region %d from %d"; + + private static Logger LOGGER = + LoggerFactory.getLogger(IoTDBRegionGroupExpandAndShrinkForIoTV1IT.class); + + /** + * 1. Expand: {a} -> {a,b} -> ... -> {a,b,c,d,e} + * + *

<p>2. Check + * + *
<p>3. Shrink: {a,b,c,d,e} -> {a,c,d,e} -> ... -> {d} + * + *
<p>
4. Check + */ + @Test + public void normal1C5DTest() throws Exception { + EnvFactory.getEnv() + .getConfig() + .getCommonConfig() + .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS) + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setDataReplicationFactor(1) + .setSchemaReplicationFactor(1); + + EnvFactory.getEnv().initClusterEnvironment(1, 5); + + try (final Connection connection = makeItCloseQuietly(EnvFactory.getEnv().getConnection()); + final Statement statement = makeItCloseQuietly(connection.createStatement()); + SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) EnvFactory.getEnv().getLeaderConfigNodeConnection()) { + // prepare data + statement.execute(INSERTION1); + statement.execute(FLUSH_COMMAND); + + // collect necessary information + Map> regionMap = getAllRegionMap(statement); + Set allDataNodeId = getAllDataNodes(statement); + + // expect one data region, one schema region + Assert.assertEquals(2, regionMap.size()); + + // expand + for (int selectedRegion : regionMap.keySet()) { + for (int i = 0; i < 4; i++) { + int targetDataNode = + selectDataNodeNotContainsRegion(allDataNodeId, regionMap, selectedRegion); + regionGroupExpand(statement, client, selectedRegion, targetDataNode); + // update regionMap every time + regionMap = getAllRegionMap(statement); + } + } + + // shrink + for (int selectedRegion : regionMap.keySet()) { + for (int i = 0; i < 4; i++) { + int targetDataNode = + selectDataNodeContainsRegion(allDataNodeId, regionMap, selectedRegion); + regionGroupShrink(statement, client, selectedRegion, targetDataNode); + // update regionMap every time + regionMap = getAllRegionMap(statement); + } + } + } + } + + private void regionGroupExpand( + Statement statement, + SyncConfigNodeIServiceClient client, + int selectedRegion, + int targetDataNode) + throws Exception { + Awaitility.await() + .atMost(10, TimeUnit.SECONDS) + .pollInterval(1, TimeUnit.SECONDS) + .until( + () -> { + statement.execute(String.format(EXPAND_FORMAT, selectedRegion, targetDataNode)); + return true; + }); + + Predicate expandRegionPredicate = + tShowRegionResp -> { + Map> newRegionMap = + getRunningRegionMap(tShowRegionResp.getRegionInfoList()); + Set dataNodes = newRegionMap.get(selectedRegion); + return dataNodes.contains(targetDataNode); + }; + + awaitUntilSuccess(client, expandRegionPredicate, Optional.of(targetDataNode), Optional.empty()); + + LOGGER.info("Region {} has expanded to DataNode {}", selectedRegion, targetDataNode); + } + + private void regionGroupShrink( + Statement statement, + SyncConfigNodeIServiceClient client, + int selectedRegion, + int targetDataNode) + throws Exception { + Awaitility.await() + .atMost(10, TimeUnit.SECONDS) + .pollInterval(1, TimeUnit.SECONDS) + .until( + () -> { + statement.execute(String.format(SHRINK_FORMAT, selectedRegion, targetDataNode)); + return true; + }); + + Predicate shrinkRegionPredicate = + tShowRegionResp -> { + Map> newRegionMap = + getRegionMap(tShowRegionResp.getRegionInfoList()); + Set dataNodes = newRegionMap.get(selectedRegion); + return !dataNodes.contains(targetDataNode); + }; + + awaitUntilSuccess(client, shrinkRegionPredicate, Optional.empty(), Optional.of(targetDataNode)); + + LOGGER.info("Region {} has shrunk from DataNode {}", selectedRegion, targetDataNode); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/commit/IoTDBRegionReconstructForIoTV1IT.java 
b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/commit/IoTDBRegionReconstructForIoTV1IT.java new file mode 100644 index 0000000000000..9c720bfbc2746 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/commit/IoTDBRegionReconstructForIoTV1IT.java @@ -0,0 +1,149 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.it.regionmigration.pass.commit; + +import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient; +import org.apache.iotdb.commons.cluster.NodeStatus; +import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework; +import org.apache.iotdb.consensus.ConsensusFactory; +import org.apache.iotdb.isession.SessionDataSet; +import org.apache.iotdb.it.env.EnvFactory; +import org.apache.iotdb.it.env.cluster.node.DataNodeWrapper; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.ClusterIT; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.Session; + +import org.apache.commons.io.FileUtils; +import org.apache.tsfile.read.common.RowRecord; +import org.awaitility.Awaitility; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.sql.Connection; +import java.sql.Statement; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.apache.iotdb.util.MagicUtils.makeItCloseQuietly; + +@Category({ClusterIT.class}) +@RunWith(IoTDBTestRunner.class) +public class IoTDBRegionReconstructForIoTV1IT extends IoTDBRegionOperationReliabilityITFramework { + private static final String RECONSTRUCT_FORMAT = "reconstruct region %d on %d"; + private static Logger LOGGER = LoggerFactory.getLogger(IoTDBRegionReconstructForIoTV1IT.class); + + @Test + public void normal1C3DTest() throws Exception { + EnvFactory.getEnv() + .getConfig() + .getCommonConfig() + .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS) + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setDataReplicationFactor(2) + .setSchemaReplicationFactor(3); + + EnvFactory.getEnv().initClusterEnvironment(1, 3); + + try (Connection connection = makeItCloseQuietly(EnvFactory.getEnv().getConnection()); + Statement statement = makeItCloseQuietly(connection.createStatement()); + SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) EnvFactory.getEnv().getLeaderConfigNodeConnection()) { + // prepare data + statement.execute(INSERTION1); + 
statement.execute(FLUSH_COMMAND); + + // collect necessary information + Map> dataRegionMap = getDataRegionMap(statement); + Set allDataNodeId = getAllDataNodes(statement); + + // select datanode + final int selectedRegion = 1; + Assert.assertTrue(dataRegionMap.containsKey(selectedRegion)); + Assert.assertEquals(2, dataRegionMap.get(selectedRegion).size()); + Iterator iterator = dataRegionMap.get(selectedRegion).iterator(); + final int dataNodeToBeClosed = iterator.next(); + final int dataNodeToBeReconstructed = iterator.next(); + final int dataNodeAlwaysGood = + allDataNodeId.stream() + .filter(x -> x != dataNodeToBeReconstructed && x != dataNodeToBeClosed) + .findAny() + .get(); + final DataNodeWrapper dataNodeWrapper = + EnvFactory.getEnv().dataNodeIdToWrapper(dataNodeAlwaysGood).get(); + Session session = + new Session.Builder() + .host(dataNodeWrapper.getIp()) + .port(dataNodeWrapper.getPort()) + .build(); + session.open(); + + // delete one DataNode's data dir, stop another DataNode + File dataDirToBeReconstructed = + new File( + EnvFactory.getEnv() + .dataNodeIdToWrapper(dataNodeToBeReconstructed) + .get() + .getDataPath()); + FileUtils.deleteDirectory(dataDirToBeReconstructed); + EnvFactory.getEnv().dataNodeIdToWrapper(dataNodeToBeClosed).get().stopForcibly(); + + // now, the query should throw exception + Assert.assertThrows( + StatementExecutionException.class, + () -> session.executeQueryStatement("select * from root.**")); + + // start DataNode, reconstruct the delete one + EnvFactory.getEnv().dataNodeIdToWrapper(dataNodeToBeClosed).get().start(); + EnvFactory.getAbstractEnv().checkNodeInStatus(dataNodeToBeClosed, NodeStatus.Running); + session.executeNonQueryStatement( + String.format(RECONSTRUCT_FORMAT, selectedRegion, dataNodeToBeReconstructed)); + try { + Awaitility.await() + .pollInterval(1, TimeUnit.SECONDS) + .atMost(1, TimeUnit.MINUTES) + .until( + () -> + getRegionStatusWithoutRunning(session).isEmpty() + && dataDirToBeReconstructed.getAbsoluteFile().exists()); + } catch (Exception e) { + LOGGER.error( + "Two factor: {} && {}", + getRegionStatusWithoutRunning(session).isEmpty(), + dataDirToBeReconstructed.getAbsoluteFile().exists()); + Assert.fail(); + } + EnvFactory.getEnv().dataNodeIdToWrapper(dataNodeToBeClosed).get().stopForcibly(); + + // now, the query should work fine + SessionDataSet resultSet = session.executeQueryStatement("select * from root.**"); + RowRecord rowRecord = resultSet.next(); + Assert.assertEquals("2.0", rowRecord.getFields().get(0).getStringValue()); + Assert.assertEquals("1.0", rowRecord.getFields().get(1).getStringValue()); + } + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/datanodecrash/IoTDBRegionMigrateDataNodeCrashIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/datanodecrash/IoTDBRegionMigrateDataNodeCrashIT.java index 6819af9f87b3f..e4c99e22c1fa9 100644 --- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/datanodecrash/IoTDBRegionMigrateDataNodeCrashIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/datanodecrash/IoTDBRegionMigrateDataNodeCrashIT.java @@ -21,7 +21,7 @@ import org.apache.iotdb.commons.utils.KillPoint.DataNodeKillPoints; import org.apache.iotdb.commons.utils.KillPoint.KillNode; -import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionMigrateReliabilityITFramework; +import 
org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework; import org.apache.iotdb.it.framework.IoTDBTestRunner; import org.apache.iotdb.itbase.category.DailyIT; @@ -31,7 +31,7 @@ @Category({DailyIT.class}) @RunWith(IoTDBTestRunner.class) -public class IoTDBRegionMigrateDataNodeCrashIT extends IoTDBRegionMigrateReliabilityITFramework { +public class IoTDBRegionMigrateDataNodeCrashIT extends IoTDBRegionOperationReliabilityITFramework { // region Coordinator DataNode crash tests private final int dataReplicateFactor = 2; diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/datanodecrash/IoTDBRegionMigrateOriginalCrashWhenDeleteLocalPeerIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/datanodecrash/IoTDBRegionMigrateOriginalCrashWhenDeleteLocalPeerIT.java index 047037c7d147e..2334bd6938513 100644 --- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/datanodecrash/IoTDBRegionMigrateOriginalCrashWhenDeleteLocalPeerIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/datanodecrash/IoTDBRegionMigrateOriginalCrashWhenDeleteLocalPeerIT.java @@ -20,7 +20,7 @@ package org.apache.iotdb.confignode.it.regionmigration.pass.datanodecrash; import org.apache.iotdb.commons.utils.KillPoint.IoTConsensusDeleteLocalPeerKillPoints; -import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionMigrateDataNodeCrashITFramework; +import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework; import org.apache.iotdb.it.framework.IoTDBTestRunner; import org.apache.iotdb.itbase.category.DailyIT; @@ -28,10 +28,12 @@ import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; +import static org.junit.platform.commons.function.Try.success; + @Category({DailyIT.class}) @RunWith(IoTDBTestRunner.class) public class IoTDBRegionMigrateOriginalCrashWhenDeleteLocalPeerIT - extends IoTDBRegionMigrateDataNodeCrashITFramework { + extends IoTDBRegionOperationReliabilityITFramework { @Test public void crashBeforeDelete() throws Exception { success(IoTConsensusDeleteLocalPeerKillPoints.BEFORE_DELETE); diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/removeconfignode/IoTDBRemoveConfigNodeITFramework.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removeconfignode/IoTDBRemoveConfigNodeITFramework.java new file mode 100644 index 0000000000000..561e99ec3da39 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removeconfignode/IoTDBRemoveConfigNodeITFramework.java @@ -0,0 +1,199 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
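[Editor's aside, not part of the patch] Everywhere this patch opens JDBC resources it now wraps them in MagicUtils.makeItCloseQuietly(...), replacing the inline closeQuietly(...) dynamic-proxy helper deleted from the reliability framework above. Assuming the shared utility keeps that deleted helper's behavior, the idea is: proxy the resource so close() swallows its own failures while every other call passes through. Sketch (class name is made up; the proxy trick only works for interface-typed resources such as Connection and Statement, which is where the patch applies it):

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Proxy;

final class CloseQuietlySketch {
  @SuppressWarnings("unchecked")
  static <T> T makeItCloseQuietly(T t) {
    InvocationHandler handler =
        (proxy, method, args) -> {
          try {
            if (method.getName().equals("close")) {
              try {
                method.invoke(t, args); // best-effort close
              } catch (Throwable ignored) {
                // deliberately swallow close() failures
              }
              return null;
            }
            return method.invoke(t, args); // delegate everything else
          } catch (InvocationTargetException e) {
            throw e.getTargetException(); // unwrap to the caller's real exception
          }
        };
    return (T)
        Proxy.newProxyInstance(
            t.getClass().getClassLoader(), t.getClass().getInterfaces(), handler);
  }
}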
+ */ + +package org.apache.iotdb.confignode.it.removeconfignode; + +import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient; +import org.apache.iotdb.confignode.it.removedatanode.SQLModel; +import org.apache.iotdb.consensus.ConsensusFactory; +import org.apache.iotdb.db.queryengine.common.header.ColumnHeaderConstant; +import org.apache.iotdb.it.env.EnvFactory; +import org.apache.iotdb.itbase.exception.InconsistentDataException; +import org.apache.iotdb.jdbc.IoTDBSQLException; + +import org.awaitility.Awaitility; +import org.awaitility.core.ConditionTimeoutException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.Statement; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework.getDataRegionMap; +import static org.apache.iotdb.util.MagicUtils.makeItCloseQuietly; + +public class IoTDBRemoveConfigNodeITFramework { + private static final Logger LOGGER = + LoggerFactory.getLogger(IoTDBRemoveConfigNodeITFramework.class); + private static final String TREE_MODEL_INSERTION = + "INSERT INTO root.sg.d1(timestamp,speed,temperature) values(100, 1, 2)"; + + private static final String SHOW_CONFIGNODES = "show confignodes"; + + private static final String defaultSchemaRegionGroupExtensionPolicy = "CUSTOM"; + private static final String defaultDataRegionGroupExtensionPolicy = "CUSTOM"; + + @Before + public void setUp() throws Exception { + EnvFactory.getEnv() + .getConfig() + .getCommonConfig() + .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS) + .setSchemaRegionGroupExtensionPolicy(defaultSchemaRegionGroupExtensionPolicy) + .setDataRegionGroupExtensionPolicy(defaultDataRegionGroupExtensionPolicy); + } + + @After + public void tearDown() throws InterruptedException { + EnvFactory.getEnv().cleanClusterEnvironment(); + } + + public void testRemoveConfigNode( + final int dataReplicateFactor, + final int schemaReplicationFactor, + final int configNodeNum, + final int dataNodeNum, + final int dataRegionPerDataNode, + final SQLModel model) + throws Exception { + + // Set up the environment + EnvFactory.getEnv() + .getConfig() + .getCommonConfig() + .setSchemaReplicationFactor(schemaReplicationFactor) + .setDataReplicationFactor(dataReplicateFactor) + .setDefaultDataRegionGroupNumPerDatabase( + dataRegionPerDataNode * dataNodeNum / dataReplicateFactor); + EnvFactory.getEnv().initClusterEnvironment(configNodeNum, dataNodeNum); + + try (final Connection connection = makeItCloseQuietly(EnvFactory.getEnv().getConnection()); + final Statement statement = makeItCloseQuietly(connection.createStatement()); + SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) EnvFactory.getEnv().getLeaderConfigNodeConnection()) { + + // Insert data in tree model + statement.execute(TREE_MODEL_INSERTION); + + Map> regionMap = getDataRegionMap(statement); + regionMap.forEach( + (key, valueSet) -> { + LOGGER.info("Key: {}, Value: {}", key, valueSet); + if (valueSet.size() != dataReplicateFactor) { + Assert.fail(); + } + }); + + // Get all config nodes + ResultSet 
result = statement.executeQuery(SHOW_CONFIGNODES); + Set allConfigNodeId = new HashSet<>(); + while (result.next()) { + allConfigNodeId.add(result.getInt(ColumnHeaderConstant.NODE_ID)); + } + + AtomicReference clientRef = new AtomicReference<>(client); + + int removeConfigNodeId = allConfigNodeId.iterator().next(); + String removeConfigNodeSQL = generateRemoveString(removeConfigNodeId); + LOGGER.info("Remove ConfigNodes SQL: {}", removeConfigNodeSQL); + try { + statement.execute(removeConfigNodeSQL); + } catch (IoTDBSQLException e) { + LOGGER.error("Remove ConfigNodes SQL execute fail: {}", e.getMessage()); + Assert.fail(); + } + LOGGER.info("Remove ConfigNodes SQL submit successfully."); + + // Wait until success + try { + awaitUntilSuccess(statement, removeConfigNodeId); + } catch (ConditionTimeoutException e) { + LOGGER.error("Remove ConfigNodes timeout in 2 minutes"); + Assert.fail(); + } + + LOGGER.info("Remove ConfigNodes success"); + } catch (InconsistentDataException e) { + LOGGER.error("Unexpected error:", e); + } + } + + private static void awaitUntilSuccess(Statement statement, int removeConfigNodeId) { + AtomicReference> lastTimeConfigNodes = new AtomicReference<>(); + AtomicReference lastException = new AtomicReference<>(); + + try { + Awaitility.await() + .atMost(2, TimeUnit.MINUTES) + .pollDelay(2, TimeUnit.SECONDS) + .until( + () -> { + try { + // Get all config nodes + ResultSet result = statement.executeQuery(SHOW_CONFIGNODES); + Set allConfigNodeId = new HashSet<>(); + while (result.next()) { + allConfigNodeId.add(result.getInt(ColumnHeaderConstant.NODE_ID)); + } + lastTimeConfigNodes.set(allConfigNodeId); + return !allConfigNodeId.contains(removeConfigNodeId); + } catch (Exception e) { + // Any exception can be ignored + lastException.set(e); + return false; + } + }); + } catch (ConditionTimeoutException e) { + if (lastTimeConfigNodes.get() == null) { + LOGGER.error( + "Maybe show confignodes fail, lastTimeConfigNodes is null, last Exception:", + lastException.get()); + throw e; + } + String actualSetStr = lastTimeConfigNodes.get().toString(); + lastTimeConfigNodes.get().remove(removeConfigNodeId); + String expectedSetStr = lastTimeConfigNodes.get().toString(); + LOGGER.error( + "Remove ConfigNode timeout in 2 minutes, expected set: {}, actual set: {}", + expectedSetStr, + actualSetStr); + if (lastException.get() == null) { + LOGGER.info("No exception during awaiting"); + } else { + LOGGER.error("Last exception during awaiting:", lastException.get()); + } + throw e; + } + } + + public static String generateRemoveString(Integer configNodeId) { + return "remove confignode " + configNodeId; + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/removeconfignode/IoTDBRemoveConfigNodeNormalIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removeconfignode/IoTDBRemoveConfigNodeNormalIT.java new file mode 100644 index 0000000000000..44a1100672e5e --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removeconfignode/IoTDBRemoveConfigNodeNormalIT.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.it.removeconfignode; + +import org.apache.iotdb.confignode.it.removedatanode.SQLModel; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.ClusterIT; + +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +@Category({ClusterIT.class}) +@RunWith(IoTDBTestRunner.class) +public class IoTDBRemoveConfigNodeNormalIT extends IoTDBRemoveConfigNodeITFramework { + @Test + public void test3C1DUseTreeSQL() throws Exception { + testRemoveConfigNode(1, 1, 3, 1, 2, SQLModel.TREE_MODEL_SQL); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/IoTDBRemoveDataNodeITFramework.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/IoTDBRemoveDataNodeITFramework.java new file mode 100644 index 0000000000000..73d5fd6fac353 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/IoTDBRemoveDataNodeITFramework.java @@ -0,0 +1,402 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
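[Editor's aside, not part of the patch] Both remove-node frameworks size each database with the same arithmetic: setDefaultDataRegionGroupNumPerDatabase(dataRegionPerDataNode * dataNodeNum / dataReplicateFactor), i.e. enough region groups that every DataNode hosts exactly dataRegionPerDataNode region replicas. Worked check with the 3C1D parameters used above (2 regions per DataNode, 1 DataNode, replication factor 1):

final class RegionSizingSketch {
  public static void main(String[] args) {
    int dataRegionPerDataNode = 2, dataNodeNum = 1, dataReplicateFactor = 1;
    int regionGroupsPerDatabase = dataRegionPerDataNode * dataNodeNum / dataReplicateFactor; // = 2
    // Each group has dataReplicateFactor replicas spread over dataNodeNum nodes:
    int regionsPerDataNode = regionGroupsPerDatabase * dataReplicateFactor / dataNodeNum; // = 2
    System.out.println(regionGroupsPerDatabase + " groups, " + regionsPerDataNode + " per DataNode");
  }
}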
+ */ + +package org.apache.iotdb.confignode.it.removedatanode; + +import org.apache.iotdb.common.rpc.thrift.TDataNodeConfiguration; +import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient; +import org.apache.iotdb.confignode.rpc.thrift.TDataNodeRemoveReq; +import org.apache.iotdb.confignode.rpc.thrift.TDataNodeRemoveResp; +import org.apache.iotdb.consensus.ConsensusFactory; +import org.apache.iotdb.db.queryengine.common.header.ColumnHeaderConstant; +import org.apache.iotdb.it.env.EnvFactory; +import org.apache.iotdb.it.env.cluster.node.DataNodeWrapper; +import org.apache.iotdb.itbase.exception.InconsistentDataException; +import org.apache.iotdb.jdbc.IoTDBSQLException; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.apache.thrift.TException; +import org.awaitility.Awaitility; +import org.awaitility.core.ConditionTimeoutException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.Statement; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import static org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework.getDataRegionMap; +import static org.apache.iotdb.util.MagicUtils.makeItCloseQuietly; + +public class IoTDBRemoveDataNodeITFramework { + private static final Logger LOGGER = + LoggerFactory.getLogger(IoTDBRemoveDataNodeITFramework.class); + private static final String TREE_MODEL_INSERTION = + "INSERT INTO root.sg.d1(timestamp,speed,temperature) values(100, 1, 2)"; + + private static final String SHOW_REGIONS = "show regions"; + private static final String SHOW_DATANODES = "show datanodes"; + + private static final String defaultSchemaRegionGroupExtensionPolicy = "CUSTOM"; + private static final String defaultDataRegionGroupExtensionPolicy = "CUSTOM"; + + @Before + public void setUp() throws Exception { + EnvFactory.getEnv() + .getConfig() + .getCommonConfig() + .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS) + .setSchemaRegionGroupExtensionPolicy(defaultSchemaRegionGroupExtensionPolicy) + .setDataRegionGroupExtensionPolicy(defaultDataRegionGroupExtensionPolicy); + } + + @After + public void tearDown() throws InterruptedException { + EnvFactory.getEnv().cleanClusterEnvironment(); + } + + public void successTest( + final int dataReplicateFactor, + final int schemaReplicationFactor, + final int configNodeNum, + final int dataNodeNum, + final int removeDataNodeNum, + final int dataRegionPerDataNode, + final boolean rejoinRemovedDataNode, + final SQLModel model) + throws Exception { + testRemoveDataNode( + dataReplicateFactor, + schemaReplicationFactor, + configNodeNum, + dataNodeNum, + removeDataNodeNum, + dataRegionPerDataNode, + true, + rejoinRemovedDataNode, + model); + } + + public void failTest( + final int dataReplicateFactor, + final int schemaReplicationFactor, + final int configNodeNum, + final int dataNodeNum, + final int removeDataNodeNum, + final int dataRegionPerDataNode, + final boolean rejoinRemovedDataNode, + final SQLModel model) + throws Exception { + 
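+    // Same flow as successTest, but with expectRemoveSuccess set to false: the
+    // framework then treats a rejected submission or a removal timeout as the
+    // passing outcome.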
testRemoveDataNode(
+        dataReplicateFactor,
+        schemaReplicationFactor,
+        configNodeNum,
+        dataNodeNum,
+        removeDataNodeNum,
+        dataRegionPerDataNode,
+        false,
+        rejoinRemovedDataNode,
+        model);
+  }
+
+  public void testRemoveDataNode(
+      final int dataReplicateFactor,
+      final int schemaReplicationFactor,
+      final int configNodeNum,
+      final int dataNodeNum,
+      final int removeDataNodeNum,
+      final int dataRegionPerDataNode,
+      final boolean expectRemoveSuccess,
+      final boolean rejoinRemovedDataNode,
+      final SQLModel model)
+      throws Exception {
+    // Set up the environment
+    EnvFactory.getEnv()
+        .getConfig()
+        .getCommonConfig()
+        .setSchemaReplicationFactor(schemaReplicationFactor)
+        .setDataReplicationFactor(dataReplicateFactor)
+        .setDefaultDataRegionGroupNumPerDatabase(
+            dataRegionPerDataNode * dataNodeNum / dataReplicateFactor);
+    EnvFactory.getEnv().initClusterEnvironment(configNodeNum, dataNodeNum);
+
+    try (final Connection connection = makeItCloseQuietly(EnvFactory.getEnv().getConnection());
+        final Statement statement = makeItCloseQuietly(connection.createStatement());
+        SyncConfigNodeIServiceClient client =
+            (SyncConfigNodeIServiceClient) EnvFactory.getEnv().getLeaderConfigNodeConnection()) {
+
+      // Insert data in tree model
+      statement.execute(TREE_MODEL_INSERTION);
+
+      Map<Integer, Set<Integer>> regionMap = getDataRegionMap(statement);
+      regionMap.forEach(
+          (key, valueSet) -> {
+            LOGGER.info("Key: {}, Value: {}", key, valueSet);
+            if (valueSet.size() != dataReplicateFactor) {
+              Assert.fail();
+            }
+          });
+
+      // Get all data nodes
+      ResultSet result = statement.executeQuery(SHOW_DATANODES);
+      Set<Integer> allDataNodeId = new HashSet<>();
+      while (result.next()) {
+        allDataNodeId.add(result.getInt(ColumnHeaderConstant.NODE_ID));
+      }
+
+      // Select data nodes to remove
+      final Set<Integer> removeDataNodes =
+          selectRemoveDataNodes(allDataNodeId, regionMap, removeDataNodeNum);
+
+      List<DataNodeWrapper> removeDataNodeWrappers =
+          removeDataNodes.stream()
+              .map(dataNodeId -> EnvFactory.getEnv().dataNodeIdToWrapper(dataNodeId).get())
+              .collect(Collectors.toList());
+
+      AtomicReference<SyncConfigNodeIServiceClient> clientRef = new AtomicReference<>(client);
+      List<TDataNodeLocation> removeDataNodeLocations =
+          clientRef
+              .get()
+              .getDataNodeConfiguration(-1)
+              .getDataNodeConfigurationMap()
+              .values()
+              .stream()
+              .map(TDataNodeConfiguration::getLocation)
+              .filter(location -> removeDataNodes.contains(location.getDataNodeId()))
+              .collect(Collectors.toList());
+      if (SQLModel.NOT_USE_SQL.equals(model)) {
+        TDataNodeRemoveReq removeReq = new TDataNodeRemoveReq(removeDataNodeLocations);
+
+        // Remove data nodes
+        TDataNodeRemoveResp removeResp = clientRef.get().removeDataNode(removeReq);
+        LOGGER.info("Submit Remove DataNodes result {} ", removeResp);
+        if (removeResp.getStatus().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+          if (expectRemoveSuccess) {
+            LOGGER.error("Submit Remove DataNodes fail");
+            Assert.fail();
+          } else {
+            LOGGER.info("Submit Remove DataNodes fail, as expected.");
+            return;
+          }
+        }
+        LOGGER.info("Submit Remove DataNodes request: {}", removeReq);
+
+      } else {
+        String removeDataNodeSQL = generateRemoveString(removeDataNodes);
+        LOGGER.info("Remove DataNodes SQL: {}", removeDataNodeSQL);
+        try {
+          statement.execute(removeDataNodeSQL);
+        } catch (IoTDBSQLException e) {
+          if (expectRemoveSuccess) {
+            LOGGER.error("Remove DataNodes SQL execute fail: {}", e.getMessage());
+            Assert.fail();
+          } else {
+            LOGGER.info("Submit Remove DataNodes fail, as expected");
+            return;
+          }
+        }
+        LOGGER.info("Remove DataNodes SQL submit successfully.");
+      }
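+
+      // Both submission paths converge here: NOT_USE_SQL goes through the ConfigNode
+      // RPC (TDataNodeRemoveReq), while the SQL path issues a statement such as
+      // "remove datanode 2, 3" (hypothetical IDs) over JDBC. Verification below is
+      // identical for both.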
+      // Wait until success
+      boolean removeSuccess = false;
+      try {
+        awaitUntilSuccess(clientRef, removeDataNodeLocations);
+        removeSuccess = true;
+      } catch (ConditionTimeoutException e) {
+        if (expectRemoveSuccess) {
+          LOGGER.error("Remove DataNodes timeout in 2 minutes");
+          Assert.fail();
+        }
+      }
+
+      if (!expectRemoveSuccess && removeSuccess) {
+        LOGGER.error("Remove DataNodes success, but expect fail");
+        Assert.fail();
+      }
+
+      LOGGER.info("Remove DataNodes success");
+
+      if (rejoinRemovedDataNode) {
+        try {
+          // Use sleep and restart to ensure that removeDataNodes restarts successfully
+          Thread.sleep(30000);
+          restartDataNodes(removeDataNodeWrappers);
+          LOGGER.info("RemoveDataNodes:{} rejoined successfully.", removeDataNodes);
+        } catch (Exception e) {
+          LOGGER.error("RemoveDataNodes rejoin failed.");
+          Assert.fail();
+        }
+      }
+    } catch (InconsistentDataException e) {
+      LOGGER.error("Unexpected error:", e);
+    }
+
+    try (final Connection connection = makeItCloseQuietly(EnvFactory.getEnv().getConnection());
+        final Statement statement = makeItCloseQuietly(connection.createStatement())) {
+
+      // Check the data region distribution after removing data nodes
+      Map<Integer, Set<Integer>> afterRegionMap = getDataRegionMap(statement);
+      afterRegionMap.forEach(
+          (key, valueSet) -> {
+            LOGGER.info("Key: {}, Value: {}", key, valueSet);
+            if (valueSet.size() != dataReplicateFactor) {
+              Assert.fail();
+            }
+          });
+
+      if (rejoinRemovedDataNode) {
+        ResultSet result = statement.executeQuery(SHOW_DATANODES);
+        Set<Integer> allDataNodeId = new HashSet<>();
+        while (result.next()) {
+          allDataNodeId.add(result.getInt(ColumnHeaderConstant.NODE_ID));
+        }
+        Assert.assertEquals(dataNodeNum, allDataNodeId.size());
+      }
+    } catch (InconsistentDataException e) {
+      LOGGER.error("Unexpected error:", e);
+    }
+  }
+
+  private static Set<Integer> selectRemoveDataNodes(
+      Set<Integer> allDataNodeId, Map<Integer, Set<Integer>> regionMap, int removeDataNodeNum) {
+    Set<Integer> removeDataNodeIds = new HashSet<>();
+    for (int i = 0; i < removeDataNodeNum; i++) {
+      int removeDataNodeId = allDataNodeId.iterator().next();
+      removeDataNodeIds.add(removeDataNodeId);
+      allDataNodeId.remove(removeDataNodeId);
+    }
+    return removeDataNodeIds;
+  }
+
+  private static void awaitUntilSuccess(
+      AtomicReference<SyncConfigNodeIServiceClient> clientRef,
+      List<TDataNodeLocation> removeDataNodeLocations) {
+    AtomicReference<List<TDataNodeLocation>> lastTimeDataNodeLocations = new AtomicReference<>();
+    AtomicReference<Exception> lastException = new AtomicReference<>();
+
+    try {
+      Awaitility.await()
+          .atMost(2, TimeUnit.MINUTES)
+          .pollDelay(2, TimeUnit.SECONDS)
+          .until(
+              () -> {
+                try {
+                  List<TDataNodeLocation> remainingDataNodes =
+                      clientRef
+                          .get()
+                          .getDataNodeConfiguration(-1)
+                          .getDataNodeConfigurationMap()
+                          .values()
+                          .stream()
+                          .map(TDataNodeConfiguration::getLocation)
+                          .collect(Collectors.toList());
+                  lastTimeDataNodeLocations.set(remainingDataNodes);
+                  for (TDataNodeLocation location : removeDataNodeLocations) {
+                    if (remainingDataNodes.contains(location)) {
+                      return false;
+                    }
+                  }
+                  return true;
+                } catch (TException e) {
+                  clientRef.set(
+                      (SyncConfigNodeIServiceClient)
+                          EnvFactory.getEnv().getLeaderConfigNodeConnection());
+                  lastException.set(e);
+                  return false;
+                } catch (Exception e) {
+                  // Any exception can be ignored
+                  lastException.set(e);
+                  return false;
+                }
+              });
+    } catch (ConditionTimeoutException e) {
+      if (lastTimeDataNodeLocations.get() == null) {
+        LOGGER.error(
+            "Maybe getDataNodeConfiguration fail, lastTimeDataNodeLocations is null, last Exception:",
+            lastException.get());
+        throw e;
+      }
+      String actualSetStr = lastTimeDataNodeLocations.get().toString();
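+      // removeAll mutates the captured list in place, so the actual (pre-removal)
+      // set is stringified first; whatever removeAll leaves behind is the set the
+      // test expected to remain.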
+      lastTimeDataNodeLocations.get().removeAll(removeDataNodeLocations);
+      String expectedSetStr = lastTimeDataNodeLocations.get().toString();
+      LOGGER.error(
+          "Remove DataNodes timeout in 2 minutes, expected set: {}, actual set: {}",
+          expectedSetStr,
+          actualSetStr);
+      if (lastException.get() == null) {
+        LOGGER.info("No exception during awaiting");
+      } else {
+        LOGGER.error("Last exception during awaiting:", lastException.get());
+      }
+      throw e;
+    }
+
+    LOGGER.info("DataNodes have been successfully changed to {}", lastTimeDataNodeLocations.get());
+  }
+
+  public void restartDataNodes(List<DataNodeWrapper> dataNodeWrappers) {
+    dataNodeWrappers.parallelStream()
+        .forEach(
+            nodeWrapper -> {
+              nodeWrapper.stopForcibly();
+              Awaitility.await()
+                  .atMost(1, TimeUnit.MINUTES)
+                  .pollDelay(2, TimeUnit.SECONDS)
+                  .until(() -> !nodeWrapper.isAlive());
+              LOGGER.info("Node {} stopped.", nodeWrapper.getId());
+              nodeWrapper.start();
+              Awaitility.await()
+                  .atMost(1, TimeUnit.MINUTES)
+                  .pollDelay(2, TimeUnit.SECONDS)
+                  .until(nodeWrapper::isAlive);
+              try {
+                TimeUnit.SECONDS.sleep(10);
+              } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+              }
+              LOGGER.info("Node {} restarted.", nodeWrapper.getId());
+            });
+  }
+
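+  // Builds the removal statement from the selected node IDs; a set of hypothetical
+  // IDs {2, 3} yields "remove datanode 2, 3".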
+  public static String generateRemoveString(Set<Integer> dataNodes) {
+    StringBuilder sb = new StringBuilder("remove datanode ");
+
+    for (Integer node : dataNodes) {
+      sb.append(node).append(", ");
+    }
+
+    sb.setLength(sb.length() - 2);
+
+    return sb.toString();
+  }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/IoTDBRemoveDataNodeNormalIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/IoTDBRemoveDataNodeNormalIT.java
new file mode 100644
index 0000000000000..517d8ee4cad67
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/IoTDBRemoveDataNodeNormalIT.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.it.removedatanode;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.ClusterIT;
+
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+@Category({ClusterIT.class})
+@RunWith(IoTDBTestRunner.class)
+public class IoTDBRemoveDataNodeNormalIT extends IoTDBRemoveDataNodeITFramework {
+
+  @Test
+  public void success1C4DTest() throws Exception {
+    successTest(2, 3, 1, 4, 1, 2, true, SQLModel.NOT_USE_SQL);
+  }
+
+  @Test
+  public void fail1C3DTest() throws Exception {
+    failTest(2, 3, 1, 3, 1, 2, false, SQLModel.NOT_USE_SQL);
+  }
+
+  @Test
+  public void success1C4DTestUseSQL() throws Exception {
+    successTest(2, 3, 1, 4, 1, 2, true, SQLModel.TREE_MODEL_SQL);
+  }
+
+  @Test
+  public void fail1C3DTestUseSQL() throws Exception {
+    failTest(2, 3, 1, 3, 1, 2, false, SQLModel.TREE_MODEL_SQL);
+  }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/SQLModel.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/SQLModel.java
new file mode 100644
index 0000000000000..0963072d44cae
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/SQLModel.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.apache.iotdb.confignode.it.removedatanode; + +public enum SQLModel { + NOT_USE_SQL, + + TREE_MODEL_SQL, +} diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/utils/ConfigNodeTestUtils.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/utils/ConfigNodeTestUtils.java index 76feab5efe32d..5f9da5ac36a83 100644 --- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/utils/ConfigNodeTestUtils.java +++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/utils/ConfigNodeTestUtils.java @@ -323,8 +323,8 @@ public static TClusterParameters generateClusterParameters() { clusterParameters.setTimePartitionInterval(604800000); clusterParameters.setDataReplicationFactor(1); clusterParameters.setSchemaReplicationFactor(1); - clusterParameters.setDataRegionPerDataNode(5.0); - clusterParameters.setSchemaRegionPerDataNode(1.0); + clusterParameters.setDataRegionPerDataNode(0); + clusterParameters.setSchemaRegionPerDataNode(1); clusterParameters.setDiskSpaceWarningThreshold(0.01); clusterParameters.setReadConsistencyLevel("strong"); clusterParameters.setTimestampPrecision("ms"); diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBDatetimeFormatIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBDatetimeFormatIT.java index bb3a8de7d12f4..620043e6c7b7b 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBDatetimeFormatIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBDatetimeFormatIT.java @@ -128,5 +128,14 @@ public void testBigDateTime() { e.printStackTrace(); fail(); } + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + statement.execute("insert into root.sg.d1(time,s2) values (16182830055860000000, 8.76);"); + fail(); + } catch (SQLException e) { + Assert.assertTrue( + e.getMessage() + .contains("please check whether the timestamp 16182830055860000000 is correct.")); + } } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBDuplicateTimeIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBDuplicateTimeIT.java index 209657e516185..047bb10bfd3ca 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBDuplicateTimeIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBDuplicateTimeIT.java @@ -43,8 +43,6 @@ public class IoTDBDuplicateTimeIT { @Before public void setUp() throws Exception { - EnvFactory.getEnv().getConfig().getCommonConfig().setAvgSeriesPointNumberThreshold(2); - // Adjust memstable threshold size to make it flush automatically EnvFactory.getEnv().initClusterEnvironment(); } @@ -62,6 +60,7 @@ public void testDuplicateTime() throws SQLException { // version-1 tsfile statement.execute("insert into root.db.d1(time,s1) values (2,2)"); statement.execute("insert into root.db.d1(time,s1) values (3,3)"); + statement.execute("flush"); // version-2 unseq work memtable statement.execute("insert into root.db.d1(time,s1) values (2,20)"); @@ -69,9 +68,11 @@ public void testDuplicateTime() throws SQLException { // version-3 tsfile statement.execute("insert into root.db.d1(time,s1) values (5,5)"); statement.execute("insert into root.db.d1(time,s1) values (6,6)"); + statement.execute("flush root.db true"); // version-2 unseq work memtable -> unseq tsfile statement.execute("insert into root.db.d1(time,s1) values (5,50)"); + statement.execute("flush"); try (ResultSet set = statement.executeQuery("SELECT s1 FROM 
root.db.d1 where time = 5")) { int cnt = 0; diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBEncodingIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBEncodingIT.java index aa7b7024677c0..6bcbc762fc53b 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBEncodingIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBEncodingIT.java @@ -19,13 +19,17 @@ package org.apache.iotdb.db.it; +import org.apache.iotdb.isession.ISession; import org.apache.iotdb.it.env.EnvFactory; import org.apache.iotdb.it.framework.IoTDBTestRunner; import org.apache.iotdb.itbase.category.LocalStandaloneIT; import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; @@ -36,22 +40,37 @@ import java.sql.Statement; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.fail; @RunWith(IoTDBTestRunner.class) @Category({LocalStandaloneIT.class}) public class IoTDBEncodingIT { - @Before - public void setUp() throws Exception { + private static final String[] databasesToClear = + new String[] {"root.db_0", "root.db1", "root.turbine1"}; + + @BeforeClass + public static void setUpClass() throws Exception { EnvFactory.getEnv().initClusterEnvironment(); } - @After - public void tearDown() throws Exception { + @AfterClass + public static void tearDownClass() throws Exception { EnvFactory.getEnv().cleanClusterEnvironment(); } + @After + public void tearDown() { + for (String database : databasesToClear) { + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + session.executeNonQueryStatement("DELETE DATABASE " + database); + } catch (Exception ignored) { + + } + } + } + @Test public void testSetEncodingRegularFailed() { try (Connection connection = EnvFactory.getEnv().getConnection(); @@ -432,4 +451,75 @@ public void testFloatPrecision2() { fail(); } } + + @Test + public void testCreateNewTypes() throws Exception { + String currDB = "root.db1"; + int seriesCnt = 0; + TSDataType[] dataTypes = + new TSDataType[] { + TSDataType.STRING, TSDataType.BLOB, TSDataType.TIMESTAMP, TSDataType.DATE + }; + + // supported encodings + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + for (TSDataType dataType : dataTypes) { + for (TSEncoding encoding : TSEncoding.values()) { + if (encoding.isSupported(dataType)) { + statement.execute( + "create timeseries " + + currDB + + ".d1.s" + + seriesCnt + + " with datatype=" + + dataType + + ", encoding=" + + encoding + + ", compression=SNAPPY"); + seriesCnt++; + } + } + } + + ResultSet resultSet = statement.executeQuery("SHOW TIMESERIES"); + + while (resultSet.next()) { + seriesCnt--; + } + assertEquals(0, seriesCnt); + statement.execute("DROP DATABASE " + currDB); + } + + // unsupported encodings + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + for (TSDataType dataType : dataTypes) { + for (TSEncoding encoding : TSEncoding.values()) { + if (!encoding.isSupported(dataType)) { + try { + statement.execute( + "create timeseries " + + currDB + + ".d1.s" + + seriesCnt + + " with datatype=" + + dataType + + 
", encoding=" + + encoding + + ", compression=SNAPPY"); + fail("Should have thrown an exception"); + } catch (SQLException e) { + assertEquals( + "507: encoding " + encoding + " does not support " + dataType, e.getMessage()); + } + seriesCnt++; + } + } + } + + ResultSet resultSet = statement.executeQuery("SHOW TIMESERIES"); + assertFalse(resultSet.next()); + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFileTimeIndexIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFileTimeIndexIT.java new file mode 100644 index 0000000000000..a0d0a083701ac --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFileTimeIndexIT.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.it; + +import org.apache.iotdb.it.env.EnvFactory; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.ClusterIT; +import org.apache.iotdb.itbase.category.LocalStandaloneIT; + +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Locale; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +@RunWith(IoTDBTestRunner.class) +@Category({LocalStandaloneIT.class, ClusterIT.class}) +public class IoTDBFileTimeIndexIT { + + private static final String[] sqls = + new String[] { + "insert into root.db.d1(time,s1) values(2,2)", + "insert into root.db.d1(time,s1) values(3,3)", + "flush", + "insert into root.db.d2(time,s1) values(5,5)", + "flush", + "insert into root.db.d1(time,s1) values(4,4)", + "flush", + "insert into root.db.d2(time,s1) values(1,1)", + "insert into root.db.d1(time,s1) values(3,30)", + "insert into root.db.d1(time,s1) values(4,40)", + "flush", + "insert into root.db.d2(time,s1) values(2,2)", + "insert into root.db.d1(time,s1) values(4,400)", + "flush", + }; + + @BeforeClass + public static void setUp() throws Exception { + Locale.setDefault(Locale.ENGLISH); + + EnvFactory.getEnv() + .getConfig() + .getCommonConfig() + .setDataRegionGroupExtensionPolicy("CUSTOM") + .setDefaultDataRegionGroupNumPerDatabase(1) + .setEnableSeqSpaceCompaction(false) + .setEnableUnseqSpaceCompaction(false) + .setEnableCrossSpaceCompaction(false) + .setQueryMemoryProportion("1:100:200:50:200:200:0:250"); + // Adjust memstable threshold size to make it flush automatically + EnvFactory.getEnv().initClusterEnvironment(); + prepareData(); + } + + private static void prepareData() { + try (Connection connection = 
EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + + for (String sql : sqls) { + statement.addBatch(sql); + } + statement.executeBatch(); + } catch (Exception e) { + fail(e.getMessage()); + } + } + + @AfterClass + public static void tearDown() throws Exception { + EnvFactory.getEnv().cleanClusterEnvironment(); + } + + @Test + public void testQuery() throws SQLException { + long[] time = {2L, 3L, 4L}; + double[] value = {2.0f, 30.0f, 400.0f}; + + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery("select s1 from root.db.d1")) { + int cnt = 0; + while (resultSet.next()) { + assertEquals(time[cnt], resultSet.getLong(1)); + assertEquals(value[cnt], resultSet.getDouble(2), 0.00001); + cnt++; + } + assertEquals(time.length, cnt); + } + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFilterNullIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFilterNullIT.java index 705f4b8d9ede7..2ddb7128d1a5c 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFilterNullIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFilterNullIT.java @@ -46,7 +46,8 @@ public class IoTDBFilterNullIT { "CREATE DATABASE root.testNullFilter", "CREATE TIMESERIES root.testNullFilter.d1.s1 WITH DATATYPE=INT32, ENCODING=PLAIN", "CREATE TIMESERIES root.testNullFilter.d1.s2 WITH DATATYPE=BOOLEAN, ENCODING=PLAIN", - "CREATE TIMESERIES root.testNullFilter.d1.s3 WITH DATATYPE=DOUBLE, ENCODING=PLAIN" + "CREATE TIMESERIES root.testNullFilter.d1.s3 WITH DATATYPE=DOUBLE, ENCODING=PLAIN", + "CREATE ALIGNED TIMESERIES root.testNullFilter.d2(s1 INT32, s2 BOOLEAN, s3 DOUBLE);" }; private static final String[] insertSqls = @@ -54,6 +55,9 @@ public class IoTDBFilterNullIT { "INSERT INTO root.testNullFilter.d1(timestamp,s2,s3) " + "values(1, false, 11.1)", "INSERT INTO root.testNullFilter.d1(timestamp,s1,s2) " + "values(2, 22, true)", "INSERT INTO root.testNullFilter.d1(timestamp,s1,s3) " + "values(3, 23, 33.3)", + "INSERT INTO root.testNullFilter.d2(timestamp,s2,s3) " + "values(1, false, 11.1)", + "INSERT INTO root.testNullFilter.d2(timestamp,s1,s2) " + "values(2, 22, true)", + "INSERT INTO root.testNullFilter.d2(timestamp,s1,s2) " + "values(3, 22, false)", }; private static void prepareData() { @@ -128,7 +132,34 @@ public void nullFilterTest() { assertEquals(retArray.length, count); } } catch (Exception e) { - e.printStackTrace(); + fail(e.getMessage()); + } + } + + @Test + public void inPushDownTest() { + String[] retArray = new String[] {"2,22,true,null", "3,22,false,null"}; + try (Connection connectionIsNull = EnvFactory.getEnv().getConnection(); + Statement statementIsNull = connectionIsNull.createStatement()) { + int count = 0; + try (ResultSet resultSet = + statementIsNull.executeQuery( + "select * from root.testNullFilter.d2 where s1 in (22, 23)")) { + while (resultSet.next()) { + String ans = + resultSet.getString(ColumnHeaderConstant.TIME) + + "," + + resultSet.getString("root.testNullFilter.d2.s1") + + "," + + resultSet.getString("root.testNullFilter.d2.s2") + + "," + + resultSet.getString("root.testNullFilter.d2.s3"); + assertEquals(retArray[count], ans); + count++; + } + assertEquals(retArray.length, count); + } + } catch (Exception e) { fail(e.getMessage()); } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFloatPrecisionIT.java 
b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFloatPrecisionIT.java index c72c7e41b1253..3a73be3adb309 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFloatPrecisionIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFloatPrecisionIT.java @@ -169,4 +169,49 @@ public void selectAllSQLTest() { fail(e.getMessage()); } } + + @Test + public void bigFloatNumberTest2() { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + float[] floats = new float[] {6.5536403E8F, 3.123456768E20F, Float.NaN}; + double[] doubles = new double[] {9.223372036854E18, 9.223372036854E100, Double.NaN}; + + statement.execute("create timeseries root.sg.d1.s1 with datatype=float, encoding=rle"); + statement.execute("create timeseries root.sg.d1.s2 with datatype=double, encoding=rle"); + statement.execute( + "insert into root.sg.d1(time, s1, s2) values (1, 6.5536403E8, 9.223372036854E18)"); + statement.execute( + "insert into root.sg.d1(time, s1, s2) values (2, 3.123456768E20, 9.223372036854E100)"); + statement.execute("insert into root.sg.d1(time, s1, s2) values (3, NaN, NaN)"); + + int cnt; + try (ResultSet resultSet = statement.executeQuery("select s1, s2 from root.sg.d1")) { + assertNotNull(resultSet); + cnt = 0; + while (resultSet.next()) { + assertEquals(floats[cnt], resultSet.getFloat("root.sg.d1.s1"), DELTA_FLOAT); + assertEquals(doubles[cnt], resultSet.getDouble("root.sg.d1.s2"), DELTA_DOUBLE); + cnt++; + } + assertEquals(3, cnt); + } + + statement.execute("flush"); + + try (ResultSet resultSet = statement.executeQuery("select s1, s2 from root.sg.d1")) { + assertNotNull(resultSet); + cnt = 0; + while (resultSet.next()) { + assertEquals(floats[cnt], resultSet.getFloat("root.sg.d1.s1"), DELTA_FLOAT); + assertEquals(doubles[cnt], resultSet.getDouble("root.sg.d1.s2"), DELTA_DOUBLE); + cnt++; + } + assertEquals(3, cnt); + } + } catch (Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBInsertMultiRowIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBInsertMultiRowIT.java index 7ebbcfc8472b3..6e52368be4ce4 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBInsertMultiRowIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBInsertMultiRowIT.java @@ -56,7 +56,7 @@ public class IoTDBInsertMultiRowIT { @BeforeClass public static void setUp() throws Exception { - EnvFactory.getEnv().getConfig().getCommonConfig().setMaxInnerCompactionCandidateFileNum(2); + EnvFactory.getEnv().getConfig().getCommonConfig().setInnerCompactionCandidateFileNum(2); EnvFactory.getEnv().initClusterEnvironment(); initCreateSQLStatement(); insertData(); @@ -171,6 +171,13 @@ public void testInsertMultiRowWithWrongTimestampPrecision() { } catch (SQLException e) { assertTrue(e.getMessage().contains("Current system timestamp precision is ms")); } + try (Statement st1 = connection.createStatement()) { + st1.execute( + "insert into root.t1.d99.wt01(timestamp, s1, s2) values(-1618283005586000, 1, 1), (-1618283005586001, 1, 2)"); + fail(); + } catch (SQLException e) { + assertTrue(e.getMessage().contains("Current system timestamp precision is ms")); + } } @Test diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBInsertWithQueryIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBInsertWithQueryIT.java index d237423c52f41..3ebefbba2026d 100644 --- 
a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBInsertWithQueryIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBInsertWithQueryIT.java @@ -52,6 +52,7 @@ public class IoTDBInsertWithQueryIT { @Before public void setUp() throws Exception { + EnvFactory.getEnv().getConfig().getCommonConfig().setTimestampPrecisionCheckEnabled(false); EnvFactory.getEnv().initClusterEnvironment(); } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBLoadLastCacheIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBLoadLastCacheIT.java new file mode 100644 index 0000000000000..a005f80d19df2 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBLoadLastCacheIT.java @@ -0,0 +1,556 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.it; + +import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.db.queryengine.common.header.ColumnHeaderConstant; +import org.apache.iotdb.db.queryengine.plan.relational.metadata.fetcher.cache.LastCacheLoadStrategy; +import org.apache.iotdb.it.env.EnvFactory; +import org.apache.iotdb.it.utils.TsFileGenerator; +import org.apache.iotdb.itbase.category.ClusterIT; +import org.apache.iotdb.itbase.category.LocalStandaloneIT; +import org.apache.iotdb.jdbc.IoTDBSQLException; + +import com.google.common.util.concurrent.RateLimiter; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.PlainDeviceID; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.read.common.Path; +import org.apache.tsfile.write.TsFileWriter; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.IMeasurementSchema; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.nio.file.Files; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Objects; +import java.util.Random; +import java.util.concurrent.TimeUnit; + +@SuppressWarnings({"ResultOfMethodCallIgnored", "UnstableApiUsage"}) +@RunWith(Parameterized.class) +@Category({LocalStandaloneIT.class, ClusterIT.class}) +public class 
IoTDBLoadLastCacheIT {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBLoadLastCacheIT.class);
+  private static final long PARTITION_INTERVAL = 10 * 1000L;
+  private static final int connectionTimeoutInMS = (int) TimeUnit.SECONDS.toMillis(300);
+  private static final long loadTsFileAnalyzeSchemaMemorySizeInBytes = 10 * 1024L;
+
+  private File tmpDir;
+  private final LastCacheLoadStrategy lastCacheLoadStrategy;
+
+  @Parameters(name = "loadLastCacheStrategy={0}")
+  public static Collection<Object[]> data() {
+    return Arrays.asList(
+        new Object[][] {
+          {LastCacheLoadStrategy.CLEAN_ALL},
+          {LastCacheLoadStrategy.UPDATE},
+          {LastCacheLoadStrategy.UPDATE_NO_BLOB},
+          {LastCacheLoadStrategy.CLEAN_DEVICE}
+        });
+  }
+
+  public IoTDBLoadLastCacheIT(LastCacheLoadStrategy lastCacheLoadStrategy) {
+    this.lastCacheLoadStrategy = lastCacheLoadStrategy;
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    tmpDir = new File(Files.createTempDirectory("load").toUri());
+    EnvFactory.getEnv().getConfig().getCommonConfig().setTimePartitionInterval(PARTITION_INTERVAL);
+    EnvFactory.getEnv()
+        .getConfig()
+        .getDataNodeConfig()
+        .setConnectionTimeoutInMS(connectionTimeoutInMS)
+        .setLoadTsFileAnalyzeSchemaMemorySizeInBytes(loadTsFileAnalyzeSchemaMemorySizeInBytes);
+    EnvFactory.getEnv()
+        .getConfig()
+        .getDataNodeConfig()
+        .setLoadLastCacheStrategy(lastCacheLoadStrategy.name())
+        .setCacheLastValuesForLoad(true);
+    EnvFactory.getEnv().initClusterEnvironment();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    deleteSG();
+    EnvFactory.getEnv().cleanClusterEnvironment();
+
+    if (!deleteDir()) {
+      LOGGER.error("Can not delete tmp dir for loading tsfile.");
+    }
+  }
+
+  private void registerSchema() throws SQLException {
+    try (final Connection connection = EnvFactory.getEnv().getConnection();
+        final Statement statement = connection.createStatement()) {
+
+      statement.execute("CREATE DATABASE " + SchemaConfig.STORAGE_GROUP_0);
+      statement.execute("CREATE DATABASE " + SchemaConfig.STORAGE_GROUP_1);
+
+      statement.execute(convert2SQL(SchemaConfig.DEVICE_0, SchemaConfig.MEASUREMENT_00));
+      statement.execute(convert2SQL(SchemaConfig.DEVICE_0, SchemaConfig.MEASUREMENT_01));
+      statement.execute(convert2SQL(SchemaConfig.DEVICE_0, SchemaConfig.MEASUREMENT_02));
+      statement.execute(convert2SQL(SchemaConfig.DEVICE_0, SchemaConfig.MEASUREMENT_03));
+
+      statement.execute(
+          convert2AlignedSQL(
+              SchemaConfig.DEVICE_1,
+              Arrays.asList(
+                  SchemaConfig.MEASUREMENT_10,
+                  SchemaConfig.MEASUREMENT_11,
+                  SchemaConfig.MEASUREMENT_12,
+                  SchemaConfig.MEASUREMENT_13,
+                  SchemaConfig.MEASUREMENT_14,
+                  SchemaConfig.MEASUREMENT_15,
+                  SchemaConfig.MEASUREMENT_16,
+                  SchemaConfig.MEASUREMENT_17)));
+
+      statement.execute(convert2SQL(SchemaConfig.DEVICE_2, SchemaConfig.MEASUREMENT_20));
+
+      statement.execute(convert2SQL(SchemaConfig.DEVICE_3, SchemaConfig.MEASUREMENT_30));
+
+      statement.execute(
+          convert2AlignedSQL(
+              SchemaConfig.DEVICE_4, Collections.singletonList(SchemaConfig.MEASUREMENT_40)));
+    }
+  }
+
+  private String convert2SQL(final String device, final MeasurementSchema schema) {
+    final String sql =
+        String.format(
+            "create timeseries %s %s",
+            new Path(device, schema.getMeasurementId(), true).getFullPath(),
+            schema.getType().name());
+    LOGGER.info("schema execute: {}", sql);
+    return sql;
+  }
+
+  private String convert2AlignedSQL(final String device, final List<IMeasurementSchema> schemas) {
+    StringBuilder sql = new StringBuilder(String.format("create aligned timeseries %s(", device));
+    for (int i = 0; i <
schemas.size(); i++) { + final IMeasurementSchema schema = schemas.get(i); + sql.append(String.format("%s %s", schema.getMeasurementId(), schema.getType().name())); + sql.append(i == schemas.size() - 1 ? ")" : ","); + } + LOGGER.info("schema execute: {}.", sql); + return sql.toString(); + } + + private void deleteSG() throws SQLException { + try (final Connection connection = EnvFactory.getEnv().getConnection(); + final Statement statement = connection.createStatement()) { + + statement.execute(String.format("delete database %s", SchemaConfig.STORAGE_GROUP_0)); + statement.execute(String.format("delete database %s", SchemaConfig.STORAGE_GROUP_1)); + } catch (final IoTDBSQLException ignored) { + } + } + + private boolean deleteDir() { + for (final File file : Objects.requireNonNull(tmpDir.listFiles())) { + if (!file.delete()) { + return false; + } + } + return tmpDir.delete(); + } + + @Test + public void testTreeModelLoadWithLastCache() throws Exception { + registerSchema(); + + final String device = SchemaConfig.DEVICE_0; + final String measurement = SchemaConfig.MEASUREMENT_00.getMeasurementId(); + + try (final Connection connection = EnvFactory.getEnv().getConnection(); + final Statement statement = connection.createStatement()) { + + statement.execute( + String.format("insert into %s(timestamp, %s) values(100, 100)", device, measurement)); + + try (final ResultSet resultSet = + statement.executeQuery(String.format("select last %s from %s", measurement, device))) { + if (resultSet.next()) { + final String lastValue = resultSet.getString(ColumnHeaderConstant.VALUE); + Assert.assertEquals("100", lastValue); + } else { + Assert.fail("This ResultSet is empty."); + } + } + } + + final File file1 = new File(tmpDir, "1-0-0-0.tsfile"); + final File file2 = new File(tmpDir, "2-0-0-0.tsfile"); + // device 0, device 1, sg 0 + try (final TsFileGenerator generator = new TsFileGenerator(file1)) { + generator.registerTimeseries( + SchemaConfig.DEVICE_0, + Arrays.asList( + SchemaConfig.MEASUREMENT_00, + SchemaConfig.MEASUREMENT_01, + SchemaConfig.MEASUREMENT_02, + SchemaConfig.MEASUREMENT_03, + SchemaConfig.MEASUREMENT_04, + SchemaConfig.MEASUREMENT_05, + SchemaConfig.MEASUREMENT_06, + SchemaConfig.MEASUREMENT_07)); + generator.registerAlignedTimeseries( + SchemaConfig.DEVICE_1, + Arrays.asList( + SchemaConfig.MEASUREMENT_10, + SchemaConfig.MEASUREMENT_11, + SchemaConfig.MEASUREMENT_12, + SchemaConfig.MEASUREMENT_13, + SchemaConfig.MEASUREMENT_14, + SchemaConfig.MEASUREMENT_15, + SchemaConfig.MEASUREMENT_16, + SchemaConfig.MEASUREMENT_17)); + generator.generateData(SchemaConfig.DEVICE_0, 10000, PARTITION_INTERVAL / 10_000, false); + generator.generateData(SchemaConfig.DEVICE_1, 10000, PARTITION_INTERVAL / 10_000, true); + } + + // device 2, device 3, device4, sg 1 + try (final TsFileGenerator generator = new TsFileGenerator(file2)) { + generator.registerTimeseries( + SchemaConfig.DEVICE_2, Collections.singletonList(SchemaConfig.MEASUREMENT_20)); + generator.registerTimeseries( + SchemaConfig.DEVICE_3, Collections.singletonList(SchemaConfig.MEASUREMENT_30)); + generator.registerAlignedTimeseries( + SchemaConfig.DEVICE_4, Collections.singletonList(SchemaConfig.MEASUREMENT_40)); + generator.generateData(SchemaConfig.DEVICE_2, 10000, PARTITION_INTERVAL / 10_000, false); + generator.generateData(SchemaConfig.DEVICE_3, 10000, PARTITION_INTERVAL / 10_000, false); + generator.generateData(SchemaConfig.DEVICE_4, 10000, PARTITION_INTERVAL / 10_000, true); + } + + try (final Connection connection = 
EnvFactory.getEnv().getConnection();
+        final Statement statement = connection.createStatement()) {
+
+      statement.execute(String.format("load \"%s\" sglevel=2", tmpDir.getAbsolutePath()));
+
+      try (final ResultSet resultSet =
+          statement.executeQuery(String.format("select last %s from %s", measurement, device))) {
+        if (resultSet.next()) {
+          final String lastTime = resultSet.getString(ColumnHeaderConstant.TIME);
+          Assert.assertEquals(String.valueOf(PARTITION_INTERVAL), lastTime);
+        } else {
+          Assert.fail("This ResultSet is empty.");
+        }
+      }
+    }
+  }
+
+  private static class PerformanceSchemas {
+
+    private final String database;
+    private final List<MeasurementSchema> measurementSchemas;
+    private final List<String> columnNames;
+    private final List<TSDataType> dataTypes;
+
+    public PerformanceSchemas(
+        String database, String tableName, int measurementNum, int blobMeasurementNum) {
+      this.database = database;
+      columnNames = new ArrayList<>(measurementNum + blobMeasurementNum);
+      dataTypes = new ArrayList<>(measurementNum + blobMeasurementNum);
+      measurementSchemas = new ArrayList<>(measurementNum + blobMeasurementNum);
+
+      columnNames.add("device_id");
+      dataTypes.add(TSDataType.STRING);
+      for (int i = 0; i < measurementNum; i++) {
+        columnNames.add("s" + i);
+        dataTypes.add(TSDataType.INT64);
+        measurementSchemas.add(new MeasurementSchema("s" + i, TSDataType.INT64));
+      }
+      for (int i = 0; i < blobMeasurementNum; i++) {
+        columnNames.add("s" + (measurementNum + i));
+        dataTypes.add(TSDataType.BLOB);
+        measurementSchemas.add(new MeasurementSchema("s" + (measurementNum + i), TSDataType.BLOB));
+      }
+    }
+  }
+
+  private void generateAndLoadOne(
+      int deviceCnt,
+      int measurementCnt,
+      int blobMeasurementCnt,
+      int pointCnt,
+      int offset,
+      PerformanceSchemas schemas,
+      int fileNum,
+      Statement statement)
+      throws Exception {
+    File file = new File("target" + File.separator + fileNum + ".tsfile");
+    try (TsFileWriter tsFileWriter = new TsFileWriter(file)) {
+      for (int i = 0; i < deviceCnt; i++) {
+        // Reset the row index per device: each device writes its own tablet
+        int rowIndex = 0;
+        PartialPath devicePath = new PartialPath(new PlainDeviceID("d" + i));
+        tsFileWriter.registerAlignedTimeseries(devicePath, schemas.measurementSchemas);
+        Tablet tablet =
+            new Tablet("root.db1.d" + i, schemas.measurementSchemas, pointCnt * deviceCnt);
+        for (int j = 0; j < pointCnt; j++) {
+          tablet.addTimestamp(rowIndex, j + offset);
+          for (int k = 0; k < measurementCnt; k++) {
+            tablet.addValue("s" + k, rowIndex, (long) j + offset);
+          }
+          for (int k = 0; k < blobMeasurementCnt; k++) {
+            // Blob columns are named s<measurementCnt>..s<measurementCnt + blobMeasurementCnt - 1>
+            tablet.addValue("s" + (measurementCnt + k), rowIndex, String.valueOf(j + offset));
+          }
+          rowIndex++;
+        }
+        tsFileWriter.writeAligned(tablet);
+      }
+    }
+
+    statement.execute(String.format("load '%s'", file.getAbsolutePath()));
+
+    file.delete();
+  }
+
+  private void generateAndLoadAll(
+      int deviceCnt,
+      int measurementCnt,
+      int blobMeasurementCnt,
+      int pointCnt,
+      PerformanceSchemas schemas,
+      int fileNum)
+      throws Exception {
+    try (final Connection connection = EnvFactory.getEnv().getConnection();
+        final Statement statement = connection.createStatement()) {
+
+      for (int i = 0; i < fileNum; i++) {
+        generateAndLoadOne(
+            deviceCnt,
+            measurementCnt,
+            blobMeasurementCnt,
+            pointCnt,
+            pointCnt * i,
+            schemas,
+            fileNum,
+            statement);
+      }
+    }
+  }
+
+  private long queryLastOnce(int deviceNum, int measurementNum, Statement statement)
+      throws SQLException {
+    try (final ResultSet resultSet =
+        statement.executeQuery(
+            String.format("select last %s from root.db1.d%s", "s" + measurementNum, deviceNum))) {
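+      // The last-point query yields at most one row here; this result set exposes
+      // the value under the "_col0" alias, and -1 signals that the series has no
+      // data yet (or does not exist).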
+      if (resultSet.next()) {
+        return resultSet.getLong("_col0");
+      } else {
+        return -1;
+      }
+    } catch (SQLException e) {
+      if (!e.getMessage().contains("does not exist")) {
+        throw e;
+      }
+    }
+    return -1;
+  }
+
+  @SuppressWarnings("BusyWait")
+  private void queryAll(
+      int deviceCnt,
+      int measurementCnt,
+      int pointCnt,
+      int fileCnt,
+      PerformanceSchemas schemas,
+      RateLimiter rateLimiter)
+      throws SQLException {
+    Random random = new Random();
+    long totalStart = System.currentTimeMillis();
+    List<Long> timeConsumptions = new ArrayList<>();
+
+    try (final Connection connection = EnvFactory.getEnv().getConnection();
+        final Statement statement = connection.createStatement()) {
+
+      while (true) {
+        int deviceNum = random.nextInt(deviceCnt);
+        int measurementNum = random.nextInt(measurementCnt);
+        rateLimiter.acquire();
+        long start = System.nanoTime();
+        long result = queryLastOnce(deviceNum, measurementNum, statement);
+        long timeConsumption = System.nanoTime() - start;
+        if (result == -1) {
+          try {
+            Thread.sleep(1000);
+            continue;
+          } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+          }
+        }
+        System.out.printf(
+            "%s: d%d.s%d %s %s%n", new Date(), deviceNum, measurementNum, result, timeConsumption);
+        timeConsumptions.add(timeConsumption);
+        if (result == (long) pointCnt * fileCnt - 1) {
+          break;
+        }
+      }
+    }
+
+    System.out.printf(
+        "Synchronization ends after %dms, query latency avg %fms %n",
+        System.currentTimeMillis() - totalStart,
+        timeConsumptions.stream().mapToLong(i -> i).average().orElse(0.0) / 1000000);
+  }
+
+  @Ignore("Performance")
+  @Test
+  public void testLoadPerformance() throws Exception {
+    int deviceCnt = 100;
+    int measurementCnt = 100;
+    int blobMeasurementCnt = 10;
+    int pointCnt = 100;
+    int fileCnt = 100000;
+    int queryPerSec = 100;
+    int queryThreadsNum = 10;
+
+    PerformanceSchemas schemas =
+        new PerformanceSchemas("test", "test_table", measurementCnt, blobMeasurementCnt);
+
+    try (final Connection connection = EnvFactory.getEnv().getConnection();
+        final Statement statement = connection.createStatement()) {
+      statement.execute("CREATE DATABASE IF NOT EXISTS " + schemas.database);
+    }
+
+    Thread loadThread =
+        new Thread(
+            () -> {
+              try {
+                generateAndLoadAll(
+                    deviceCnt, measurementCnt, blobMeasurementCnt, pointCnt, schemas, fileCnt);
+              } catch (Throwable e) {
+                e.printStackTrace();
+              }
+            });
+
+    RateLimiter rateLimiter = RateLimiter.create(queryPerSec);
+    List<Thread> queryThreads = new ArrayList<>(queryThreadsNum);
+    for (int i = 0; i < queryThreadsNum; i++) {
+      Thread queryThread =
+          new Thread(
+              () -> {
+                try {
+                  queryAll(
+                      deviceCnt,
+                      measurementCnt + blobMeasurementCnt,
+                      pointCnt,
+                      fileCnt,
+                      schemas,
+                      rateLimiter);
+                } catch (Throwable e) {
+                  e.printStackTrace();
+                }
+              });
+      queryThreads.add(queryThread);
+    }
+
+    loadThread.start();
+    queryThreads.forEach(Thread::start);
+
+    loadThread.join();
+    for (Thread queryThread : queryThreads) {
+      queryThread.join();
+    }
+  }
+
+  private static class SchemaConfig {
+
+    private static final String STORAGE_GROUP_0 = "root.sg.test_0";
+    private static final String STORAGE_GROUP_1 = "root.sg.test_1";
+
+    // device 0, nonaligned, sg 0
+    private static final String DEVICE_0 = "root.sg.test_0.d_0";
+    private static final MeasurementSchema MEASUREMENT_00 =
+        new MeasurementSchema("sensor_00", TSDataType.INT32, TSEncoding.RLE);
+    private static final MeasurementSchema MEASUREMENT_01 =
+        new MeasurementSchema("sensor_01", TSDataType.INT64, TSEncoding.RLE);
+    private static final MeasurementSchema
MEASUREMENT_02 = + new MeasurementSchema("sensor_02", TSDataType.DOUBLE, TSEncoding.GORILLA); + private static final MeasurementSchema MEASUREMENT_03 = + new MeasurementSchema("sensor_03", TSDataType.TEXT, TSEncoding.PLAIN); + private static final MeasurementSchema MEASUREMENT_04 = + new MeasurementSchema("sensor_04", TSDataType.TIMESTAMP, TSEncoding.RLE); + private static final MeasurementSchema MEASUREMENT_05 = + new MeasurementSchema("sensor_05", TSDataType.DATE, TSEncoding.RLE); + private static final MeasurementSchema MEASUREMENT_06 = + new MeasurementSchema("sensor_06", TSDataType.BLOB, TSEncoding.PLAIN); + private static final MeasurementSchema MEASUREMENT_07 = + new MeasurementSchema("sensor_07", TSDataType.STRING, TSEncoding.PLAIN); + + // device 1, aligned, sg 0 + private static final String DEVICE_1 = "root.sg.test_0.a_1"; + private static final MeasurementSchema MEASUREMENT_10 = + new MeasurementSchema("sensor_10", TSDataType.INT32, TSEncoding.RLE); + private static final MeasurementSchema MEASUREMENT_11 = + new MeasurementSchema("sensor_11", TSDataType.INT64, TSEncoding.RLE); + private static final MeasurementSchema MEASUREMENT_12 = + new MeasurementSchema("sensor_12", TSDataType.DOUBLE, TSEncoding.GORILLA); + private static final MeasurementSchema MEASUREMENT_13 = + new MeasurementSchema("sensor_13", TSDataType.TEXT, TSEncoding.PLAIN); + private static final MeasurementSchema MEASUREMENT_14 = + new MeasurementSchema("sensor_14", TSDataType.TIMESTAMP, TSEncoding.RLE); + private static final MeasurementSchema MEASUREMENT_15 = + new MeasurementSchema("sensor_15", TSDataType.DATE, TSEncoding.RLE); + private static final MeasurementSchema MEASUREMENT_16 = + new MeasurementSchema("sensor_16", TSDataType.BLOB, TSEncoding.PLAIN); + private static final MeasurementSchema MEASUREMENT_17 = + new MeasurementSchema("sensor_17", TSDataType.STRING, TSEncoding.PLAIN); + + // device 2, non aligned, sg 1 + private static final String DEVICE_2 = "root.sg.test_1.d_2"; + private static final MeasurementSchema MEASUREMENT_20 = + new MeasurementSchema("sensor_20", TSDataType.INT32, TSEncoding.RLE); + + // device 3, non aligned, sg 1 + private static final String DEVICE_3 = "root.sg.test_1.d_3"; + private static final MeasurementSchema MEASUREMENT_30 = + new MeasurementSchema("sensor_30", TSDataType.INT32, TSEncoding.RLE); + + // device 4, aligned, sg 1 + private static final String DEVICE_4 = "root.sg.test_1.a_4"; + private static final MeasurementSchema MEASUREMENT_40 = + new MeasurementSchema("sensor_40", TSDataType.INT32, TSEncoding.RLE); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBLoadTsFileIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBLoadTsFileIT.java index c1b1fdf91091f..12247e277abdc 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBLoadTsFileIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBLoadTsFileIT.java @@ -20,6 +20,7 @@ package org.apache.iotdb.db.it; import org.apache.iotdb.commons.auth.entity.PrivilegeType; +import org.apache.iotdb.db.it.utils.TestUtils; import org.apache.iotdb.db.queryengine.common.header.ColumnHeaderConstant; import org.apache.iotdb.it.env.EnvFactory; import org.apache.iotdb.it.framework.IoTDBTestRunner; @@ -31,10 +32,12 @@ import org.apache.tsfile.enums.TSDataType; import org.apache.tsfile.file.metadata.enums.TSEncoding; import org.apache.tsfile.read.common.Path; +import org.apache.tsfile.utils.Pair; import org.apache.tsfile.write.schema.MeasurementSchema; 
import org.junit.After; import org.junit.Assert; import org.junit.Before; +import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; @@ -47,6 +50,7 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -55,6 +59,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import static org.apache.iotdb.db.it.utils.TestUtils.assertNonQueryTestFail; import static org.apache.iotdb.db.it.utils.TestUtils.createUser; @@ -542,7 +547,9 @@ public void testLoadWithOnSuccess() throws Exception { final Statement statement = connection.createStatement()) { statement.execute( - String.format("load \"%s\" sglevel=2 onSuccess=none", file1.getAbsolutePath())); + String.format( + "load \"%s\" with ('database-level'='2', 'on-success'='none')", + file1.getAbsolutePath())); try (final ResultSet resultSet = statement.executeQuery("select count(*) from root.** group by level=1,2")) { @@ -560,7 +567,9 @@ public void testLoadWithOnSuccess() throws Exception { final Statement statement = connection.createStatement()) { statement.execute( - String.format("load \"%s\" sglevel=2 onSuccess=delete", file2.getAbsolutePath())); + String.format( + "load \"%s\" with ('database-level'='2', 'on-success'='delete')", + file2.getAbsolutePath())); try (final ResultSet resultSet = statement.executeQuery("select count(*) from root.** group by level=1,2")) { @@ -760,7 +769,6 @@ public void testLoadWithMods() throws Exception { generator.generateData(SchemaConfig.DEVICE_0, 100000, PARTITION_INTERVAL / 10_000, false); generator.generateData(SchemaConfig.DEVICE_1, 100000, PARTITION_INTERVAL / 10_000, true); generator.generateDeletion(SchemaConfig.DEVICE_0, 10); - generator.generateDeletion(SchemaConfig.DEVICE_1, 10); writtenPoint1 = generator.getTotalNumber(); } @@ -774,9 +782,11 @@ public void testLoadWithMods() throws Exception { generator.registerTimeseries( SchemaConfig.DEVICE_3, Collections.singletonList(SchemaConfig.MEASUREMENT_30)); generator.registerAlignedTimeseries( - SchemaConfig.DEVICE_4, Collections.singletonList(SchemaConfig.MEASUREMENT_40)); + SchemaConfig.DEVICE_4, + new ArrayList<>(Arrays.asList(SchemaConfig.MEASUREMENT_30, SchemaConfig.MEASUREMENT_40))); generator.generateData(SchemaConfig.DEVICE_2, 100, PARTITION_INTERVAL / 10_000, false); generator.generateData(SchemaConfig.DEVICE_3, 100, PARTITION_INTERVAL / 10_000, false); + generator.generateDeletion(SchemaConfig.DEVICE_3); generator.generateData(SchemaConfig.DEVICE_4, 100, PARTITION_INTERVAL / 10_000, true); generator.generateDeletion(SchemaConfig.DEVICE_2, 2); generator.generateDeletion(SchemaConfig.DEVICE_4, 2); @@ -784,6 +794,7 @@ public void testLoadWithMods() throws Exception { generator.generateData(SchemaConfig.DEVICE_4, 100, PARTITION_INTERVAL / 10_000, true); generator.generateDeletion(SchemaConfig.DEVICE_2, 2); generator.generateDeletion(SchemaConfig.DEVICE_4, 2); + generator.generateDeletion(SchemaConfig.DEVICE_4, SchemaConfig.MEASUREMENT_30); writtenPoint2 = generator.getTotalNumber(); } @@ -803,6 +814,10 @@ public void testLoadWithMods() throws Exception { Assert.fail("This ResultSet is empty."); } } + + TestUtils.assertSingleResultSetEqual( + TestUtils.executeQueryWithRetry(statement, "count timeSeries"), + Collections.singletonMap("count(timeseries)", "18")); } } @@ 
-894,6 +909,75 @@ public void testLoadLocally() throws Exception { } } + @Test + @Ignore("Load with conversion is currently banned") + public void testLoadWithConvertOnTypeMismatch() throws Exception { + + List<Pair<MeasurementSchema, MeasurementSchema>> measurementSchemas = + generateMeasurementSchemasForDataTypeConvertion(); + + final File file = new File(tmpDir, "1-0-0-0.tsfile"); + + long writtenPoint = 0; + List<MeasurementSchema> schemaList1 = + measurementSchemas.stream().map(pair -> pair.left).collect(Collectors.toList()); + List<MeasurementSchema> schemaList2 = + measurementSchemas.stream().map(pair -> pair.right).collect(Collectors.toList()); + + try (final TsFileGenerator generator = new TsFileGenerator(file)) { + generator.registerTimeseries(SchemaConfig.DEVICE_0, schemaList2); + + generator.generateData(SchemaConfig.DEVICE_0, 100, PARTITION_INTERVAL / 10_000, false); + + writtenPoint = generator.getTotalNumber(); + } + + try (final Connection connection = EnvFactory.getEnv().getConnection(); + final Statement statement = connection.createStatement()) { + + for (MeasurementSchema schema : schemaList1) { + statement.execute(convert2SQL(SchemaConfig.DEVICE_0, schema)); + } + + statement.execute(String.format("load \"%s\" ", file.getAbsolutePath())); + + try (final ResultSet resultSet = + statement.executeQuery("select count(*) from root.** group by level=1,2")) { + if (resultSet.next()) { + final long sgCount = resultSet.getLong("count(root.sg.test_0.*.*)"); + Assert.assertEquals(writtenPoint, sgCount); + } else { + Assert.fail("This ResultSet is empty."); + } + } + } + } + + private List<Pair<MeasurementSchema, MeasurementSchema>> + generateMeasurementSchemasForDataTypeConvertion() { + TSDataType[] dataTypes = { + TSDataType.STRING, + TSDataType.TEXT, + TSDataType.BLOB, + TSDataType.TIMESTAMP, + TSDataType.BOOLEAN, + TSDataType.DATE, + TSDataType.DOUBLE, + TSDataType.FLOAT, + TSDataType.INT32, + TSDataType.INT64 + }; + List<Pair<MeasurementSchema, MeasurementSchema>> pairs = new ArrayList<>(); + + for (TSDataType type : dataTypes) { + for (TSDataType dataType : dataTypes) { + String id = String.format("%s2%s", type.name(), dataType.name()); + pairs.add(new Pair<>(new MeasurementSchema(id, type), new MeasurementSchema(id, dataType))); + } + } + return pairs; + } + private static class SchemaConfig { private static final String STORAGE_GROUP_0 = "root.sg.test_0"; private static final String STORAGE_GROUP_1 = "root.sg.test_1"; diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBPartialInsertionIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBPartialInsertionIT.java index 074da6873eea0..182167be41c9f 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBPartialInsertionIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBPartialInsertionIT.java @@ -108,7 +108,7 @@ public void testPartialInsertionRestart() throws SQLException { EnvironmentUtils.restartDaemon(); StorageEngine.getInstance().recover(); // wait for recover - while (!StorageEngine.getInstance().isAllSgReady()) { + while (!StorageEngine.getInstance().isReadyForReadAndWrite()) { Thread.sleep(500); time += 500; if (time > 10000) { diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBRestartIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBRestartIT.java index 069036e10a534..f423b5928ee3f 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBRestartIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBRestartIT.java @@ -315,8 +315,6 @@ public void testRecoverWALDeleteSchema() throws Exception { @Test public void
testRecoverWALDeleteSchemaCheckResourceTime() throws Exception { IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig(); - int avgSeriesPointNumberThreshold = config.getAvgSeriesPointNumberThreshold(); - config.setAvgSeriesPointNumberThreshold(2); long tsFileSize = config.getSeqTsFileSize(); long unFsFileSize = config.getSeqTsFileSize(); config.setSeqTsFileSize(10000000); @@ -327,6 +325,7 @@ public void testRecoverWALDeleteSchemaCheckResourceTime() throws Exception { statement.execute("create timeseries root.turbine1.d1.s1 with datatype=INT64"); statement.execute("insert into root.turbine1.d1(timestamp,s1) values(1,1)"); statement.execute("insert into root.turbine1.d1(timestamp,s1) values(2,1)"); + statement.execute("flush"); statement.execute("create timeseries root.turbine1.d1.s2 with datatype=BOOLEAN"); statement.execute("insert into root.turbine1.d1(timestamp,s2) values(3,true)"); statement.execute("insert into root.turbine1.d1(timestamp,s2) values(4,true)"); @@ -350,7 +349,6 @@ public void testRecoverWALDeleteSchemaCheckResourceTime() throws Exception { assertEquals(2, cnt); } - config.setAvgSeriesPointNumberThreshold(avgSeriesPointNumberThreshold); config.setSeqTsFileSize(tsFileSize); config.setUnSeqTsFileSize(unFsFileSize); } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBSetConfigurationIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBSetConfigurationIT.java index f6e9f98726b5b..09a3a29511276 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBSetConfigurationIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBSetConfigurationIT.java @@ -20,6 +20,7 @@ package org.apache.iotdb.db.it; import org.apache.iotdb.commons.conf.CommonConfig; +import org.apache.iotdb.commons.conf.ConfigurationFileUtils; import org.apache.iotdb.it.env.EnvFactory; import org.apache.iotdb.it.env.cluster.node.AbstractNodeWrapper; import org.apache.iotdb.it.framework.IoTDBTestRunner; @@ -38,10 +39,16 @@ import java.nio.file.Files; import java.sql.Connection; import java.sql.ResultSet; +import java.sql.SQLException; import java.sql.Statement; import java.util.Arrays; +import java.util.Properties; import java.util.concurrent.TimeUnit; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + @RunWith(IoTDBTestRunner.class) @Category({LocalStandaloneIT.class}) public class IoTDBSetConfigurationIT { @@ -55,6 +62,42 @@ public static void tearDown() throws Exception { EnvFactory.getEnv().cleanClusterEnvironment(); } + @Test + public void testSetConfigurationWithUndefinedConfigKey() { + String expectedExceptionMsg = + "301: ignored config items: [a] because they are immutable or undefined."; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + executeAndExpectException( + statement, "set configuration \"a\"=\"false\"", expectedExceptionMsg); + int configNodeNum = EnvFactory.getEnv().getConfigNodeWrapperList().size(); + int dataNodeNum = EnvFactory.getEnv().getDataNodeWrapperList().size(); + + for (int i = 0; i < configNodeNum; i++) { + executeAndExpectException( + statement, "set configuration \"a\"=\"false\" on " + i, expectedExceptionMsg); + } + for (int i = 0; i < dataNodeNum; i++) { + int dnId = configNodeNum + i; + executeAndExpectException( + statement, "set configuration \"a\"=\"false\" on " + dnId, expectedExceptionMsg); + } + } catch (Exception e) { + 
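// Note: the two loops above assume node ids are handed out to confignodes first
// (0..configNodeNum-1) and then to datanodes (hence dnId = configNodeNum + i), so the
// "on <id>" clause gets exercised against every node in the cluster.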
Assert.fail(e.getMessage()); + } + } + + private void executeAndExpectException( + Statement statement, String sql, String expectedContentInExceptionMsg) { + try { + statement.execute(sql); + } catch (Exception e) { + Assert.assertTrue(e.getMessage().contains(expectedContentInExceptionMsg)); + return; + } + Assert.fail(); + } + @Test public void testSetConfiguration() { try (Connection connection = EnvFactory.getEnv().getConnection(); @@ -101,7 +144,8 @@ public void testSetClusterName() throws Exception { EnvFactory.getEnv().getDataNodeWrapper(0).start(); // set cluster name on datanode Awaitility.await() - .atMost(10, TimeUnit.SECONDS) + .atMost(30, TimeUnit.SECONDS) + .pollDelay(1, TimeUnit.SECONDS) .until( () -> { try (Connection connection = EnvFactory.getEnv().getConnection(); @@ -118,24 +162,110 @@ Awaitility.await() .atMost(10, TimeUnit.SECONDS) .until(() -> !EnvFactory.getEnv().getDataNodeWrapper(0).isAlive()); + AbstractNodeWrapper datanode = EnvFactory.getEnv().getDataNodeWrapper(0); Assert.assertTrue( checkConfigFileContains(EnvFactory.getEnv().getDataNodeWrapper(0), "cluster_name=yy")); + + // Modify the config file manually because the datanode cannot restart + Properties properties = new Properties(); + properties.put("cluster_name", "xx"); + ConfigurationFileUtils.updateConfiguration(getConfigFile(datanode), properties, null); + EnvFactory.getEnv().getDataNodeWrapper(0).stop(); + EnvFactory.getEnv().getDataNodeWrapper(0).start(); + // wait until the datanode restarts successfully (won't do any meaningful modification) + Awaitility.await() + .atMost(30, TimeUnit.SECONDS) + .pollDelay(1, TimeUnit.SECONDS) + .until( + () -> { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + statement.execute("set configuration \"cluster_name\"=\"xx\" on 1"); + } catch (Exception e) { + return false; + } + return true; + }); } private static boolean checkConfigFileContains( AbstractNodeWrapper nodeWrapper, String...
contents) { try { - String systemPropertiesPath = - nodeWrapper.getNodePath() - + File.separator - + "conf" - + File.separator - + CommonConfig.SYSTEM_CONFIG_NAME; - File f = new File(systemPropertiesPath); - String fileContent = new String(Files.readAllBytes(f.toPath())); + String fileContent = new String(Files.readAllBytes(getConfigFile(nodeWrapper).toPath())); return Arrays.stream(contents).allMatch(fileContent::contains); } catch (IOException ignore) { return false; } } + + private static File getConfigFile(AbstractNodeWrapper nodeWrapper) { + String systemPropertiesPath = + nodeWrapper.getNodePath() + + File.separator + + "conf" + + File.separator + + CommonConfig.SYSTEM_CONFIG_NAME; + return new File(systemPropertiesPath); + } + + @Test + public void testSetDefaultSGLevel() throws SQLException { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + // legal value + statement.execute("set configuration \"default_storage_group_level\"=\"3\""); + statement.execute("INSERT INTO root.a.b.c.d1(timestamp, s1) VALUES (1, 1)"); + ResultSet databases = statement.executeQuery("show databases"); + databases.next(); + Assert.assertEquals("root.a.b.c", databases.getString(1)); + assertFalse(databases.next()); + + // path too short + try { + statement.execute("INSERT INTO root.fail(timestamp, s1) VALUES (1, 1)"); + } catch (SQLException e) { + assertEquals( + "509: An error occurred when executing getDeviceToDatabase():root.fail is not a legal path, because it is no longer than default sg level: 3", + e.getMessage()); + } + + // illegal value + try { + statement.execute("set configuration \"default_storage_group_level\"=\"-1\""); + } catch (SQLException e) { + assertTrue(e.getMessage().contains("Illegal defaultStorageGroupLevel: -1, should >= 1")); + } + + // Failed updates will not change the files. 
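// (The two assertions below pin that down: the rejected value -1 must not be written back to
// the node's system properties file, while the previously accepted value 3 must still be there.)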
+ assertFalse( + checkConfigFileContains( + EnvFactory.getEnv().getDataNodeWrapper(0), "default_storage_group_level=-1")); + assertTrue( + checkConfigFileContains( + EnvFactory.getEnv().getDataNodeWrapper(0), "default_storage_group_level=3")); + } + + // can start with an illegal value + EnvFactory.getEnv().cleanClusterEnvironment(); + EnvFactory.getEnv().getConfig().getCommonConfig().setDefaultStorageGroupLevel(-1); + EnvFactory.getEnv().initClusterEnvironment(); + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + statement.execute("INSERT INTO root.a.b.c.d1(timestamp, s1) VALUES (1, 1)"); + ResultSet databases = statement.executeQuery("show databases"); + databases.next(); + // the default value should take effect + Assert.assertEquals("root.a", databases.getString(1)); + assertFalse(databases.next()); + + // create timeseries with an illegal path + try { + statement.execute("CREATE TIMESERIES root.db1.s3 WITH datatype=INT32"); + } catch (SQLException e) { + assertEquals( + "509: An error occurred when executing getDeviceToDatabase():root.db1 is not a legal path, because it is no longer than default sg level: 3", + e.getMessage()); + } + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBSimpleQueryIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBSimpleQueryIT.java index 19b487193834b..441b3afbbcb05 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBSimpleQueryIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBSimpleQueryIT.java @@ -1188,4 +1188,42 @@ public void testNewDataType() { fail(); } } + + @Test + public void testIllegalDateType() { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + + statement.execute("CREATE DATABASE root.sg1"); + statement.execute( + "CREATE TIMESERIES root.sg1.d1.s4 WITH DATATYPE=DATE, ENCODING=PLAIN, COMPRESSOR=SNAPPY"); + statement.execute( + "CREATE TIMESERIES root.sg1.d1.s5 WITH DATATYPE=TIMESTAMP, ENCODING=PLAIN, COMPRESSOR=SNAPPY"); + try { + statement.execute("insert into root.sg1.d1(timestamp, s4) values(1, '2022-04-31')"); + fail(); + } catch (Exception e) { + assertEquals( + TSStatusCode.METADATA_ERROR.getStatusCode() + + ": Fail to insert measurements [s4] caused by [data type is not consistent, " + + "input '2022-04-31', registered DATE because Invalid date format. " + + "Please use YYYY-MM-DD format.]", + e.getMessage()); + } + try { + statement.execute( + "insert into root.sg1.d1(timestamp, s5) values(1999-04-31T00:00:00.000+08:00, 1999-04-31T00:00:00.000+08:00)"); + fail(); + } catch (Exception e) { + assertEquals( + TSStatusCode.SEMANTIC_ERROR.getStatusCode() + + ": Input time format 1999-04-31T00:00:00.000+08:00 error. 
" + + "Input like yyyy-MM-dd HH:mm:ss, yyyy-MM-ddTHH:mm:ss " + + "or refer to user document for more info.", + e.getMessage()); + } + } catch (SQLException e) { + fail(); + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBSyntaxConventionStringLiteralIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBSyntaxConventionStringLiteralIT.java index 95204c783ec21..06ed9dd4397da 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBSyntaxConventionStringLiteralIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBSyntaxConventionStringLiteralIT.java @@ -255,8 +255,7 @@ public void testIllegalFilePath() { String errorMsg1 = TSStatusCode.SQL_PARSE_ERROR.getStatusCode() - + ": Error occurred while parsing SQL to physical plan: " - + "line 1:7 mismatched input 'path' expecting STRING_LITERAL"; + + ": Error occurred while parsing SQL to physical plan: line 1:7 no viable alternative at input 'REMOVE path'"; try (Connection connection = EnvFactory.getEnv().getConnection(); Statement statement = connection.createStatement()) { statement.execute("REMOVE path"); diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/aggregation/IoTDBCountIfIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/aggregation/IoTDBCountIfIT.java index bc82992065bbd..7e1bf625bf82d 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/aggregation/IoTDBCountIfIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/aggregation/IoTDBCountIfIT.java @@ -208,6 +208,19 @@ public void testContIfWithoutTransform() { resultSetEqualTest("select Count_if(s3, 1) from root.db.d1", expectedHeader, retArray); } + @Test + public void testMultiAttributes() { + String[] expectedHeader = + new String[] { + "Count_if(root.db.d1.s3, 1, \"attr1\"=\"1\", \"attr2\"=\"2\", \"attr3\"=\"3\")" + }; + String[] retArray = new String[] {"1,"}; + resultSetEqualTest( + "select Count_if(s3, 1, \"attr1\"=\"1\",\"attr2\"=\"2\",\"attr3\"=\"3\") from root.db.d1", + expectedHeader, + retArray); + } + @Test public void testContIfWithGroupByLevel() { String[] expectedHeader = new String[] {"Count_if(root.db.*.s1 = 0 & root.db.*.s2 = 0, 3)"}; diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/alignbydevice/IoTDBAlignByDeviceIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/alignbydevice/IoTDBAlignByDeviceIT.java index cc5fdeb5b27fe..1bb3e76061dab 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/alignbydevice/IoTDBAlignByDeviceIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/alignbydevice/IoTDBAlignByDeviceIT.java @@ -100,6 +100,7 @@ public class IoTDBAlignByDeviceIT { "insert into root.vehicle.d1(timestamp,s0) values(1,999)", "insert into root.vehicle.d1(timestamp,s0) values(1000,888)", "insert into root.other.d1(timestamp,s0) values(2, 3.14)", + "insert into root.other.d2(timestamp,s6) values(6, 6.66)", }; @BeforeClass @@ -1184,4 +1185,48 @@ public void removeDeviceWhereMeasurementWhenNoDeviceSelectTest() { + e.getMessage()); } } + + @Test + public void nonExistMeasurementInHavingTest() { + String[] retArray = + new String[] { + "1,root.other.d1,3.14,null,", "5,root.other.d2,null,6.66,", + }; + + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + + try (ResultSet resultSet = + statement.executeQuery( + "select last_value(s0),last_value(s6) from root.other.** group by ([1,10),2ms) having last_value(s0) is 
not null or last_value(s6) is not null align by device")) { + ResultSetMetaData resultSetMetaData = resultSet.getMetaData(); + List<Integer> actualIndexToExpectedIndexList = + checkHeader( + resultSetMetaData, + "Time,Device,last_value(s0),last_value(s6)", + new int[] { + Types.TIMESTAMP, Types.VARCHAR, Types.FLOAT, Types.DOUBLE, + }); + + int cnt = 0; + while (resultSet.next()) { + String[] expectedStrings = retArray[cnt].split(","); + StringBuilder expectedBuilder = new StringBuilder(); + StringBuilder actualBuilder = new StringBuilder(); + for (int i = 1; i <= resultSetMetaData.getColumnCount(); i++) { + actualBuilder.append(resultSet.getString(i)).append(","); + expectedBuilder + .append(expectedStrings[actualIndexToExpectedIndexList.get(i - 1)]) + .append(","); + } + Assert.assertEquals(expectedBuilder.toString(), actualBuilder.toString()); + cnt++; + } + Assert.assertEquals(retArray.length, cnt); + } + } catch (Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/alignbydevice/IoTDBAlignByDeviceWithTemplateIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/alignbydevice/IoTDBAlignByDeviceWithTemplateIT.java index 05e2104b904d2..a71cb77371359 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/alignbydevice/IoTDBAlignByDeviceWithTemplateIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/alignbydevice/IoTDBAlignByDeviceWithTemplateIT.java @@ -59,6 +59,9 @@ public class IoTDBAlignByDeviceWithTemplateIT { "INSERT INTO root.sg2.d4(timestamp,s1,s2,s3) values(1,1111.1,true,1111), (5,5555.5,false,5555);", }; + String[] expectedHeader; + String[] retArray; + @BeforeClass public static void setUp() throws Exception { EnvFactory.getEnv().initClusterEnvironment(); @@ -70,11 +73,32 @@ public static void tearDown() throws Exception { EnvFactory.getEnv().cleanClusterEnvironment(); } + @Test + public void singleDeviceTest() { + expectedHeader = new String[] {"Time,Device,s3,s1,s2"}; + retArray = + new String[] { + "1,root.sg1.d1,1,1.1,false,", + }; + resultSetEqualTest( + "SELECT * FROM root.sg1.d1 order by time desc offset 1 limit 1 ALIGN BY DEVICE;", + expectedHeader, + retArray); + retArray = + new String[] { + "1,root.sg2.d1,1,1.1,false,", + }; + resultSetEqualTest( + "SELECT * FROM root.sg2.d1 order by time desc offset 1 limit 1 ALIGN BY DEVICE;", + expectedHeader, + retArray); + } + @Test public void selectWildcardNoFilterTest() { // 1.
order by device - String[] expectedHeader = new String[] {"Time,Device,s3,s1,s2"}; - String[] retArray = + expectedHeader = new String[] {"Time,Device,s3,s1,s2"}; + retArray = new String[] { "1,root.sg1.d1,1,1.1,false,", "2,root.sg1.d1,2,2.2,false,", diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/alignbydevice/IoTDBOrderByLimitOffsetAlignByDeviceIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/alignbydevice/IoTDBOrderByLimitOffsetAlignByDeviceIT.java index a595c5a96cbee..60cf1a5d5f0c1 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/alignbydevice/IoTDBOrderByLimitOffsetAlignByDeviceIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/alignbydevice/IoTDBOrderByLimitOffsetAlignByDeviceIT.java @@ -53,11 +53,30 @@ public static void tearDown() throws Exception { EnvFactory.getEnv().cleanClusterEnvironment(); } + String[] expectedHeader; + String[] retArray; + + @Test + public void singleDeviceTest() { + expectedHeader = new String[] {"Time,Device,precipitation"}; + retArray = new String[] {"1668960000200,root.weather.London,1667492178318,"}; + resultSetEqualTest( + "select precipitation from root.weather.London where precipitation>1667492178118 order by time offset 1 limit 1 align by device", + expectedHeader, + retArray); + + retArray = new String[] {"1668960000200,root.weather.London,1667492178318,"}; + resultSetEqualTest( + "select precipitation from root.weather.London where precipitation>1667492178118 order by precipitation offset 1 limit 1 align by device", + expectedHeader, + retArray); + } + @Test public void orderByCanNotPushLimitTest() { // 1. value filter, can not push down LIMIT - String[] expectedHeader = new String[] {"Time,Device,s1"}; - String[] retArray = new String[] {"3,root.db.d1,111,"}; + expectedHeader = new String[] {"Time,Device,s1"}; + retArray = new String[] {"3,root.db.d1,111,"}; resultSetEqualTest( "SELECT * FROM root.db.** WHERE s1>40 ORDER BY TIME LIMIT 1 ALIGN BY DEVICE;", expectedHeader, diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/aligned/IoTDBPredicatePushDownIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/aligned/IoTDBPredicatePushDownIT.java index 1a65682fc5b83..f9639d3ea2d2c 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/aligned/IoTDBPredicatePushDownIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/aligned/IoTDBPredicatePushDownIT.java @@ -74,6 +74,15 @@ public void testAlignedRawDataAlignByTime1() { resultSetEqualTest( "select s2, s3 from root.sg1.d1 where s2 - 1 >= 9 and s2 < 30", expectedHeader1, retArray1); + resultSetEqualTest( + "select s2, s3 from root.sg1.d1 where 9 <= s2 - 1 and 30 > s2", expectedHeader1, retArray1); + + retArray1 = new String[] {"20,20,20,"}; + resultSetEqualTest( + "select s2, s3 from root.sg1.d1 where 9 <= s2 - 1 and 30 > s2 and 19 < time", + expectedHeader1, + retArray1); + String expectedHeader2 = "Time,root.sg1.d1.s3,"; String[] retArray2 = new String[] { diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/auth/IoTDBAuthIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/auth/IoTDBAuthIT.java index 93165a16ff6a6..18a2586bde36b 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/auth/IoTDBAuthIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/auth/IoTDBAuthIT.java @@ -25,7 +25,10 @@ import org.apache.iotdb.it.framework.IoTDBTestRunner; import org.apache.iotdb.itbase.category.ClusterIT; import 
org.apache.iotdb.itbase.category.LocalStandaloneIT; +import org.apache.iotdb.jdbc.IoTDBSQLException; +import org.apache.iotdb.rpc.TSStatusCode; + +import com.google.common.collect.ImmutableList; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -45,6 +48,7 @@ import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.concurrent.Callable; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -1244,4 +1248,83 @@ public void testCreateRoleIdentifierName() throws SQLException { adminStmt.execute("create role tail"); adminStmt.execute("create user tail 'password'"); } + + @Test + public void testClusterManagementSqlOfTreeModel() throws Exception { + ImmutableList<String> clusterManagementSQLList = + ImmutableList.of( + // show cluster, nodes, regions, + "show ainodes", + "show confignodes", + "show datanodes", + "show cluster", + "show clusterid", + "show regions", + "show data regionid where database=root.**", + + // remove node + "remove datanode 0", + "remove confignode 0", + + // region operation + "migrate region 0 from 1 to 2", + "reconstruct region 0 on 1", + "extend region 0 to 1", + "remove region 0 from 1", + + // others + "show timeslotid where database=root.test", + "count timeslotid where database=root.test", + "show data seriesslotid where database=root.test", + "verify connection"); + + try (Connection adminCon = EnvFactory.getEnv().getConnection(); + Statement adminStmt = adminCon.createStatement()) { + adminStmt.execute("CREATE USER Jack 'temppw'"); + + try (Connection JackConnection = EnvFactory.getEnv().getConnection("Jack", "temppw"); + Statement Jack = JackConnection.createStatement()) { + testClusterManagementSqlImpl( + clusterManagementSQLList, + () -> adminStmt.execute("GRANT MAINTAIN ON root.** TO USER Jack"), + Jack); + } + } + } + + private void testClusterManagementSqlImpl( + List<String> clusterManagementSqlList, Callable<Boolean> giveJackAuthority, Statement Jack) + throws Exception { + // Jack has no authority to execute these SQLs + for (String sql : clusterManagementSqlList) { + try { + Jack.execute(sql); + } catch (IoTDBSQLException e) { + if (TSStatusCode.NO_PERMISSION.getStatusCode() != e.getErrorCode()) { + fail( + String.format( + "SQL should fail because of no permission, but the error code is %d: %s", + e.getErrorCode(), sql)); + } + continue; + } + fail(String.format("SQL should fail because of no permission: %s", sql)); + } + + // Give Jack authority + giveJackAuthority.call(); + + // Jack is able to execute these SQLs now + for (String sql : clusterManagementSqlList) { + try { + // No exception is fine + Jack.execute(sql); + } catch (IoTDBSQLException e) { + // If there is an exception, error code must not be NO_PERMISSION + if (TSStatusCode.NO_PERMISSION.getStatusCode() == e.getErrorCode()) { + fail(String.format("SQL should not fail with no permission: %s", sql)); + } + } + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/auth/IoTDBSystemPermissionIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/auth/IoTDBSystemPermissionIT.java index 4b3cebd68f1b3..e8dd4e5f650ff 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/auth/IoTDBSystemPermissionIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/auth/IoTDBSystemPermissionIT.java @@ -247,7 +247,5 @@ public void adminOperationsTest() { "803: Only the admin user can perform this operation", "test", "test123"); - assertTestFail( - "show regions", "803: Only the admin user
can perform this operation", "test", "test123"); } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/groupby/IoTDBGroupByNaturalMonthIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/groupby/IoTDBGroupByNaturalMonthIT.java index e08eb4c5f1d0a..9d56acf518fbf 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/groupby/IoTDBGroupByNaturalMonthIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/groupby/IoTDBGroupByNaturalMonthIT.java @@ -18,6 +18,8 @@ */ package org.apache.iotdb.db.it.groupby; +import org.apache.iotdb.isession.ISession; +import org.apache.iotdb.isession.SessionDataSet; import org.apache.iotdb.it.env.EnvFactory; import org.apache.iotdb.it.framework.IoTDBTestRunner; import org.apache.iotdb.itbase.category.ClusterIT; @@ -49,6 +51,7 @@ import static org.apache.iotdb.db.utils.constant.TestConstant.sum; import static org.apache.iotdb.itbase.constant.TestConstant.TIMESTAMP_STR; import static org.apache.iotdb.itbase.constant.TestConstant.count; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; @RunWith(IoTDBTestRunner.class) @@ -75,6 +78,8 @@ public class IoTDBGroupByNaturalMonthIT { calendar.add(Calendar.MONTH, 1), i = calendar.getTimeInMillis()) { dataSet.add("insert into root.test.d1(timestamp, s1) values (" + i + ", 1)"); } + + dataSet.add("insert into root.testTimeZone.d1(timestamp, s1) values (1, 1)"); } protected static final DateFormat df = new SimpleDateFormat("MM/dd/yyyy:HH:mm:ss"); @@ -413,4 +418,27 @@ public void groupByNaturalMonthWithMixedUnit2() { null, currPrecision); } + + @Test + public void groupByNaturalMonthWithNonSystemDefaultTimeZone() { + try (ISession session = + EnvFactory.getEnv().getSessionConnection(TimeZone.getTimeZone("UTC+09:00").toZoneId())) { + + SessionDataSet sessionDataSet = + session.executeQueryStatement( + "select count(s1) from root.testTimeZone.d1 group by([2024-07-01, 2024-08-01), 1mo)"); + + int count = 0; + while (sessionDataSet.hasNext()) { + sessionDataSet.next(); + count++; + } + assertEquals(1, count); + + sessionDataSet.closeOperationHandle(); + } catch (Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/groupby/IoTDBGroupByUnseqIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/groupby/IoTDBGroupByUnseqIT.java index 111217e56857f..a15991c487683 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/groupby/IoTDBGroupByUnseqIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/groupby/IoTDBGroupByUnseqIT.java @@ -110,7 +110,7 @@ public void test2() { .getConfig() .getCommonConfig() .setMaxNumberOfPointsInPage(4) - .setAvgSeriesPointNumberThreshold(2); + .setTargetChunkPointNum(2); EnvFactory.getEnv().initClusterEnvironment(); String[] expectedHeader = new String[] {TIMESTAMP_STR, count("root.sg2.d1.s1")}; String[] retArray = new String[] {"5,1,", "10,1,", "15,2,", "20,0,", "25,1,"}; diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/last/IoTDBLastQueryLastCacheIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/last/IoTDBLastQueryLastCacheIT.java index 6d30db0008564..feb710be4fa5c 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/last/IoTDBLastQueryLastCacheIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/last/IoTDBLastQueryLastCacheIT.java @@ -165,4 +165,18 @@ public void cacheHitTest() { testLastQueryOrderByTimeDesc(); testLastQuery1(); } + + @Test + 
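// Note: checks that a LAST query served from the last cache still applies
// ORDER BY time DESC, timeseries DESC together with LIMIT 1 instead of bypassing the sort.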
public void testLastQuerySortWithLimit() { + String[] expectedHeader = + new String[] {TIMESTAMP_STR, TIMESEIRES_STR, VALUE_STR, DATA_TYPE_STR}; + String[] retArray = + new String[] { + "1679477545000,root.ln_1.tb_6141.code_DOUBLE,2.0,DOUBLE,", + }; + resultSetEqualTest( + "select last * from root.ln_1.tb_6141 order by time desc, timeseries desc limit 1;", + expectedHeader, + retArray); + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/last/IoTDBLastQueryWithLimitOffsetIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/last/IoTDBLastQueryWithLimitOffsetIT.java index 062d5a345c3e8..0de811ff271ad 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/last/IoTDBLastQueryWithLimitOffsetIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/last/IoTDBLastQueryWithLimitOffsetIT.java @@ -190,4 +190,38 @@ public void testWithSLimitOrSOffset() { fail(e.getMessage()); } } + + @Test + public void testWithSortLimit() { + String[] retArray = + new String[] { + "2,root.sg.d2.s2,1.0,DOUBLE", + }; + + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + + try (ResultSet resultSet = + statement.executeQuery( + "select last * from root.sg.** order by time desc, timeseries desc limit 1")) { + int cnt = 0; + while (resultSet.next()) { + String ans = + resultSet.getString(ColumnHeaderConstant.TIME) + + "," + + resultSet.getString(ColumnHeaderConstant.TIMESERIES) + + "," + + resultSet.getString(ColumnHeaderConstant.VALUE) + + "," + + resultSet.getString(ColumnHeaderConstant.DATATYPE); + assertEquals(retArray[cnt++], ans); + } + assertEquals(retArray.length, cnt); + } + + } catch (SQLException e) { + e.printStackTrace(); + fail(e.getMessage()); + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/metric/IoTDBMetricIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/metric/IoTDBMetricIT.java index 76e6ddce8170b..3e6f660d4f2e9 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/metric/IoTDBMetricIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/metric/IoTDBMetricIT.java @@ -23,16 +23,19 @@ import org.apache.iotdb.it.framework.IoTDBTestRunner; import org.apache.iotdb.itbase.category.ClusterIT; import org.apache.iotdb.itbase.category.LocalStandaloneIT; +import org.apache.iotdb.metrics.reporter.prometheus.PrometheusReporter; -import org.junit.AfterClass; +import org.junit.After; import org.junit.Assert; -import org.junit.BeforeClass; +import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.nio.charset.StandardCharsets; +import java.util.Base64; import java.util.Collections; import java.util.List; import java.util.regex.Pattern; @@ -65,7 +68,13 @@ public class IoTDBMetricIT { private static final String VALID_LOG_STRING = "This line {} is invalid in prometheus line protocol"; - public static boolean isValidPrometheusTextFormat(String metrics) { + private static final String TEST_USERNAME = "good"; + private static final String TEST_PASSWORD = "??"; + + private static final String WRONG_USERNAME = "bad"; + private static final String WRONG_PASSWORD = "!!"; + + private static boolean isValidPrometheusTextFormat(String metrics) { String[] lines = metrics.split("\\n"); boolean valid = true; @@ -107,8 +116,8 @@ private static boolean isValidTypeLine(String line) { return 
Pattern.matches(TYPE_REGEX, line.trim()); } - @BeforeClass - public static void setUp() throws Exception { + @Before + public void setUp() throws Exception { // Start ConfigNode with Prometheus reporter up EnvFactory.getEnv() .getConfig() @@ -119,21 +128,86 @@ .getConfig() .getDataNodeConfig() .setMetricReporterType(Collections.singletonList("PROMETHEUS")); - EnvFactory.getEnv().initClusterEnvironment(); } - @AfterClass - public static void tearDown() throws Exception { + @After + public void tearDown() throws Exception { EnvFactory.getEnv().cleanClusterEnvironment(); } + @Test + public void testPrometheusReporterWithoutAuth() { + EnvFactory.getEnv().initClusterEnvironment(); + + List<String> metricContents = EnvFactory.getEnv().getMetricPrometheusReporterContents(null); + for (String metricContent : metricContents) { + Assert.assertNotNull(metricContent); + Assert.assertNotEquals(0, metricContent.length()); + Assert.assertTrue(isValidPrometheusTextFormat(metricContent)); + } + } + @Test public void testPrometheusReporter() { - List<String> metricContents = EnvFactory.getEnv().getMetricPrometheusReporterContents(); + EnvFactory.getEnv() + .getConfig() + .getConfigNodeConfig() + .setMetricPrometheusReporterUsername(base64Encode(TEST_USERNAME)) + .setMetricPrometheusReporterPassword(base64Encode(TEST_PASSWORD)); + EnvFactory.getEnv() + .getConfig() + .getDataNodeConfig() + .setMetricPrometheusReporterUsername(base64Encode(TEST_USERNAME)) + .setMetricPrometheusReporterPassword(base64Encode(TEST_PASSWORD)); + EnvFactory.getEnv().initClusterEnvironment(); + + wrongUsernameTest(); + wrongPasswordTest(); + correctUsernameAndPasswordTest(); + } + + private void wrongUsernameTest() { + List<String> metricContents = + EnvFactory.getEnv() + .getMetricPrometheusReporterContents( + buildPrometheusReporterAuthHeader(WRONG_USERNAME, TEST_PASSWORD)); + for (String metricContent : metricContents) { + Assert.assertNull(metricContent); + } + } + + private void wrongPasswordTest() { + List<String> metricContents = + EnvFactory.getEnv() + .getMetricPrometheusReporterContents( + buildPrometheusReporterAuthHeader(TEST_USERNAME, WRONG_PASSWORD)); + for (String metricContent : metricContents) { + Assert.assertNull(metricContent); + } + } + + private void correctUsernameAndPasswordTest() { + List<String> metricContents = + EnvFactory.getEnv() + .getMetricPrometheusReporterContents( + buildPrometheusReporterAuthHeader(TEST_USERNAME, TEST_PASSWORD)); for (String metricContent : metricContents) { Assert.assertNotNull(metricContent); Assert.assertNotEquals(0, metricContent.length()); Assert.assertTrue(isValidPrometheusTextFormat(metricContent)); } } + + private String buildPrometheusReporterAuthHeader(String username, String password) { + if (username == null || username.isEmpty()) { + return null; + } + String raw = username + PrometheusReporter.DIVIDER_BETWEEN_USERNAME_AND_DIVIDER + password; + String base64 = Base64.getEncoder().encodeToString(raw.getBytes(StandardCharsets.UTF_8)); + return PrometheusReporter.BASIC_AUTH_PREFIX + base64; + } + + private static String base64Encode(String raw) { + return Base64.getEncoder().encodeToString(raw.getBytes(StandardCharsets.UTF_8)); + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/orderBy/IoTDBOrderBy2IT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/orderBy/IoTDBOrderBy2IT.java index 131ea8bf3b5c4..51bfdf3dd2a44 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/orderBy/IoTDBOrderBy2IT.java +++
b/integration-test/src/test/java/org/apache/iotdb/db/it/orderBy/IoTDBOrderBy2IT.java @@ -37,6 +37,10 @@ public class IoTDBOrderBy2IT extends IoTDBOrderByIT { public static void setUp() throws Exception { EnvFactory.getEnv().getConfig().getDataNodeCommonConfig().setSortBufferSize(2048); EnvFactory.getEnv().getConfig().getDataNodeCommonConfig().setMaxTsBlockSizeInByte(200); + EnvFactory.getEnv() + .getConfig() + .getDataNodeCommonConfig() + .setQueryMemoryProportion("1:100:200:50:400:200:200:50"); EnvFactory.getEnv().initClusterEnvironment(); insertData(); } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/orderBy/IoTDBOrderByForDebugIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/orderBy/IoTDBOrderByForDebugIT.java index c6f502bd4b54f..2f5807388d130 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/orderBy/IoTDBOrderByForDebugIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/orderBy/IoTDBOrderByForDebugIT.java @@ -104,6 +104,10 @@ public class IoTDBOrderByForDebugIT { @BeforeClass public static void setUp() throws Exception { EnvFactory.getEnv().getConfig().getDataNodeCommonConfig().setSortBufferSize(1024 * 1024L); + EnvFactory.getEnv() + .getConfig() + .getDataNodeCommonConfig() + .setQueryMemoryProportion("1:100:200:50:400:200:200:50"); EnvFactory.getEnv().initClusterEnvironment(); insertData(); } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/orderBy/IoTDBOrderByIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/orderBy/IoTDBOrderByIT.java index 6a65ab1ed7ee2..d1f9d5a2c25a9 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/orderBy/IoTDBOrderByIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/orderBy/IoTDBOrderByIT.java @@ -38,6 +38,7 @@ import java.sql.Statement; import java.util.Objects; +import static org.apache.iotdb.db.it.utils.TestUtils.resultSetEqualTest; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.fail; @@ -97,9 +98,19 @@ public class IoTDBOrderByIT { "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(51536000000,15,3147483648,235.213,\"watermelon\",TRUE)" }; + private static final String[] sql3 = + new String[] { + "create aligned timeseries root.test.dev (v_timestamp TIMESTAMP, v_string STRING, v_date DATE, v_blob BLOB encoding=PLAIN, v_int32 INT32);", + "insert into root.test.dev(timestamp, v_timestamp, v_string, v_date, v_blob, v_int32) aligned values(1, 2024-09-20T06:15:35.000+00:00, 'e1', '2012-12-12', X'108DCD62', 1);" + }; + @BeforeClass public static void setUp() throws Exception { EnvFactory.getEnv().getConfig().getDataNodeCommonConfig().setSortBufferSize(1024 * 1024L); + EnvFactory.getEnv() + .getConfig() + .getDataNodeCommonConfig() + .setQueryMemoryProportion("1:100:200:50:400:200:200:50"); EnvFactory.getEnv().initClusterEnvironment(); insertData(); } @@ -112,12 +123,13 @@ public static void tearDown() throws Exception { protected static void insertData() { try (Connection connection = EnvFactory.getEnv().getConnection(); Statement statement = connection.createStatement()) { - for (String sql : sql) { - statement.execute(sql); - } - for (String sql : sql2) { - statement.execute(sql); + + for (String[] sqlList : java.util.Arrays.asList(sql, sql2, sql3)) { + for (String sql : sqlList) { + statement.execute(sql); + } } + } catch (Exception e) { e.printStackTrace(); } @@ -192,6 +204,23 @@ private void testNormalOrderBy(String sql, int[] ans) { 
} } + @Test + public void newDataTypeTest() { + String[] expectedHeader = + new String[] {"Time,Device,v_int32,v_blob,v_date,v_timestamp,v_string"}; + String[] retArray = + new String[] { + "1,root.test.dev,1,0x108dcd62,2012-12-12,1726812935000,e1,", + }; + + for (String key : new String[] {"time", "v_int32", "v_date", "v_timestamp", "v_string"}) { + resultSetEqualTest( + String.format("SELECT * FROM root.test.dev ORDER BY %s LIMIT 1 ALIGN BY DEVICE", key), + expectedHeader, + retArray); + } + } + // 1. One-level order by test @Test public void orderByTest1() { diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/query/IoTDBQueryDemoIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/query/IoTDBQueryDemoIT.java index cb074e324a9a0..b9e3ca7b6dbea 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/query/IoTDBQueryDemoIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/query/IoTDBQueryDemoIT.java @@ -18,11 +18,13 @@ */ package org.apache.iotdb.db.it.query; +import org.apache.iotdb.isession.ISession; import org.apache.iotdb.it.env.EnvFactory; import org.apache.iotdb.it.framework.IoTDBTestRunner; import org.apache.iotdb.itbase.category.ClusterIT; import org.apache.iotdb.itbase.category.LocalStandaloneIT; +import com.google.common.collect.ImmutableList; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -41,6 +43,7 @@ import java.util.List; import java.util.Map; +import static org.apache.iotdb.db.it.utils.TestUtils.assertTestFail; import static org.junit.Assert.fail; @RunWith(IoTDBTestRunner.class) @@ -661,4 +664,26 @@ public void RegexpNonExistTest() { fail(e.getMessage()); } } + + @Test + public void selectWithTimeTest() { + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + session.executeRawDataQuery( + ImmutableList.of("root.ln.wf01.wt01.time", "root.ln.wf01.wt01.temperature"), 0, 100); + + fail(); + } catch (Exception e) { + Assert.assertTrue(e.getMessage().contains("509: root.ln.wf01.wt01.time is not a legal path")); + } + + String expectedErrMsg = + "701: Time column is no need to appear in SELECT Clause explicitly, it will always be returned if possible"; + assertTestFail("select time from root.ln.wf01.wt01", expectedErrMsg); + assertTestFail("select time, temperature from root.ln.wf01.wt01", expectedErrMsg); + assertTestFail("select time from root.ln.wf01.wt01 where temperature > 1", expectedErrMsg); + // parse error when processing 'wt01.time' + assertTestFail( + "select wt01.time, wt01.temperature from root.ln.wf01", + "700: Error occurred while parsing SQL to physical plan"); + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBCreateTimeseriesIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBCreateTimeseriesIT.java index 9bb22ea599a42..f1af45ffee883 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBCreateTimeseriesIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBCreateTimeseriesIT.java @@ -72,24 +72,24 @@ public void tearDown() throws Exception { /** Test if creating a time series will cause the database with same name to disappear */ @Test public void testCreateTimeseries() throws Exception { - String storageGroup = "root.sg1.a.b.c"; + String database = "root.sg1.a.b.c"; try (Connection connection = EnvFactory.getEnv().getConnection(); Statement statement = connection.createStatement()) { - statement.execute(String.format("CREATE DATABASE %s", storageGroup)); +
statement.execute(String.format("CREATE DATABASE %s", database)); statement.execute( String.format( "create timeseries %s with datatype=INT64, encoding=PLAIN, compression=SNAPPY", - storageGroup)); + database)); } catch (Exception ignored) { } // ensure that current database in cache is right. - createTimeSeriesTool(storageGroup); + createTimeSeriesTool(database); } - private void createTimeSeriesTool(String storageGroup) throws SQLException { + private void createTimeSeriesTool(String database) throws SQLException { Set<String> resultList = new HashSet<>(); try (Connection connection = EnvFactory.getEnv().getConnection(); Statement statement = connection.createStatement(); @@ -99,7 +99,7 @@ private void createTimeSeriesTool(String storageGroup) throws SQLException { resultList.add(str); } } - Assert.assertFalse(resultList.contains(storageGroup)); + Assert.assertFalse(resultList.contains(database)); resultList.clear(); try (Connection connection = EnvFactory.getEnv().getConnection(); Statement statement = connection.createStatement(); @@ -109,11 +109,16 @@ private void createTimeSeriesTool(String storageGroup) throws SQLException { resultList.add(res); } } - Assert.assertTrue(resultList.contains(storageGroup)); + Assert.assertTrue(resultList.contains(database)); } @Test public void testCreateTimeseriesWithSpecialCharacter() throws Exception { + // Currently this test may fail in PBTree + // Will solve this in the future + if (schemaTestMode == SchemaTestMode.PBTree) { + return; + } try (Connection connection = EnvFactory.getEnv().getConnection()) { try (Statement statement = connection.createStatement()) { statement.execute( diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBDeleteAlignedTimeseriesIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBDeleteAlignedTimeseriesIT.java index 3defa886f8dbe..79198e97429ad 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBDeleteAlignedTimeseriesIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBDeleteAlignedTimeseriesIT.java @@ -246,4 +246,46 @@ public void deleteTimeSeriesMultiIntervalTest() { fail(e.getMessage()); } } + + @Test + public void deleteTimeseriesAndCreateSameTypeTest2() throws Exception { + String[] retArray = new String[] {"1,4.0,", "2,8.0,"}; + int cnt = 0; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + statement.execute( + "create aligned timeseries root.turbine1.d1(s1 FLOAT encoding=PLAIN compression=SNAPPY, " + + "s2 INT64 encoding=PLAIN compression=SNAPPY, s4 DOUBLE encoding=PLAIN compression=SNAPPY)"); + statement.execute("INSERT INTO root.turbine1.d1(timestamp,s1,s2,s4) ALIGNED VALUES(1,1,2,4)"); + + try (ResultSet resultSet = statement.executeQuery("SELECT s4 FROM root.turbine1.d1")) {
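// First pass: only the row written at t=1 is expected (retArray[0] = "1,4.0,"). After the
// DELETE timeseries + re-insert at t=2 + FLUSH below, the second query should return only
// "2,8.0,", i.e. the recreated aligned series must not resurrect deleted data.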
ResultSetMetaData resultSetMetaData = resultSet.getMetaData(); + while (resultSet.next()) { + StringBuilder builder = new StringBuilder(); + for (int i = 1; i <= resultSetMetaData.getColumnCount(); i++) { + builder.append(resultSet.getString(i)).append(","); + } + Assert.assertEquals(retArray[cnt], builder.toString()); + cnt++; + } + } + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBExtendTemplateIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBExtendTemplateIT.java index 9945843b09185..f3d53c3a46187 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBExtendTemplateIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBExtendTemplateIT.java @@ -142,10 +142,15 @@ public void testAutoExtendTemplate() throws SQLException { statement.execute("SET DEVICE TEMPLATE t1 to root.db"); + // single-row insertion statement.execute("INSERT INTO root.db.d1(time, s1, s3) values(1, 1, 1)"); statement.execute("INSERT INTO root.db.d2(time, s4, s5) values(1, 1, 1)"); statement.execute("INSERT INTO root.db1.d1(time, s2, s3) values(1, 1, 1)"); + // multi-row insertion with null + statement.execute( + "INSERT INTO root.db.d1(time, s1, s6) values(1, 1, 1), (2, 2, null), (3, 3, 3)"); + String[] sqls = new String[] { "show timeseries", @@ -159,11 +164,13 @@ public void testAutoExtendTemplate() throws SQLException { "root.db.d1.s3,null,root.db,DOUBLE,GORILLA,LZ4,null,null,null,null,BASE,", "root.db.d1.s4,null,root.db,DOUBLE,GORILLA,LZ4,null,null,null,null,BASE,", "root.db.d1.s5,null,root.db,DOUBLE,GORILLA,LZ4,null,null,null,null,BASE,", + "root.db.d1.s6,null,root.db,DOUBLE,GORILLA,LZ4,null,null,null,null,BASE,", "root.db.d2.s1,null,root.db,INT64,PLAIN,LZ4,null,null,null,null,BASE,", "root.db.d2.s2,null,root.db,DOUBLE,RLE,LZ4,null,null,null,null,BASE,", "root.db.d2.s3,null,root.db,DOUBLE,GORILLA,LZ4,null,null,null,null,BASE,", "root.db.d2.s4,null,root.db,DOUBLE,GORILLA,LZ4,null,null,null,null,BASE,", "root.db.d2.s5,null,root.db,DOUBLE,GORILLA,LZ4,null,null,null,null,BASE,", + "root.db.d2.s6,null,root.db,DOUBLE,GORILLA,LZ4,null,null,null,null,BASE,", "root.db1.d1.s2,null,root.db1,DOUBLE,GORILLA,LZ4,null,null,null,null,BASE,", "root.db1.d1.s3,null,root.db1,DOUBLE,GORILLA,LZ4,null,null,null,null,BASE,")) }; diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/selectinto/IoTDBSelectIntoIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/selectinto/IoTDBSelectIntoIT.java index 90c5ea657254c..3e013c898ba83 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/selectinto/IoTDBSelectIntoIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/selectinto/IoTDBSelectIntoIT.java @@ -785,4 +785,53 @@ public void testNewDataType() { String expectedQueryHeader = "Time,root.db.d2.s7,root.db.d2.s8,root.db.d2.s9,root.db.d2.s10,"; resultSetEqualTest("select s7,s8,s9,s10 from root.db.d2;", expectedQueryHeader, resultSet); } + + // -------------------------------------- OTHER TEST ------------------------------------- + @Test + public void testRemoveBackQuote() { + String[] intoRetArray = + new String[] { + "count(root.sg.d1.s1),root.sg_agg1.d1.count_s1,1,", + "last_value(root.sg.d1.s2),root.sg_agg1.d1.last_value_s2,1,", + "count(root.sg.d2.s1),root.sg_agg1.d2.count_s1,1,", + "last_value(root.sg.d2.s2),root.sg_agg1.d2.last_value_s2,1," + }; + resultSetEqualTest( + "select count(d1.s1), last_value(d1.s2), count(d2.s1), last_value(d2.s2) " + + "into 
root.sg_agg1.`d1`(`count_s1`, last_value_s2), aligned root.sg_agg1.d2(count_s1, last_value_s2) " + "from root.sg;", selectIntoHeader, intoRetArray); + + String expectedQueryHeader = + "Time,root.sg_agg1.d1.count_s1,root.sg_agg1.d2.count_s1,root.sg_agg1.d1.last_value_s2,root.sg_agg1.d2.last_value_s2,"; + String[] queryRetArray = new String[] {"0,10,7,12.0,11.0,"}; + resultSetEqualTest( + "select count_s1, last_value_s2 from root.sg_agg1.d1, root.sg_agg1.d2;", + expectedQueryHeader, + queryRetArray); + } + + @Test + public void testRemoveBackQuoteAlignByDevice() { + String[] intoRetArray = + new String[] { + "root.sg.d1,count(s1),root.sg_abd_agg1.d1.count_s1,1,", + "root.sg.d1,last_value(s2),root.sg_abd_agg1.d1.last_value_s2,1," + }; + resultSetEqualTest( + "select count(s1), last_value(s2) " + + "into root.sg_abd_agg1.`d1`(`count_s1`, last_value_s2) " + + "from root.sg.d1 align by device;", + selectIntoAlignByDeviceHeader, + intoRetArray); + + String expectedQueryHeader = + "Time,root.sg_abd_agg1.d1.count_s1," + "root.sg_abd_agg1.d1.last_value_s2,"; + String[] queryRetArray = new String[] {"0,10,12.0,"}; + resultSetEqualTest( + "select count_s1, last_value_s2 from root.sg_abd_agg1.d1, root.sg_abd_agg1.d2;", + expectedQueryHeader, + queryRetArray); + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/trigger/IoTDBTriggerManagementIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/trigger/IoTDBTriggerManagementIT.java index cb21faa628d3f..a36a9c048aabf 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/trigger/IoTDBTriggerManagementIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/trigger/IoTDBTriggerManagementIT.java @@ -109,6 +109,7 @@ private static void createTimeSeries() { "CREATE TIMESERIES root.test.stateful.b with datatype=INT32,encoding=PLAIN"); statement.execute( "CREATE TIMESERIES root.test.stateful.c with datatype=INT32,encoding=PLAIN"); + statement.execute("set configuration \"trusted_uri_pattern\"='.*'"); } catch (SQLException throwable) { fail(throwable.getMessage()); } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/udaf/IoTDBUDAFManagementIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/udaf/IoTDBUDAFManagementIT.java index 75dc0c1f5f770..eff19645f63f8 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/udaf/IoTDBUDAFManagementIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/udaf/IoTDBUDAFManagementIT.java @@ -235,6 +235,7 @@ public void createFunctionWithURITest() throws SQLException { public void createFunctionWithInvalidURITest() { try (Connection connection = EnvFactory.getEnv().getConnection(); Statement statement = connection.createStatement()) { + statement.execute("set configuration \"trusted_uri_pattern\"='.*'"); try { statement.execute( String.format( diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/udf/IoTDBUDFBlockQueryIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/udf/IoTDBUDFBlockQueryIT.java index 5de421ebd2a6d..71555bce0e3cd 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/udf/IoTDBUDFBlockQueryIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/udf/IoTDBUDFBlockQueryIT.java @@ -152,4 +152,16 @@ public void testUDFSingleRowQuery() { fail(throwable.getMessage()); } } + + @Test + public void testUntrustedUri() { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + statement.execute(
"CREATE FUNCTION two_sum AS 'org.apache.iotdb.db.query.udf.example.TwoSum' USING URI 'https://alioss.timecho.com/upload/library-udf.jar'"); + fail("should fail"); + } catch (SQLException throwable) { + assertTrue(throwable.getMessage().contains("701: Untrusted uri ")); + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/udf/IoTDBUDFManagementIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/udf/IoTDBUDFManagementIT.java index 798256e5a16d6..261675a3c53dc 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/udf/IoTDBUDFManagementIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/udf/IoTDBUDFManagementIT.java @@ -262,6 +262,7 @@ public void testCreateFunctionWithURI() throws SQLException { public void testCreateFunctionWithInvalidURI() { try (Connection connection = EnvFactory.getEnv().getConnection(); Statement statement = connection.createStatement()) { + statement.execute("set configuration \"trusted_uri_pattern\"='.*'"); try { statement.execute( String.format( diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/utils/TestUtils.java b/integration-test/src/test/java/org/apache/iotdb/db/it/utils/TestUtils.java index 372c9200c5e6c..168914651eda5 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/utils/TestUtils.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/utils/TestUtils.java @@ -48,6 +48,7 @@ import java.util.Set; import java.util.TreeMap; import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; import static org.apache.iotdb.itbase.constant.TestConstant.DELTA; import static org.apache.iotdb.itbase.constant.TestConstant.NULL; @@ -694,6 +695,15 @@ public static void assertDataEventuallyOnEnv( assertDataEventuallyOnEnv(env, sql, expectedHeader, expectedResSet, 600); } + public static void assertDataEventuallyOnEnv( + final BaseEnv env, + final String sql, + final String expectedHeader, + final Set<String> expectedResSet, + final Consumer<String> handleFailure) { + assertDataEventuallyOnEnv(env, sql, expectedHeader, expectedResSet, 600, handleFailure); + } + public static void assertDataEventuallyOnEnv( BaseEnv env, String sql, @@ -723,6 +733,86 @@ public static void assertDataEventuallyOnEnv( } } + public static void assertDataEventuallyOnEnv( + final BaseEnv env, + final String sql, + final String expectedHeader, + final Set<String> expectedResSet, + final long timeoutSeconds, + final Consumer<String> handleFailure) { + try (Connection connection = env.getConnection(); + Statement statement = connection.createStatement()) { + // Keep retrying if there are execution failures + await() + .pollInSameThread() + .pollDelay(1L, TimeUnit.SECONDS) + .pollInterval(1L, TimeUnit.SECONDS) + .atMost(timeoutSeconds, TimeUnit.SECONDS) + .untilAsserted( + () -> { + try { + TestUtils.assertResultSetEqual( + executeQueryWithRetry(statement, sql), expectedHeader, expectedResSet); + } catch (Exception e) { + if (handleFailure != null) { + handleFailure.accept(e.getMessage()); + } + Assert.fail(); + } catch (Error e) { + if (handleFailure != null) { + handleFailure.accept(e.getMessage()); + } + throw e; + } + }); + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + public static void assertDataEventuallyOnEnv( + final BaseEnv env, + final DataNodeWrapper dataNodeWrapper, + final String sql, + final String expectedHeader, + final Set<String> expectedResSet) { + assertDataEventuallyOnEnv(env, dataNodeWrapper, sql, expectedHeader, expectedResSet, 600); + } + + public static void
assertDataEventuallyOnEnv(
+      final BaseEnv env,
+      final DataNodeWrapper dataNodeWrapper,
+      final String sql,
+      final String expectedHeader,
+      final Set expectedResSet,
+      final long timeoutSeconds) {
+    try (Connection connection =
+            env.getConnection(
+                dataNodeWrapper, SessionConfig.DEFAULT_USER, SessionConfig.DEFAULT_PASSWORD);
+        Statement statement = connection.createStatement()) {
+      // Keep retrying if there are execution failures
+      await()
+          .pollInSameThread()
+          .pollDelay(1L, TimeUnit.SECONDS)
+          .pollInterval(1L, TimeUnit.SECONDS)
+          .atMost(timeoutSeconds, TimeUnit.SECONDS)
+          .untilAsserted(
+              () -> {
+                try {
+                  if (sql != null && !sql.isEmpty()) {
+                    TestUtils.assertResultSetEqual(
+                        executeQueryWithRetry(statement, sql), expectedHeader, expectedResSet);
+                  }
+                } catch (Exception e) {
+                  Assert.fail();
+                }
+              });
+    } catch (Exception e) {
+      fail(e.getMessage());
+    }
+  }
+
   public static void assertDataEventuallyOnEnv(
       BaseEnv env, String sql, Map expectedHeaderWithResult) {
     assertDataEventuallyOnEnv(env, sql, expectedHeaderWithResult, 600);
@@ -786,4 +876,27 @@ public static void assertDataAlwaysOnEnv(
       fail();
     }
   }
+
+  public static void stopForciblyAndRestartDataNodes() {
+    EnvFactory.getEnv().shutdownForciblyAllDataNodes();
+    EnvFactory.getEnv().startAllDataNodes();
+    long waitStartMS = System.currentTimeMillis();
+    long maxWaitMS = 60_000L;
+    long retryIntervalMS = 1000;
+    while (true) {
+      try (Connection connection = EnvFactory.getEnv().getConnection()) {
+        break;
+      } catch (Exception e) {
+        try {
+          Thread.sleep(retryIntervalMS);
+        } catch (InterruptedException ex) {
+          break;
+        }
+      }
+      long waited = System.currentTimeMillis() - waitStartMS;
+      if (waited > maxWaitMS) {
+        fail("Timeout while waiting for datanodes restart");
+      }
+    }
+  }
 }
diff --git a/integration-test/src/test/java/org/apache/iotdb/it/framework/IoTDBTestRunner.java b/integration-test/src/test/java/org/apache/iotdb/it/framework/IoTDBTestRunner.java
index 07e0451468836..17e20d91e9ac0 100644
--- a/integration-test/src/test/java/org/apache/iotdb/it/framework/IoTDBTestRunner.java
+++ b/integration-test/src/test/java/org/apache/iotdb/it/framework/IoTDBTestRunner.java
@@ -37,33 +37,30 @@ public class IoTDBTestRunner extends BlockJUnit4ClassRunner {
   private static final Logger logger = IoTDBTestLogger.logger;
   private IoTDBTestListener listener;
 
-  public IoTDBTestRunner(Class testClass) throws InitializationError {
+  public IoTDBTestRunner(final Class testClass) throws InitializationError {
     super(testClass);
   }
 
   @Override
-  public void run(RunNotifier notifier) {
-    TimeZone.setDefault(TimeZone.getTimeZone("Bejing"));
+  public void run(final RunNotifier notifier) {
+    TimeZone.setDefault(TimeZone.getTimeZone("GMT+08:00"));
     listener = new IoTDBTestListener(this.getName());
     notifier.addListener(listener);
     super.run(notifier);
   }
 
   @Override
-  protected void runChild(final FrameworkMethod method, RunNotifier notifier) {
-    Description description = describeChild(method);
+  protected void runChild(final FrameworkMethod method, final RunNotifier notifier) {
+    final Description description = describeChild(method);
     logger.info("Run {}", description.getMethodName());
-    long currentTime = System.currentTimeMillis();
+    final long currentTime = System.currentTimeMillis();
     if (EnvType.getSystemEnvType() != EnvType.MultiCluster) {
       EnvFactory.getEnv().setTestMethodName(description.getMethodName());
-    } else {
-      // TestMethodName must be set globally in MultiEnvFactory, since the
-      // cluster environments are not created now
-      MultiEnvFactory.setTestMethodName(description.getMethodName());
     }
+    MultiEnvFactory.setTestMethodName(description.getMethodName());
     super.runChild(method, notifier);
-    double timeCost = (System.currentTimeMillis() - currentTime) / 1000.0;
-    String testName = description.getClassName() + "." + description.getMethodName();
+    final double timeCost = (System.currentTimeMillis() - currentTime) / 1000.0;
+    final String testName = description.getClassName() + "." + description.getMethodName();
     logger.info("Done {}. Cost: {}s", description.getMethodName(), timeCost);
     listener.addTestStat(new IoTDBTestStat(testName, timeCost));
   }
diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/AbstractPipeDualAutoIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/AbstractPipeDualAutoIT.java
index a0a27791a318a..6d3f2e85d8bd5 100644
--- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/AbstractPipeDualAutoIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/AbstractPipeDualAutoIT.java
@@ -36,27 +36,33 @@ public void setUp() {
     MultiEnvFactory.createEnv(2);
     senderEnv = MultiEnvFactory.getEnv(0);
     receiverEnv = MultiEnvFactory.getEnv(1);
+    setupConfig();
+    senderEnv.initClusterEnvironment();
+    receiverEnv.initClusterEnvironment();
+  }
+
+  protected void setupConfig() {
     // TODO: delete ratis configurations
     senderEnv
         .getConfig()
         .getCommonConfig()
         .setAutoCreateSchemaEnabled(true)
         .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
-        .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS);
+        .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+        .setPipeMemoryManagementEnabled(false)
+        .setIsPipeEnableMemoryCheck(false);
     receiverEnv
         .getConfig()
         .getCommonConfig()
         .setAutoCreateSchemaEnabled(true)
         .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
-        .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS);
+        .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+        .setPipeMemoryManagementEnabled(false)
+        .setIsPipeEnableMemoryCheck(false);
 
     // 10 min, assert that the operations will not time out
-    senderEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000);
-    receiverEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000);
-
-    senderEnv.initClusterEnvironment();
-    receiverEnv.initClusterEnvironment();
+    senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
+    receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
   }
 
   @After
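The setUp()/setupConfig() split above turns AbstractPipeDualAutoIT into a template: a subclass can tune the shared cluster configuration before initClusterEnvironment() runs, which is exactly what IoTDBPipeMemoryIT does later in this patch. A minimal sketch, assuming only the base class above; the subclass name and the particular setter are illustrative:

// Hypothetical subclass; MyPipeConfigIT is not part of this patch.
public class MyPipeConfigIT extends AbstractPipeDualAutoIT {

  @Override
  protected void setupConfig() {
    // Keep the shared defaults from the base class...
    super.setupConfig();
    // ...then adjust them; this hook runs before initClusterEnvironment() in setUp().
    senderEnv.getConfig().getCommonConfig().setPipeMemoryManagementEnabled(true);
    receiverEnv.getConfig().getCommonConfig().setPipeMemoryManagementEnabled(true);
  }
}

diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAlterIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAlterIT.java
index fb541e7dcdb7b..364c5d475f0c0 100644
--- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAlterIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAlterIT.java
@@ -36,6 +36,7 @@
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -470,7 +471,7 @@ public void testAlterPipeSourceAndProcessor() {
     try (final Connection connection = senderEnv.getConnection();
         final Statement statement = connection.createStatement()) {
       statement.execute(sql);
-    } catch (SQLException e) {
+    } catch (final SQLException e) {
       fail(e.getMessage());
     }
 
@@ -491,12 +492,13 @@ public void testAlterPipeSourceAndProcessor() {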
     TestUtils.assertDataEventuallyOnEnv(
         receiverEnv, "select * from root.db.**", "Time,root.db.d1.at1,", expectedResSet);
 
-    // Alter pipe (modify 'source.path' and 'processor.tumbling-time.interval-seconds')
+    // Alter pipe (modify 'source.path', 'source.inclusion' and
+    // 'processor.tumbling-time.interval-seconds')
     try (final Connection connection = senderEnv.getConnection();
         final Statement statement = connection.createStatement()) {
       statement.execute(
-          "alter pipe a2b modify source('source' = 'iotdb-source','source.path'='root.db.d2.**') modify processor ('processor.tumbling-time.interval-seconds'='2')");
-    } catch (SQLException e) {
+          "alter pipe a2b modify source('source' = 'iotdb-source','source.path'='root.db.d2.**', 'source.inclusion'='all') modify processor ('processor.tumbling-time.interval-seconds'='2')");
+    } catch (final SQLException e) {
       fail(e.getMessage());
     }
 
@@ -527,5 +529,15 @@ public void testAlterPipeSourceAndProcessor() {
         "select * from root.db.** where time > 10000",
         "Time,root.db.d1.at1,root.db.d2.at1,",
         expectedResSet);
+
+    // Create a timeseries on the sender
+    if (!TestUtils.tryExecuteNonQueryWithRetry(
+        senderEnv, "create timeSeries root.db.d2.at2 int32")) {
+      fail();
+    }
+
+    // Check the timeseries count on the receiver
+    TestUtils.assertDataEventuallyOnEnv(
+        receiverEnv, "count timeSeries", "count(timeseries),", Collections.singleton("3,"));
   }
 }
diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoConflictIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoConflictIT.java
index c00814cab0d4d..b89483840cb58 100644
--- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoConflictIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoConflictIT.java
@@ -37,6 +37,7 @@
 import org.junit.runner.RunWith;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -58,18 +59,22 @@ public void setUp() {
         .setAutoCreateSchemaEnabled(true)
         .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
         .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
-        .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS);
+        .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS)
+        .setPipeMemoryManagementEnabled(false)
+        .setIsPipeEnableMemoryCheck(false);
     receiverEnv
         .getConfig()
         .getCommonConfig()
         .setAutoCreateSchemaEnabled(true)
         .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
         .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
-        .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS);
+        .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS)
+        .setPipeMemoryManagementEnabled(false)
+        .setIsPipeEnableMemoryCheck(false);
 
     // 10 min, assert that the operations will not time out
-    senderEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000);
-    receiverEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000);
+    senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
+    receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
 
     senderEnv.initClusterEnvironment();
     receiverEnv.initClusterEnvironment();
@@ -328,4 +333,105 @@ public void testDoubleLivingAutoConflictTemplate() throws Exception {
     TestUtils.assertDataEventuallyOnEnv(
         receiverEnv, "select s1 from 
root.db.d1", "Time,root.db.d1.s1,", expectedResSet); } + + @Test + public void testAutoManualCreateRace() throws Exception { + final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); + + final String receiverIp = receiverDataNode.getIp(); + final int receiverPort = receiverDataNode.getPort(); + + try (final SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) senderEnv.getLeaderConfigNodeConnection()) { + final Map extractorAttributes = new HashMap<>(); + final Map processorAttributes = new HashMap<>(); + final Map connectorAttributes = new HashMap<>(); + + extractorAttributes.put("extractor.inclusion", "all"); + + connectorAttributes.put("connector", "iotdb-thrift-connector"); + connectorAttributes.put("connector.ip", receiverIp); + connectorAttributes.put("connector.port", Integer.toString(receiverPort)); + + final TSStatus status = + client.createPipe( + new TCreatePipeReq("testPipe", connectorAttributes) + .setExtractorAttributes(extractorAttributes) + .setProcessorAttributes(processorAttributes)); + + Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); + + Assert.assertEquals( + TSStatusCode.SUCCESS_STATUS.getStatusCode(), client.startPipe("testPipe").getCode()); + + if (!TestUtils.tryExecuteNonQueryWithRetry( + receiverEnv, "create timeSeries root.ln.wf01.wt01.status with datatype=BOOLEAN")) { + return; + } + + if (!TestUtils.tryExecuteNonQueryWithRetry( + senderEnv, + "create timeSeries root.ln.wf01.wt01.status with datatype=BOOLEAN tags (tag3=v3) attributes (attr4=v4)")) { + return; + } + + TestUtils.assertDataEventuallyOnEnv( + receiverEnv, + "show timeSeries", + "Timeseries,Alias,Database,DataType,Encoding,Compression,Tags,Attributes,Deadband,DeadbandParameters,ViewType,", + Collections.singleton( + "root.ln.wf01.wt01.status,null,root.ln,BOOLEAN,RLE,LZ4,{\"tag3\":\"v3\"},{\"attr4\":\"v4\"},null,null,BASE,")); + } + } + + @Test + public void testHistoricalActivationRace() throws Exception { + final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); + + final String receiverIp = receiverDataNode.getIp(); + final int receiverPort = receiverDataNode.getPort(); + + try (final SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) senderEnv.getLeaderConfigNodeConnection()) { + + if (!TestUtils.tryExecuteNonQueriesWithRetry( + senderEnv, + Arrays.asList( + "create database root.sg_aligned", + "create device template aligned_template aligned (s0 int32, s1 int64, s2 float, s3 double, s4 boolean, s5 text)", + "set device template aligned_template to root.sg_aligned.device_aligned", + "create timeseries using device template on root.sg_aligned.device_aligned.d10", + "create timeseries using device template on root.sg_aligned.device_aligned.d12", + "insert into root.sg_aligned.device_aligned.d10(time, s0, s1, s2,s3,s4,s5) values 
(1706659200,1706659200,10,20.245,25.24555,true,''),(1706662800,null,1706662800,20.241,25.24111,false,'2'),(1706666400,3,null,20.242,25.24222,true,'3'),(1706670000,4,40,null,35.5474,true,'4'),(1706670600,5,1706670600000,20.246,null,false,'5'),(1706671200,6,60,20.248,25.24888,null,'6'),(1706671800,7,1706671800,20.249,25.24999,false,null),(1706672400,8,80,1245.392,75.51234,false,'8'),(1706672600,9,90,2345.397,2285.58734,false,'9'),(1706673000,10,100,20.241,25.24555,false,'10'),(1706673600,11,110,3345.394,4105.544,false,'11'),(1706674200,12,1706674200,30.245,35.24555,false,'12'),(1706674800,13,130,5.39,125.51234,false,'13'),(1706675400,14,1706675400,5.39,135.51234,false,'14'),(1706676000,15,150,5.39,145.51234,false,'15'),(1706676600,16,160,5.39,155.51234,false,'16'),(1706677200,17,170,5.39,165.51234,false,'17'),(1706677600,18,180,5.39,175.51234,false,'18'),(1706677800,19,190,5.39,185.51234,false,'19'),(1706678000,20,200,5.39,195.51234,false,'20'),(1706678200,21,210,5.39,null,false,'21')", + "insert into root.sg_aligned.device_aligned.d10(time, s0, s1, s2,s3,s4,s5) values (-1,1,10,5.39,5.51234,false,'negative')", + "insert into root.sg_aligned.device_aligned.d11(time, s0, s1, s2,s3,s4,s5) values (-1,-11,-110,-5.39,-5.51234,false,'activate:1')", + "insert into root.sg_aligned.device_aligned.d10(time, s0, s1, s2,s3,s4,s5,s6) values(1706678800,1,1706678800,5.39,5.51234,false,'add:s6',32);"))) { + return; + } + + final Map extractorAttributes = new HashMap<>(); + final Map processorAttributes = new HashMap<>(); + final Map connectorAttributes = new HashMap<>(); + + extractorAttributes.put("extractor.inclusion", "all"); + + connectorAttributes.put("connector", "iotdb-thrift-connector"); + connectorAttributes.put("connector.ip", receiverIp); + connectorAttributes.put("connector.port", Integer.toString(receiverPort)); + + final TSStatus status = + client.createPipe( + new TCreatePipeReq("testPipe", connectorAttributes) + .setExtractorAttributes(extractorAttributes) + .setProcessorAttributes(processorAttributes)); + + Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); + + Assert.assertEquals( + TSStatusCode.SUCCESS_STATUS.getStatusCode(), client.startPipe("testPipe").getCode()); + + TestUtils.assertDataEventuallyOnEnv( + receiverEnv, "count devices", "count(devices),", Collections.singleton("3,")); + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeClusterIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeClusterIT.java index a017cf112d7aa..3a4ca6fcada14 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeClusterIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeClusterIT.java @@ -74,24 +74,123 @@ public void setUp() { .setAutoCreateSchemaEnabled(true) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS); + .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); receiverEnv .getConfig() .getCommonConfig() .setAutoCreateSchemaEnabled(true) + .setDataReplicationFactor(2) + .setSchemaReplicationFactor(3) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - 
.setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS);
+        .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS)
+        .setPipeMemoryManagementEnabled(false)
+        .setIsPipeEnableMemoryCheck(false);
 
     // 10 min, assert that the operations will not time out
-    senderEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000);
-    receiverEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000);
+    senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
+    receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
 
     senderEnv.initClusterEnvironment(3, 3, 180);
     receiverEnv.initClusterEnvironment(3, 3, 180);
   }
 
+  @Test
+  public void testMachineDowntimeAsync() {
+    testMachineDowntime("iotdb-thrift-connector");
+  }
+
+  @Test
+  public void testMachineDowntimeSync() {
+    testMachineDowntime("iotdb-thrift-sync-connector");
+  }
+
+  private void testMachineDowntime(String sink) {
+    StringBuilder nodeUrls = new StringBuilder();
+    for (DataNodeWrapper nodeWrapper : receiverEnv.getDataNodeWrapperList()) {
+      nodeUrls.append(nodeWrapper.getIp()).append(":").append(nodeWrapper.getPort());
+      nodeUrls.append(",");
+    }
+    nodeUrls.deleteCharAt(nodeUrls.length() - 1);
+
+    try (final SyncConfigNodeIServiceClient client =
+        (SyncConfigNodeIServiceClient) senderEnv.getLeaderConfigNodeConnection()) {
+
+      if (!TestUtils.tryExecuteNonQueriesWithRetry(
+          senderEnv,
+          Arrays.asList(
+              "insert into root.db.d1(time, s1) values (2010-01-01T10:00:00+08:00, 1)",
+              "insert into root.db.d1(time, s1) values (2010-01-02T10:00:00+08:00, 2)",
+              "flush"))) {
+        return;
+      }
+
+      final Map extractorAttributes = new HashMap<>();
+      final Map processorAttributes = new HashMap<>();
+      final Map connectorAttributes = new HashMap<>();
+
+      extractorAttributes.put("extractor", "iotdb-extractor");
+      extractorAttributes.put("capture.tree", "true");
+
+      processorAttributes.put("processor", "do-nothing-processor");
+
+      connectorAttributes.put("connector", sink);
+      connectorAttributes.put("connector.batch.enable", "false");
+      connectorAttributes.put("connector.node-urls", nodeUrls.toString());
+
+      final TSStatus status =
+          client.createPipe(
+              new TCreatePipeReq("p1", connectorAttributes)
+                  .setExtractorAttributes(extractorAttributes)
+                  .setProcessorAttributes(processorAttributes));
+
+      Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode());
+
+      receiverEnv.getDataNodeWrapper(0).stop();
+
+      // Ensure that the kill -9 operation is completed
+      Thread.sleep(5000);
+      for (DataNodeWrapper nodeWrapper : receiverEnv.getDataNodeWrapperList()) {
+        if (!nodeWrapper.isAlive()) {
+          continue;
+        }
+        TestUtils.assertDataEventuallyOnEnv(
+            receiverEnv,
+            nodeWrapper,
+            "select count(*) from root.**",
+            "count(root.db.d1.s1),",
+            Collections.singleton("2,"),
+            600);
+      }
+      if (!TestUtils.tryExecuteNonQueriesWithRetry(
+          senderEnv,
+          Arrays.asList("insert into root.db.d1(time, s1) values (now(), 3)", "flush"))) {
+        return;
+      }
+
+    } catch (Exception e) {
+      fail(e.getMessage());
+    }
+
+    for (DataNodeWrapper nodeWrapper : receiverEnv.getDataNodeWrapperList()) {
+      if (!nodeWrapper.isAlive()) {
+        continue;
+      }
+
+      TestUtils.assertDataEventuallyOnEnv(
+          receiverEnv,
+          nodeWrapper,
+          "select count(*) from root.**",
+          "count(root.db.d1.s1),",
+          Collections.singleton("3,"),
+          600);
+      return;
+    }
+  }
+
   @Test
   public void testWithAllParametersInLogMode() throws Exception {
     testWithAllParameters("log");
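A side note on testMachineDowntime above: the comma-separated value for connector.node-urls can also be built with a join instead of the StringBuilder loop. A sketch only, assuming the DataNodeWrapper accessors used in the test; it is not part of this patch:

import java.util.List;
import java.util.stream.Collectors;

// Produces "ip1:port1,ip2:port2,...", matching what the loop above builds.
static String buildNodeUrls(final List<DataNodeWrapper> nodes) {
  return nodes.stream()
      .map(node -> node.getIp() + ":" + node.getPort())
      .collect(Collectors.joining(","));
}

diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeDataSinkIT.java 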
b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeDataSinkIT.java index 7fda1250b1bec..5f33393111715 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeDataSinkIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeDataSinkIT.java @@ -38,6 +38,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Map; +import java.util.function.Consumer; @RunWith(IoTDBTestRunner.class) @Category({MultiClusterIT2AutoCreateSchema.class}) @@ -280,6 +281,12 @@ public void testReceiverAutoCreateWithPattern() throws Exception { private void testReceiverAutoCreate(final Map extractorAttributes) throws Exception { + final Consumer handleFailure = + o -> { + TestUtils.executeNonQueryWithRetry(senderEnv, "flush"); + TestUtils.executeNonQueryWithRetry(receiverEnv, "flush"); + }; + final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); final String receiverIp = receiverDataNode.getIp(); @@ -343,12 +350,182 @@ private void testReceiverAutoCreate(final Map extractorAttribute "root.ln.wf01.wt01.date,null,root.ln,DATE,TS_2DIFF,LZ4,null,null,null,null,BASE,", "root.ln.wf01.wt01.text,null,root.ln,TEXT,PLAIN,LZ4,null,null,null,null,BASE,", "root.ln.wf01.wt01.string,null,root.ln,STRING,PLAIN,LZ4,null,null,null,null,BASE,", - "root.ln.wf01.wt01.blob,null,root.ln,BLOB,PLAIN,LZ4,null,null,null,null,BASE,")))); + "root.ln.wf01.wt01.blob,null,root.ln,BLOB,PLAIN,LZ4,null,null,null,null,BASE,"))), + handleFailure); TestUtils.assertDataEventuallyOnEnv( receiverEnv, "show devices root.ln.wf01.wt02", "Device,IsAligned,Template,TTL(ms),", - Collections.singleton("root.ln.wf01.wt02,true,null,INF,")); + Collections.singleton("root.ln.wf01.wt02,true,null,INF,"), + handleFailure); + } + } + + @Test + public void testSyncLoadTsFile() throws Exception { + testReceiverLoadTsFile("sync"); + } + + @Test + public void testAsyncLoadTsFile() throws Exception { + testReceiverLoadTsFile("async"); + } + + private void testReceiverLoadTsFile(final String loadTsFileStrategy) throws Exception { + final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); + + final String receiverIp = receiverDataNode.getIp(); + final int receiverPort = receiverDataNode.getPort(); + + try (final SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) senderEnv.getLeaderConfigNodeConnection()) { + + // Do not fail if the failure has nothing to do with pipe + // Because the failures will randomly generate due to resource limitation + if (!TestUtils.tryExecuteNonQueriesWithRetry( + senderEnv, + Arrays.asList("insert into root.vehicle.d0(time, s1) values (1, 1)", "flush"))) { + return; + } + + final Map extractorAttributes = new HashMap<>(); + final Map processorAttributes = new HashMap<>(); + final Map connectorAttributes = new HashMap<>(); + + extractorAttributes.put("extractor.realtime.mode", "forced-log"); + + connectorAttributes.put("sink", "iotdb-thrift-sink"); + connectorAttributes.put("sink.batch.enable", "false"); + connectorAttributes.put("sink.ip", receiverIp); + connectorAttributes.put("sink.port", Integer.toString(receiverPort)); + connectorAttributes.put("sink.load-tsfile-strategy", loadTsFileStrategy); + + Assert.assertEquals( + TSStatusCode.SUCCESS_STATUS.getStatusCode(), + client + .createPipe( + new TCreatePipeReq("testPipe", connectorAttributes) + .setExtractorAttributes(extractorAttributes) + .setProcessorAttributes(processorAttributes)) + .getCode()); + + Assert.assertEquals( + 
TSStatusCode.SUCCESS_STATUS.getStatusCode(), client.startPipe("testPipe").getCode()); + + // Do not fail if the failure has nothing to do with pipe + // Because the failures will randomly generate due to resource limitation + if (!TestUtils.tryExecuteNonQueriesWithRetry( + senderEnv, + Arrays.asList("insert into root.vehicle.d0(time, s1) values (2, 1)", "flush"))) { + return; + } + + TestUtils.assertDataEventuallyOnEnv( + receiverEnv, + "select * from root.**", + "Time,root.vehicle.d0.s1,", + Collections.unmodifiableSet(new HashSet<>(Arrays.asList("1,1.0,", "2,1.0,")))); + + Assert.assertEquals( + TSStatusCode.SUCCESS_STATUS.getStatusCode(), client.dropPipe("testPipe").getCode()); + + Assert.assertEquals( + TSStatusCode.SUCCESS_STATUS.getStatusCode(), + client + .createPipe( + new TCreatePipeReq("testPipe", connectorAttributes) + .setExtractorAttributes(extractorAttributes) + .setProcessorAttributes(processorAttributes)) + .getCode()); + + // Do not fail if the failure has nothing to do with pipe + // Because the failures will randomly generate due to resource limitation + if (!TestUtils.tryExecuteNonQueriesWithRetry( + senderEnv, + Arrays.asList( + "insert into root.vehicle.d0(time, s1) values (4, 1)", + "insert into root.vehicle.d0(time, s1) values (3, 1), (0, 1)", + "flush"))) { + return; + } + + TestUtils.assertDataEventuallyOnEnv( + receiverEnv, + "select * from root.**", + "Time,root.vehicle.d0.s1,", + Collections.unmodifiableSet( + new HashSet<>(Arrays.asList("0,1.0,", "1,1.0,", "2,1.0,", "3,1.0,", "4,1.0,")))); + } + } + + @Test + public void testSyncLoadTsFileWithoutVerify() throws Exception { + testLoadTsFileWithoutVerify("sync"); + } + + @Test + public void testAsyncLoadTsFileWithoutVerify() throws Exception { + testLoadTsFileWithoutVerify("async"); + } + + private void testLoadTsFileWithoutVerify(final String loadTsFileStrategy) throws Exception { + final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); + + final String receiverIp = receiverDataNode.getIp(); + final int receiverPort = receiverDataNode.getPort(); + + try (final SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) senderEnv.getLeaderConfigNodeConnection()) { + + // Do not fail if the failure has nothing to do with pipe + // Because the failures will randomly generate due to resource limitation + if (!TestUtils.tryExecuteNonQueriesWithRetry( + senderEnv, + Arrays.asList("insert into root.vehicle.d0(time, s1) values (1, 1)", "flush"))) { + return; + } + + final Map extractorAttributes = new HashMap<>(); + final Map processorAttributes = new HashMap<>(); + final Map connectorAttributes = new HashMap<>(); + + extractorAttributes.put("extractor.realtime.mode", "forced-log"); + + connectorAttributes.put("sink", "iotdb-thrift-sink"); + connectorAttributes.put("sink.batch.enable", "false"); + connectorAttributes.put("sink.ip", receiverIp); + connectorAttributes.put("sink.port", Integer.toString(receiverPort)); + connectorAttributes.put("sink.load-tsfile-strategy", loadTsFileStrategy); + connectorAttributes.put("sink.tsfile.validation", "false"); + + Assert.assertEquals( + TSStatusCode.SUCCESS_STATUS.getStatusCode(), + client + .createPipe( + new TCreatePipeReq("testPipe", connectorAttributes) + .setExtractorAttributes(extractorAttributes) + .setProcessorAttributes(processorAttributes)) + .getCode()); + + Assert.assertEquals( + TSStatusCode.SUCCESS_STATUS.getStatusCode(), client.startPipe("testPipe").getCode()); + + // Do not fail if the failure has nothing to do with pipe + // 
Because the failures will randomly generate due to resource limitation + if (!TestUtils.tryExecuteNonQueriesWithRetry( + senderEnv, + Arrays.asList( + "create timeSeries root.vehicle.d0.s1 int32", + "insert into root.vehicle.d0(time, s1) values (2, 1)", + "flush"))) { + return; + } + + TestUtils.assertDataEventuallyOnEnv( + receiverEnv, + "select * from root.**", + "Time,root.vehicle.d0.s1,", + Collections.unmodifiableSet(new HashSet<>(Arrays.asList("1,1.0,", "2,1.0,")))); } } } diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeIdempotentIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeIdempotentIT.java index fab2ef9597a4d..4e1270577d814 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeIdempotentIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeIdempotentIT.java @@ -65,17 +65,21 @@ public void setUp() { // of the tested idempotent sql. .setDefaultSchemaRegionGroupNumPerDatabase(1) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); receiverEnv .getConfig() .getCommonConfig() .setAutoCreateSchemaEnabled(true) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); // 10 min, assert that the operations will not time out - senderEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); - receiverEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); + senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); senderEnv.initClusterEnvironment(); receiverEnv.initClusterEnvironment(); @@ -168,10 +172,10 @@ public void testDeleteTimeSeriesIdempotent() throws Exception { Collections.singletonList( "create timeSeries root.ln.wf01.wt01.status0(status0) with datatype=BOOLEAN,encoding=PLAIN"), "delete timeSeries root.ln.wf01.wt01.status0", - "create timeSeries root.ln.wf01.wt01.status2(status2) with datatype=BOOLEAN,encoding=PLAIN", - "count timeSeries", - "count(timeseries),", - Collections.singleton("1,")); + "create database root.sg", + "count databases", + "count,", + Collections.singleton("2,")); } @Test @@ -260,10 +264,10 @@ public void testDeactivateTemplateIdempotent() throws Exception { "create timeSeries using device template on root.sg1.d1", "create timeSeries using device template on root.sg1.d2"), "delete timeSeries of schema template t1 from root.sg1.*", - "create timeSeries using device template on root.sg1.d3", - "count timeSeries", - "count(timeseries),", - Collections.singleton("3,")); + "create database root.sg2", + "count databases", + "count,", + Collections.singleton("2,")); } @Test diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeLifeCycleIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeLifeCycleIT.java index 87e00151db159..7ad604e01d8c7 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeLifeCycleIT.java +++ 
b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeLifeCycleIT.java
@@ -648,7 +648,8 @@ public void testDoubleLiving() throws Exception {
       final Map connectorAttributes = new HashMap<>();
 
       // Add this property to avoid creating a self-cycle.
-      connectorAttributes.put("source.forwarding-pipe-requests", "false");
+      extractorAttributes.put("source.forwarding-pipe-requests", "false");
+
       connectorAttributes.put("connector", "iotdb-thrift-connector");
       connectorAttributes.put("connector.batch.enable", "false");
       connectorAttributes.put("connector.ip", receiverIp);
@@ -691,7 +692,8 @@ public void testDoubleLiving() throws Exception {
       final Map connectorAttributes = new HashMap<>();
 
       // Add this property to avoid creating a self-cycle.
-      connectorAttributes.put("source.forwarding-pipe-requests", "false");
+      extractorAttributes.put("source.forwarding-pipe-requests", "false");
+
       connectorAttributes.put("connector", "iotdb-thrift-connector");
       connectorAttributes.put("connector.batch.enable", "false");
       connectorAttributes.put("connector.ip", senderIp);
@@ -844,7 +846,7 @@ public void testPermission() {
     assertNonQueryTestFail(
         senderEnv,
         "create pipePlugin TestProcessor as 'org.apache.iotdb.db.pipe.example.TestProcessor' USING URI 'xxx'",
-        "1603: The scheme of URI is not set, please specify the scheme of URI.",
+        "701: Untrusted uri xxx",
         "test",
         "test123");
     tryExecuteNonQueryWithRetry(senderEnv, "drop pipePlugin TestProcessor", "test", "test123");
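For context on the change above: in testDoubleLiving each cluster pipes into the other, so a point written to A is forwarded to B, and B's pipe would forward it straight back to A unless the source is told not to re-forward data that itself arrived through a pipe. A sketch of one direction of that wiring, using the request types from this patch; the pipe name and endpoint variables are illustrative:

// One pipe per direction; the mirror-image "b2a" pipe is created on the other cluster.
final Map<String, String> extractorAttributes = new HashMap<>();
final Map<String, String> connectorAttributes = new HashMap<>();

// Source-side switch that breaks the A -> B -> A loop: pipe-delivered data
// is not forwarded again.
extractorAttributes.put("source.forwarding-pipe-requests", "false");

connectorAttributes.put("connector", "iotdb-thrift-connector");
connectorAttributes.put("connector.batch.enable", "false");
connectorAttributes.put("connector.ip", receiverIp);
connectorAttributes.put("connector.port", Integer.toString(receiverPort));

client.createPipe(
    new TCreatePipeReq("a2b", connectorAttributes)
        .setExtractorAttributes(extractorAttributes));

diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeMemoryIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeMemoryIT.java
new file mode 100644
index 0000000000000..bb211a1ffe895
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeMemoryIT.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.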
+ */ + +package org.apache.iotdb.pipe.it.autocreate; + +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient; +import org.apache.iotdb.confignode.rpc.thrift.TCreatePipeReq; +import org.apache.iotdb.it.env.cluster.node.DataNodeWrapper; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2AutoCreateSchema; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.util.HashMap; +import java.util.Map; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2AutoCreateSchema.class}) +public class IoTDBPipeMemoryIT extends AbstractPipeDualAutoIT { + + @Override + @Before + public void setUp() { + super.setUp(); + } + + @Override + protected void setupConfig() { + super.setupConfig(); + senderEnv + .getConfig() + .getCommonConfig() + .setPipeMemoryManagementEnabled(true) + .setIsPipeEnableMemoryCheck(true) + .setDatanodeMemoryProportion("1000:1000:1000:1000:1:1000"); + receiverEnv + .getConfig() + .getCommonConfig() + .setPipeMemoryManagementEnabled(true) + .setIsPipeEnableMemoryCheck(true) + .setDatanodeMemoryProportion("1000:1000:1000:1000:1:1000"); + } + + @Test + public void testCreatePipeMemoryManage() { + final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); + final String receiverIp = receiverDataNode.getIp(); + final int receiverPort = receiverDataNode.getPort(); + + try (final SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) senderEnv.getLeaderConfigNodeConnection()) { + final Map extractorAttributes = new HashMap<>(); + final Map processorAttributes = new HashMap<>(); + final Map connectorAttributes = new HashMap<>(); + extractorAttributes.put("user", "root"); + + connectorAttributes.put("connector", "iotdb-thrift-connector"); + connectorAttributes.put("connector.batch.enable", "false"); + connectorAttributes.put("connector.ip", receiverIp); + connectorAttributes.put("connector.port", Integer.toString(receiverPort)); + + final TSStatus status = + client.createPipe( + new TCreatePipeReq("p1", connectorAttributes) + .setExtractorAttributes(extractorAttributes) + .setProcessorAttributes(processorAttributes)); + + Assert.assertNotEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); + Assert.assertNotNull(status.getMessage()); + Assert.assertTrue(status.getMessage().contains("Not enough memory for pipe.")); + + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProcessorIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProcessorIT.java index 4556416f5fbe3..bbf4f206b59f5 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProcessorIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProcessorIT.java @@ -32,6 +32,7 @@ import org.junit.Assert; import org.junit.Before; +import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; @@ -58,22 +59,27 @@ public void setUp() { .setAutoCreateSchemaEnabled(true) .setTimestampPrecision("ms") .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - 
.setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); receiverEnv .getConfig() .getCommonConfig() .setAutoCreateSchemaEnabled(true) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); // 10 min, assert that the operations will not time out - senderEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); - receiverEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); + senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); senderEnv.initClusterEnvironment(); receiverEnv.initClusterEnvironment(); } + @Ignore @Test public void testTumblingTimeSamplingProcessor() throws Exception { final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProtocolIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProtocolIT.java index 373e9d4dbea7e..ed68cc23ebada 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProtocolIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProtocolIT.java @@ -21,7 +21,7 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient; -import org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; import org.apache.iotdb.confignode.rpc.thrift.TCreatePipeReq; import org.apache.iotdb.consensus.ConsensusFactory; import org.apache.iotdb.db.it.utils.TestUtils; @@ -74,7 +74,9 @@ private void innerSetUp( .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) .setDataRegionConsensusProtocolClass(dataRegionConsensus) .setSchemaReplicationFactor(schemaRegionReplicationFactor) - .setDataReplicationFactor(dataRegionReplicationFactor); + .setDataReplicationFactor(dataRegionReplicationFactor) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); receiverEnv .getConfig() .getCommonConfig() @@ -83,11 +85,13 @@ private void innerSetUp( .setSchemaRegionConsensusProtocolClass(schemaRegionConsensus) .setDataRegionConsensusProtocolClass(dataRegionConsensus) .setSchemaReplicationFactor(schemaRegionReplicationFactor) - .setDataReplicationFactor(dataRegionReplicationFactor); + .setDataReplicationFactor(dataRegionReplicationFactor) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); // 10 min, assert that the operations will not time out - senderEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); - receiverEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); + senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); senderEnv.initClusterEnvironment(configNodesNum, dataNodesNum); receiverEnv.initClusterEnvironment(configNodesNum, dataNodesNum); @@ -180,8 +184,8 @@ public void testPipeOnBothSenderAndReceiver() throws 
Exception { .setDataReplicationFactor(1); // 10 min, assert that the operations will not time out - senderEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); - receiverEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); + senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); senderEnv.initClusterEnvironment(3, 3); receiverEnv.initClusterEnvironment(1, 1); @@ -373,8 +377,8 @@ private void doTestUseNodeUrls(String connectorName) throws Exception { .setDataReplicationFactor(2); // 10 min, assert that the operations will not time out - senderEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); - receiverEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); + senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); senderEnv.initClusterEnvironment(1, 1); receiverEnv.initClusterEnvironment(1, 3); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorCompressionIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeSinkCompressionIT.java similarity index 96% rename from integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorCompressionIT.java rename to integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeSinkCompressionIT.java index f7c0c63b842e8..59cfa4321e4b0 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorCompressionIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeSinkCompressionIT.java @@ -51,7 +51,7 @@ @RunWith(IoTDBTestRunner.class) @Category({MultiClusterIT2AutoCreateSchema.class}) -public class IoTDBPipeConnectorCompressionIT extends AbstractPipeDualAutoIT { +public class IoTDBPipeSinkCompressionIT extends AbstractPipeDualAutoIT { @Override @Before @@ -66,18 +66,23 @@ public void setUp() { .getCommonConfig() .setAutoCreateSchemaEnabled(true) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); + receiverEnv .getConfig() .getCommonConfig() .setAutoCreateSchemaEnabled(true) .setPipeAirGapReceiverEnabled(true) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); // 10 min, assert that the operations will not time out - senderEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); - receiverEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); + senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); senderEnv.initClusterEnvironment(); receiverEnv.initClusterEnvironment(); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorParallelIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeSinkParallelIT.java similarity index 98% rename from 
integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorParallelIT.java rename to integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeSinkParallelIT.java index 87bb4b465e56f..d550310409daf 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorParallelIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeSinkParallelIT.java @@ -41,7 +41,7 @@ @RunWith(IoTDBTestRunner.class) @Category({MultiClusterIT2AutoCreateSchema.class}) -public class IoTDBPipeConnectorParallelIT extends AbstractPipeDualAutoIT { +public class IoTDBPipeSinkParallelIT extends AbstractPipeDualAutoIT { @Test public void testIoTConnectorParallel() throws Exception { final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeExtractorIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeSourceIT.java similarity index 99% rename from integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeExtractorIT.java rename to integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeSourceIT.java index aea757e63b391..fe3c550334436 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeExtractorIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeSourceIT.java @@ -53,7 +53,7 @@ @RunWith(IoTDBTestRunner.class) @Category({MultiClusterIT2AutoCreateSchema.class}) -public class IoTDBPipeExtractorIT extends AbstractPipeDualAutoIT { +public class IoTDBPipeSourceIT extends AbstractPipeDualAutoIT { @Before public void setUp() { @@ -71,17 +71,21 @@ public void setUp() { // Disable sender compaction for tsfile determination in loose range test .setEnableSeqSpaceCompaction(false) .setEnableUnseqSpaceCompaction(false) - .setEnableCrossSpaceCompaction(false); + .setEnableCrossSpaceCompaction(false) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); receiverEnv .getConfig() .getCommonConfig() .setAutoCreateSchemaEnabled(true) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); // 10 min, assert that the operations will not time out - senderEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); - receiverEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); + senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); senderEnv.initClusterEnvironment(); receiverEnv.initClusterEnvironment(); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeSyntaxIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeSyntaxIT.java index b0293361b6255..b9de28b715869 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeSyntaxIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeSyntaxIT.java @@ -718,4 +718,15 @@ public void testInclusionPattern() throws Exception { Assert.assertEquals(1, showPipeResult.size()); } } + + @Test + public void testValidPipeWithoutWithSink() { + 
try (final Connection connection = senderEnv.getConnection(); + final Statement statement = connection.createStatement()) { + statement.execute("create pipe p1('sink'='do-nothing-sink')"); + } catch (SQLException e) { + e.printStackTrace(); + fail(e.getMessage()); + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeWithLoadIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeWithLoadIT.java new file mode 100644 index 0000000000000..d87aa3b5fae1c --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeWithLoadIT.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.pipe.it.autocreate; + +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient; +import org.apache.iotdb.confignode.rpc.thrift.TCreatePipeReq; +import org.apache.iotdb.consensus.ConsensusFactory; +import org.apache.iotdb.db.it.utils.TestUtils; +import org.apache.iotdb.it.env.MultiEnvFactory; +import org.apache.iotdb.it.env.cluster.node.DataNodeWrapper; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2AutoCreateSchema; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2AutoCreateSchema.class}) +public class IoTDBPipeWithLoadIT extends AbstractPipeDualAutoIT { + + @Before + public void setUp() { + MultiEnvFactory.createEnv(2); + senderEnv = MultiEnvFactory.getEnv(0); + receiverEnv = MultiEnvFactory.getEnv(1); + + // TODO: delete ratis configurations + senderEnv + .getConfig() + .getCommonConfig() + .setAutoCreateSchemaEnabled(true) + .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + // Disable sender compaction to test mods + .setEnableSeqSpaceCompaction(false) + .setEnableUnseqSpaceCompaction(false) + .setEnableCrossSpaceCompaction(false) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); + receiverEnv + .getConfig() + .getCommonConfig() + .setAutoCreateSchemaEnabled(true) + .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); + + // 10 min, assert that the operations 
will not time out
+    senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
+    receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
+
+    senderEnv.initClusterEnvironment();
+    receiverEnv.initClusterEnvironment();
+  }
+
+  /**
+   * Test that when the receiver loads data from a TsFile, it will not load timeseries that are
+   * completely deleted by mods.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testReceiverNotLoadDeletedTimeseries() throws Exception {
+    final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0);
+    final String receiverIp = receiverDataNode.getIp();
+    final int receiverPort = receiverDataNode.getPort();
+
+    final Map extractorAttributes = new HashMap<>();
+    final Map processorAttributes = new HashMap<>();
+    final Map connectorAttributes = new HashMap<>();
+
+    // Enable mods transfer
+    extractorAttributes.put("source.mods.enable", "true");
+
+    connectorAttributes.put("connector.batch.enable", "false");
+    connectorAttributes.put("connector.ip", receiverIp);
+    connectorAttributes.put("connector.port", Integer.toString(receiverPort));
+
+    try (final SyncConfigNodeIServiceClient client =
+        (SyncConfigNodeIServiceClient) senderEnv.getLeaderConfigNodeConnection()) {
+      // Generate TsFile and mods on sender. There are 6 time-series in total.
+      // Time-series not affected by mods: d1.s1, d2.s1
+      // Time-series partially deleted by mods: d1.s2, d3.s1
+      // Time-series completely deleted by mods: d1.s3, d4.s1 (should not be loaded by receiver)
+      if (!TestUtils.tryExecuteNonQueriesWithRetry(
+          senderEnv,
+          Arrays.asList(
+              "insert into root.db.d1 (time, s1, s2, s3) values (1, 1, 1, 1), (3, 3, 3, 3)",
+              "insert into root.db.d2 (time, s1) values (1, 1), (3, 3)",
+              "insert into root.db.d3 (time, s1) values (1, 1), (3, 3)",
+              "insert into root.db.d4 (time, s1) values (1, 1), (3, 3)",
+              "flush",
+              "delete from root.db.d1.s2 where time <= 2",
+              "delete from root.db.d1.s3 where time >= 1 and time <= 3",
+              "delete from root.db.d3.** where time <= 2",
+              "delete from root.db.d4.** where time >= 1 and time <= 3",
+              "flush"))) {
+        return;
+      }
+
+      TSStatus status =
+          client.createPipe(
+              new TCreatePipeReq("p1", connectorAttributes)
+                  .setExtractorAttributes(extractorAttributes)
+                  .setProcessorAttributes(processorAttributes));
+      Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode());
+      Assert.assertEquals(
+          TSStatusCode.SUCCESS_STATUS.getStatusCode(), client.startPipe("p1").getCode());
+
+      TestUtils.assertDataEventuallyOnEnv(
+          receiverEnv, "count timeseries", "count(timeseries),", Collections.singleton("4,"));
+    }
+  }
+}
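The expected count in testReceiverNotLoadDeletedTimeseries above follows from the comments in the test: six timeseries are written, and two of them (d1.s3 and d4.s1) are deleted over their entire time range by mods, leaving 6 - 2 = 4 on the receiver. A hypothetical follow-up assertion for one of the partially deleted series is sketched below; the expected row assumes the float rendering these ITs rely on elsewhere:

// Hypothetical extra check, not part of this patch: d1.s2 lost its point at time 1
// ("delete ... where time <= 2"), so only the point at time 3 should reach the receiver.
TestUtils.assertDataEventuallyOnEnv(
    receiverEnv,
    "select s2 from root.db.d1",
    "Time,root.db.d1.s2,",
    Collections.singleton("3,3.0,"));

diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/PipeNowFunctionIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/PipeNowFunctionIT.java
new file mode 100644
index 0000000000000..16b9cb05df03b
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/PipeNowFunctionIT.java
@@ -0,0 +1,281 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. 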
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.pipe.it.autocreate; + +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient; +import org.apache.iotdb.confignode.rpc.thrift.TAlterPipeReq; +import org.apache.iotdb.confignode.rpc.thrift.TCreatePipeReq; +import org.apache.iotdb.confignode.rpc.thrift.TShowPipeInfo; +import org.apache.iotdb.confignode.rpc.thrift.TShowPipeReq; +import org.apache.iotdb.it.env.cluster.node.DataNodeWrapper; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2AutoCreateSchema; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.Assert.fail; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2AutoCreateSchema.class}) +public class PipeNowFunctionIT extends AbstractPipeDualAutoIT { + + @Test + public void testPipeNowFunction() throws Exception { + final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); + + final String receiverIp = receiverDataNode.getIp(); + final int receiverPort = receiverDataNode.getPort(); + + try (final SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) senderEnv.getLeaderConfigNodeConnection()) { + Map extractorAttributes = new HashMap<>(); + Map processorAttributes = new HashMap<>(); + Map connectorAttributes = new HashMap<>(); + + extractorAttributes.put("source.start-time", "now"); + extractorAttributes.put("source.end-time", "now"); + extractorAttributes.put("source.history.start-time", "now"); + extractorAttributes.put("source.history.end-time", "now"); + extractorAttributes.put("source.history.enable", "true"); + + connectorAttributes.put("connector", "iotdb-thrift-connector"); + connectorAttributes.put("connector.batch.enable", "false"); + connectorAttributes.put("connector.ip", receiverIp); + connectorAttributes.put("connector.port", Integer.toString(receiverPort)); + + TSStatus status = + client.createPipe( + new TCreatePipeReq("p1", connectorAttributes) + .setExtractorAttributes(extractorAttributes) + .setProcessorAttributes(processorAttributes)); + Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); + + extractorAttributes.clear(); + extractorAttributes.put("start-time", "now"); + extractorAttributes.put("end-time", "now"); + extractorAttributes.put("history.start-time", "now"); + extractorAttributes.put("history.end-time", "now"); + extractorAttributes.put("history.enable", "true"); + + status = + client.createPipe( + new TCreatePipeReq("p2", connectorAttributes) + .setExtractorAttributes(extractorAttributes) + .setProcessorAttributes(processorAttributes)); + Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); + + extractorAttributes.clear(); + 
extractorAttributes.put("extractor.start-time", "now"); + extractorAttributes.put("extractor.end-time", "now"); + extractorAttributes.put("extractor.history.start-time", "now"); + extractorAttributes.put("extractor.history.end-time", "now"); + extractorAttributes.put("history.enable", "true"); + + status = + client.createPipe( + new TCreatePipeReq("p3", connectorAttributes) + .setExtractorAttributes(extractorAttributes) + .setProcessorAttributes(processorAttributes)); + Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); + + List showPipeResult = client.showPipe(new TShowPipeReq()).pipeInfoList; + Assert.assertTrue( + showPipeResult.stream().anyMatch((o) -> o.id.equals("p1") && o.state.equals("RUNNING"))); + + Assert.assertEquals( + TSStatusCode.SUCCESS_STATUS.getStatusCode(), client.startPipe("p1").getCode()); + + showPipeResult = client.showPipe(new TShowPipeReq()).pipeInfoList; + Assert.assertTrue( + showPipeResult.stream().anyMatch((o) -> o.id.equals("p1") && o.state.equals("RUNNING"))); + + extractorAttributes.clear(); + extractorAttributes.put("extractor.start-time", "now"); + extractorAttributes.put("extractor.end-time", "now"); + extractorAttributes.put("extractor.history.start-time", "now"); + extractorAttributes.put("extractor.history.end-time", "now"); + client.alterPipe( + new TAlterPipeReq() + .setPipeName("p1") + .setExtractorAttributes(extractorAttributes) + .setIsReplaceAllExtractorAttributes(false) + .setProcessorAttributes(new HashMap<>()) + .setIsReplaceAllProcessorAttributes(false) + .setConnectorAttributes(new HashMap<>()) + .setIsReplaceAllConnectorAttributes(false)); + + showPipeResult = client.showPipe(new TShowPipeReq()).pipeInfoList; + Assert.assertTrue( + showPipeResult.stream().anyMatch((o) -> o.id.equals("p1") && o.state.equals("RUNNING"))); + + extractorAttributes.clear(); + extractorAttributes.put("start-time", "now"); + extractorAttributes.put("end-time", "now"); + extractorAttributes.put("history.start-time", "now"); + extractorAttributes.put("history.end-time", "now"); + client.alterPipe( + new TAlterPipeReq() + .setPipeName("p1") + .setExtractorAttributes(extractorAttributes) + .setIsReplaceAllExtractorAttributes(false) + .setProcessorAttributes(new HashMap<>()) + .setIsReplaceAllProcessorAttributes(false) + .setConnectorAttributes(new HashMap<>()) + .setIsReplaceAllConnectorAttributes(false)); + + Assert.assertEquals( + TSStatusCode.SUCCESS_STATUS.getStatusCode(), client.stopPipe("p1").getCode()); + + showPipeResult = client.showPipe(new TShowPipeReq()).pipeInfoList; + Assert.assertTrue(showPipeResult.stream().anyMatch((o) -> o.id.equals("p1"))); + } + } + + @Test + public void testTreeModeSQLSupportNowFunc() { + final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); + + final String receiverIp = receiverDataNode.getIp(); + final int receiverPort = receiverDataNode.getPort(); + + final String p1 = + String.format( + "create pipe p1" + + " with extractor (" + + "'extractor.history.enable'='true'," + + "'source.start-time'='now'," + + "'source.end-time'='now'," + + "'source.history.start-time'='now'," + + "'source.history.end-time'='now')" + + " with connector (" + + "'connector'='iotdb-thrift-connector'," + + "'connector.ip'='%s'," + + "'connector.port'='%s'," + + "'connector.batch.enable'='false')", + receiverIp, receiverPort); + try (final Connection connection = senderEnv.getConnection(); + final Statement statement = connection.createStatement()) { + statement.execute(p1); + } catch (final 
SQLException e) { + fail(e.getMessage()); + } + + final String p2 = + String.format( + "create pipe p2" + + " with extractor (" + + "'extractor.history.enable'='true'," + + "'start-time'='now'," + + "'end-time'='now'," + + "'history.start-time'='now'," + + "'history.end-time'='now')" + + " with connector (" + + "'connector'='iotdb-thrift-connector'," + + "'connector.ip'='%s'," + + "'connector.port'='%s'," + + "'connector.batch.enable'='false')", + receiverIp, receiverPort); + try (final Connection connection = senderEnv.getConnection(); + final Statement statement = connection.createStatement()) { + statement.execute(p2); + } catch (final SQLException e) { + fail(e.getMessage()); + } + + final String p3 = + String.format( + "create pipe p3" + + " with extractor (" + + "'extractor.history.enable'='true'," + + "'extractor.start-time'='now'," + + "'extractor.end-time'='now'," + + "'extractor.history.start-time'='now'," + + "'extractor.history.end-time'='now')" + + " with connector (" + + "'connector'='iotdb-thrift-connector'," + + "'connector.ip'='%s'," + + "'connector.port'='%s'," + + "'connector.batch.enable'='false')", + receiverIp, receiverPort); + try (final Connection connection = senderEnv.getConnection(); + final Statement statement = connection.createStatement()) { + statement.execute(p3); + } catch (final SQLException e) { + fail(e.getMessage()); + } + + String alterP3 = + "alter pipe p3" + + " modify extractor (" + + "'history.enable'='true'," + + "'start-time'='now'," + + "'end-time'='now'," + + "'history.start-time'='now'," + + "'history.end-time'='now')"; + try (final Connection connection = senderEnv.getConnection(); + final Statement statement = connection.createStatement()) { + statement.execute(alterP3); + } catch (final SQLException e) { + fail(e.getMessage()); + } + + alterP3 = + "alter pipe p3" + + " modify extractor (" + + "'extractor.history.enable'='true'," + + "'extractor.start-time'='now'," + + "'extractor.end-time'='now'," + + "'extractor.history.start-time'='now'," + + "'extractor.history.end-time'='now')"; + try (final Connection connection = senderEnv.getConnection(); + final Statement statement = connection.createStatement()) { + statement.execute(alterP3); + } catch (final SQLException e) { + fail(e.getMessage()); + } + + alterP3 = + "alter pipe p3" + + " modify source (" + + "'extractor.history.enable'='true'," + + "'source.start-time'='now'," + + "'source.end-time'='now'," + + "'source.history.start-time'='now'," + + "'source.history.end-time'='now')"; + try (final Connection connection = senderEnv.getConnection(); + final Statement statement = connection.createStatement()) { + statement.execute(alterP3); + } catch (final SQLException e) { + fail(e.getMessage()); + } + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/AbstractPipeDualManualIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/AbstractPipeDualManualIT.java index ae81f1ffb20a3..a13e8dc152dbe 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/AbstractPipeDualManualIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/AbstractPipeDualManualIT.java @@ -36,27 +36,33 @@ public void setUp() { MultiEnvFactory.createEnv(2); senderEnv = MultiEnvFactory.getEnv(0); receiverEnv = MultiEnvFactory.getEnv(1); + setupConfig(); + senderEnv.initClusterEnvironment(); + receiverEnv.initClusterEnvironment(); + } + protected void setupConfig() { // TODO: delete ratis configurations senderEnv .getConfig() 
.getCommonConfig() .setAutoCreateSchemaEnabled(false) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); receiverEnv .getConfig() .getCommonConfig() .setAutoCreateSchemaEnabled(false) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); // 10 min, assert that the operations will not time out - senderEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); - receiverEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); - - senderEnv.initClusterEnvironment(); - receiverEnv.initClusterEnvironment(); + senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); } @After diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeInclusionIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeInclusionIT.java index 4515536c2533c..76affe08850fa 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeInclusionIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeInclusionIT.java @@ -180,6 +180,7 @@ public void testAuthInclusionWithPattern() throws Exception { senderEnv, Arrays.asList( "create user `ln_write_user` 'write_pwd'", + "grant manage_database,manage_user,manage_role,use_trigger,use_udf,use_cq,use_pipe on root.** to USER ln_write_user with grant option", "GRANT READ_DATA, WRITE_DATA ON root.** TO USER ln_write_user;"))) { return; } @@ -189,7 +190,16 @@ public void testAuthInclusionWithPattern() throws Exception { "LIST PRIVILEGES OF USER ln_write_user", "ROLE,PATH,PRIVILEGES,GRANT OPTION,", new HashSet<>( - Arrays.asList(",root.ln.**,READ_DATA,false,", ",root.ln.**,WRITE_DATA,false,"))); + Arrays.asList( + ",root.**,MANAGE_USER,true,", + ",root.**,MANAGE_ROLE,true,", + ",root.**,USE_TRIGGER,true,", + ",root.**,USE_UDF,true,", + ",root.**,USE_CQ,true,", + ",root.**,USE_PIPE,true,", + ",root.**,MANAGE_DATABASE,true,", + ",root.ln.**,READ_DATA,false,", + ",root.ln.**,WRITE_DATA,false,"))); } } diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeMetaHistoricalIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeMetaHistoricalIT.java index b6fd01a8efe76..94f629d846cb0 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeMetaHistoricalIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeMetaHistoricalIT.java @@ -74,8 +74,8 @@ public void setUp() { .setDataReplicationFactor(2); // 10 min, assert that the operations will not time out - senderEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); - receiverEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); + senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); senderEnv.initClusterEnvironment(); receiverEnv.initClusterEnvironment(3, 3); @@ -191,6 +191,7 @@ public void 
testAuthInclusion() throws Exception {
 "create role `admin`",
 "grant role `admin` to `thulab`",
 "grant read on root.** to role `admin`",
+ "grant manage_database,manage_user,manage_role,use_trigger,use_udf,use_cq,use_pipe on root.** to role `admin`;",
 "create schema template t1 (temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY)",
 "set schema template t1 to root.ln.wf01",
 "create timeseries using schema template on root.ln.wf01.wt01",
@@ -240,7 +241,16 @@ public void testAuthInclusion() throws Exception {
 + ColumnHeaderConstant.GRANT_OPTION
 + ",",
 new HashSet<>(
- Arrays.asList("admin,root.**,READ_DATA,false,", "admin,root.**,READ_SCHEMA,false,")));
+ Arrays.asList(
+ "admin,root.**,MANAGE_USER,false,",
+ "admin,root.**,MANAGE_ROLE,false,",
+ "admin,root.**,USE_TRIGGER,false,",
+ "admin,root.**,USE_UDF,false,",
+ "admin,root.**,USE_CQ,false,",
+ "admin,root.**,USE_PIPE,false,",
+ "admin,root.**,MANAGE_DATABASE,false,",
+ "admin,root.**,READ_DATA,false,",
+ "admin,root.**,READ_SCHEMA,false,")));
 TestUtils.assertDataAlwaysOnEnv(
 receiverEnv,
@@ -261,4 +271,60 @@ public void testAuthInclusion() throws Exception {
 new HashSet<>(Arrays.asList("admin,", "test,")));
 }
 }
+
+ @Test
+ public void testTimeSeriesInclusion() throws Exception {
+ final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0);
+
+ final String receiverIp = receiverDataNode.getIp();
+ final int receiverPort = receiverDataNode.getPort();
+
+ try (final SyncConfigNodeIServiceClient client =
+ (SyncConfigNodeIServiceClient) senderEnv.getLeaderConfigNodeConnection()) {
+
+ // Do not fail if the failure has nothing to do with the pipe,
+ // because failures can occur randomly due to resource limitations
+ if (!TestUtils.tryExecuteNonQueriesWithRetry(
+ senderEnv,
+ Arrays.asList(
+ "create database root.sg",
+ "create timeseries root.sg.a.b int32",
+ "create aligned timeseries root.sg.`apache|timecho-tag-attr`.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4))"))) {
+ return;
+ }
+
+ final Map<String, String> extractorAttributes = new HashMap<>();
+ final Map<String, String> processorAttributes = new HashMap<>();
+ final Map<String, String> connectorAttributes = new HashMap<>();
+
+ extractorAttributes.put("extractor.inclusion", "schema");
+
+ connectorAttributes.put("connector", "iotdb-thrift-connector");
+ connectorAttributes.put("connector.ip", receiverIp);
+ connectorAttributes.put("connector.port", Integer.toString(receiverPort));
+ connectorAttributes.put("connector.exception.conflict.resolve-strategy", "retry");
+ connectorAttributes.put("connector.exception.conflict.retry-max-time-seconds", "-1");
+
+ final TSStatus status =
+ client.createPipe(
+ new TCreatePipeReq("testPipe", connectorAttributes)
+ .setExtractorAttributes(extractorAttributes)
+ .setProcessorAttributes(processorAttributes));
+
+ Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode());
+
+ Assert.assertEquals(
+ TSStatusCode.SUCCESS_STATUS.getStatusCode(), client.startPipe("testPipe").getCode());
+
+ TestUtils.assertDataEventuallyOnEnv(
+ receiverEnv,
+ "show timeseries",
+ "Timeseries,Alias,Database,DataType,Encoding,Compression,Tags,Attributes,Deadband,DeadbandParameters,ViewType,",
+ new HashSet<>(
+ Arrays.asList(
+ "root.sg.a.b,null,root.sg,INT32,TS_2DIFF,LZ4,null,null,null,null,BASE,",
+ 
"root.sg.`apache|timecho-tag-attr`.d1.s1,null,root.sg,INT32,TS_2DIFF,LZ4,{\"tag1\":\"v1\",\"tag2\":\"v2\"},{\"attr2\":\"v2\",\"attr1\":\"v1\"},null,null,BASE,", + "root.sg.`apache|timecho-tag-attr`.d1.s2,null,root.sg,DOUBLE,GORILLA,LZ4,{\"tag4\":\"v4\",\"tag3\":\"v3\"},{\"attr4\":\"v4\",\"attr3\":\"v3\"},null,null,BASE,"))); + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeMetaLeaderChangeIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeMetaLeaderChangeIT.java index de386f7f3dc26..c03f56bad4ee3 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeMetaLeaderChangeIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeMetaLeaderChangeIT.java @@ -59,8 +59,8 @@ public void setUp() { .setSchemaReplicationFactor(3); // 10 min, assert that the operations will not time out - senderEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); - receiverEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); + senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); senderEnv.initClusterEnvironment(3, 3, 180); receiverEnv.initClusterEnvironment(); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipePermissionIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipePermissionIT.java new file mode 100644 index 0000000000000..fb1b4121e2bb7 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipePermissionIT.java @@ -0,0 +1,232 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.pipe.it.manual; + +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient; +import org.apache.iotdb.confignode.rpc.thrift.TCreatePipeReq; +import org.apache.iotdb.consensus.ConsensusFactory; +import org.apache.iotdb.db.it.utils.TestUtils; +import org.apache.iotdb.it.env.MultiEnvFactory; +import org.apache.iotdb.it.env.cluster.node.DataNodeWrapper; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2ManualCreateSchema; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.fail; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2ManualCreateSchema.class}) +public class IoTDBPipePermissionIT extends AbstractPipeDualManualIT { + @Override + @Before + public void setUp() { + MultiEnvFactory.createEnv(2); + senderEnv = MultiEnvFactory.getEnv(0); + receiverEnv = MultiEnvFactory.getEnv(1); + + // TODO: delete ratis configurations + senderEnv + .getConfig() + .getCommonConfig() + .setAutoCreateSchemaEnabled(false) + .setDefaultSchemaRegionGroupNumPerDatabase(1) + .setTimestampPrecision("ms") + .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + receiverEnv + .getConfig() + .getCommonConfig() + .setAutoCreateSchemaEnabled(false) + .setTimestampPrecision("ms") + .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS) + .setSchemaReplicationFactor(3) + .setDataReplicationFactor(2); + + // 10 min, assert that the operations will not time out + senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + + senderEnv.initClusterEnvironment(); + receiverEnv.initClusterEnvironment(3, 3); + } + + @Test + public void testWithSyncConnector() throws Exception { + testWithConnector("iotdb-thrift-sync-connector"); + } + + @Test + public void testWithAsyncConnector() throws Exception { + testWithConnector("iotdb-thrift-async-connector"); + } + + private void testWithConnector(final String connector) throws Exception { + if (!TestUtils.tryExecuteNonQueriesWithRetry( + receiverEnv, + Arrays.asList( + "create user `thulab` 'passwd'", + "create role `admin`", + "grant role `admin` to `thulab`", + "grant WRITE, READ, MANAGE_DATABASE, MANAGE_USER on root.** to role `admin`"))) { + return; + } + + final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); + final String receiverIp = receiverDataNode.getIp(); + final int receiverPort = receiverDataNode.getPort(); + + try (final SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) senderEnv.getLeaderConfigNodeConnection()) { + if (!TestUtils.tryExecuteNonQueriesWithRetry( + senderEnv, + Arrays.asList( + "create user user 'passwd'", + "create timeseries root.ln.wf02.wt01.temperature with datatype=INT64,encoding=PLAIN", + "create timeseries root.ln.wf02.wt01.status with 
datatype=BOOLEAN,encoding=PLAIN", + "insert into root.ln.wf02.wt01(time, temperature, status) values (1800000000000, 23, true)"))) { + fail(); + return; + } + + final Map extractorAttributes = new HashMap<>(); + final Map processorAttributes = new HashMap<>(); + final Map connectorAttributes = new HashMap<>(); + + extractorAttributes.put("extractor.inclusion", "all"); + + connectorAttributes.put("connector", connector); + connectorAttributes.put("connector.ip", receiverIp); + connectorAttributes.put("connector.port", Integer.toString(receiverPort)); + connectorAttributes.put("connector.username", "thulab"); + connectorAttributes.put("connector.password", "passwd"); + + final TSStatus status = + client.createPipe( + new TCreatePipeReq("testPipe", connectorAttributes) + .setExtractorAttributes(extractorAttributes) + .setProcessorAttributes(processorAttributes)); + + Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); + Assert.assertEquals( + TSStatusCode.SUCCESS_STATUS.getStatusCode(), client.startPipe("testPipe").getCode()); + + TestUtils.assertDataEventuallyOnEnv( + receiverEnv, + "list user", + "User,", + new HashSet<>(Arrays.asList("root,", "user,", "thulab,"))); + final Set expectedResSet = new HashSet<>(); + expectedResSet.add( + "root.ln.wf02.wt01.temperature,null,root.ln,INT64,PLAIN,LZ4,null,null,null,null,BASE,"); + expectedResSet.add( + "root.ln.wf02.wt01.status,null,root.ln,BOOLEAN,PLAIN,LZ4,null,null,null,null,BASE,"); + TestUtils.assertDataEventuallyOnEnv( + receiverEnv, + "show timeseries", + "Timeseries,Alias,Database,DataType,Encoding,Compression,Tags,Attributes,Deadband,DeadbandParameters,ViewType,", + expectedResSet); + expectedResSet.clear(); + + TestUtils.assertDataEventuallyOnEnv( + receiverEnv, + "select * from root.**", + "Time,root.ln.wf02.wt01.temperature,root.ln.wf02.wt01.status,", + Collections.singleton("1800000000000,23,true,")); + } + } + + @Test + public void testNoPermission() throws Exception { + if (!TestUtils.tryExecuteNonQueriesWithRetry( + receiverEnv, + Arrays.asList( + "create user `thulab` 'passwd'", + "create role `admin`", + "grant role `admin` to `thulab`", + "grant READ, MANAGE_DATABASE on root.ln.** to role `admin`"))) { + return; + } + + final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); + final String receiverIp = receiverDataNode.getIp(); + final int receiverPort = receiverDataNode.getPort(); + + try (final SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) senderEnv.getLeaderConfigNodeConnection()) { + if (!TestUtils.tryExecuteNonQueriesWithRetry( + senderEnv, + Arrays.asList( + "create user someUser 'passwd'", + "create database root.noPermission", + "create timeseries root.ln.wf02.wt01.status with datatype=BOOLEAN,encoding=PLAIN"))) { + fail(); + return; + } + + final Map extractorAttributes = new HashMap<>(); + final Map processorAttributes = new HashMap<>(); + final Map connectorAttributes = new HashMap<>(); + + extractorAttributes.put("extractor.inclusion", "all"); + + connectorAttributes.put("connector", "iotdb-thrift-async-connector"); + connectorAttributes.put("connector.ip", receiverIp); + connectorAttributes.put("connector.port", Integer.toString(receiverPort)); + connectorAttributes.put("connector.username", "thulab"); + connectorAttributes.put("connector.password", "passwd"); + + final TSStatus status = + client.createPipe( + new TCreatePipeReq("testPipe", connectorAttributes) + .setExtractorAttributes(extractorAttributes) + 
.setProcessorAttributes(processorAttributes)); + + Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); + Assert.assertEquals( + TSStatusCode.SUCCESS_STATUS.getStatusCode(), client.startPipe("testPipe").getCode()); + + TestUtils.assertDataEventuallyOnEnv( + receiverEnv, "count databases", "count,", Collections.singleton("1,")); + TestUtils.assertDataAlwaysOnEnv( + receiverEnv, + "show timeseries", + "Timeseries,Alias,Database,DataType,Encoding,Compression,Tags,Attributes,Deadband,DeadbandParameters,ViewType,", + Collections.emptySet()); + TestUtils.assertDataAlwaysOnEnv( + receiverEnv, "list user", "User,", Collections.singleton("root,")); + } + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeReqAutoSliceIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeReqAutoSliceIT.java new file mode 100644 index 0000000000000..aa2fd4a356ad6 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeReqAutoSliceIT.java @@ -0,0 +1,482 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.pipe.it.manual; + +import org.apache.iotdb.commons.utils.function.CheckedTriConsumer; +import org.apache.iotdb.db.it.utils.TestUtils; +import org.apache.iotdb.isession.ISession; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2ManualCreateSchema; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; + +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.utils.BitMap; +import org.apache.tsfile.utils.Pair; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.IMeasurementSchema; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.Assert; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Random; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2ManualCreateSchema.class}) +public class IoTDBPipeReqAutoSliceIT extends AbstractPipeDualManualIT { + private static final int generateDataSize = 10; + + @Override + protected void setupConfig() { + super.setupConfig(); + senderEnv.getConfig().getCommonConfig().setPipeConnectorRequestSliceThresholdBytes(4); + receiverEnv.getConfig().getCommonConfig().setPipeConnectorRequestSliceThresholdBytes(4); + } + + @Test + public void insertTablet() { + prepareReqAutoSliceTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + senderSession.insertTablet(tablet); + }, + false); + } + + @Ignore + @Test + public void insertTabletReceiveByTsFile() { + prepareReqAutoSliceTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + senderSession.insertTablet(tablet); + }, + true); + } + + @Ignore + @Test + public void insertAlignedTablet() { + prepareReqAutoSliceTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + senderSession.insertAlignedTablet(tablet); + }, + false); + } + + @Ignore + @Test + public void insertAlignedTabletReceiveByTsFile() { + prepareReqAutoSliceTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + senderSession.insertAlignedTablet(tablet); + }, + true); + } + + @Ignore + @Test + public void insertRecordsReceiveByTsFile() { + prepareReqAutoSliceTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertRecordForTable(tablet); + senderSession.insertRecords( + getDeviceID(tablet), timestamps, pair.left, pair.right, values); + }, + true); + } + + @Ignore + @Test + public void insertRecord() { + prepareReqAutoSliceTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertRecordForTable(tablet); + for (int i = 0; i < values.size(); i++) { + senderSession.insertRecord( + tablet.deviceId, + timestamps.get(i), + pair.left.get(i), + pair.right.get(i), + values.get(i).toArray()); + } + }, + false); + } + + @Ignore + @Test + public void insertRecordReceiveByTsFile() { + prepareReqAutoSliceTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = 
getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertRecordForTable(tablet); + for (int i = 0; i < values.size(); i++) { + senderSession.insertRecord( + tablet.deviceId, + timestamps.get(i), + pair.left.get(i), + pair.right.get(i), + values.get(i).toArray()); + } + }, + true); + } + + @Ignore + @Test + public void insertAlignedRecord() { + prepareReqAutoSliceTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertRecordForTable(tablet); + for (int i = 0; i < values.size(); i++) { + senderSession.insertAlignedRecord( + tablet.deviceId, + timestamps.get(i), + pair.left.get(i), + pair.right.get(i), + values.get(i)); + } + }, + false); + } + + @Ignore + @Test + public void insertAlignedRecordReceiveByTsFile() { + prepareReqAutoSliceTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertRecordForTable(tablet); + for (int i = 0; i < values.size(); i++) { + senderSession.insertAlignedRecord( + tablet.deviceId, + timestamps.get(i), + pair.left.get(i), + pair.right.get(i), + values.get(i)); + } + }, + true); + } + + @Ignore + @Test + public void insertRecords() { + prepareReqAutoSliceTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertRecordForTable(tablet); + senderSession.insertRecords( + getDeviceID(tablet), timestamps, pair.left, pair.right, values); + }, + false); + } + + @Ignore + @Test + public void insertAlignedRecords() { + prepareReqAutoSliceTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertRecordForTable(tablet); + senderSession.insertAlignedRecords( + getDeviceID(tablet), timestamps, pair.left, pair.right, values); + }, + false); + } + + @Ignore + @Test + public void insertAlignedRecordsReceiveByTsFile() { + prepareReqAutoSliceTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertRecordForTable(tablet); + senderSession.insertAlignedRecords( + getDeviceID(tablet), timestamps, pair.left, pair.right, values); + }, + true); + } + + @Ignore + @Test + public void insertStringRecordsOfOneDevice() { + prepareReqAutoSliceTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertStrRecordForTable(tablet); + senderSession.insertStringRecordsOfOneDevice( + tablet.deviceId, timestamps, pair.left, values); + }, + false); + } + + @Ignore + @Test + public void insertStringRecordsOfOneDeviceReceiveByTsFile() { + prepareReqAutoSliceTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> 
values = generateTabletInsertStrRecordForTable(tablet); + senderSession.insertStringRecordsOfOneDevice( + tablet.deviceId, timestamps, pair.left, values); + }, + true); + } + + @Ignore + @Test + public void insertAlignedStringRecordsOfOneDevice() { + prepareReqAutoSliceTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertStrRecordForTable(tablet); + senderSession.insertAlignedStringRecordsOfOneDevice( + tablet.deviceId, timestamps, pair.left, values); + }, + false); + } + + @Ignore + @Test + public void insertAlignedStringRecordsOfOneDeviceReceiveByTsFile() { + prepareReqAutoSliceTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertStrRecordForTable(tablet); + senderSession.insertAlignedStringRecordsOfOneDevice( + tablet.deviceId, timestamps, pair.left, values); + }, + true); + } + + private void prepareReqAutoSliceTest( + CheckedTriConsumer consumer, boolean isTsFile) { + Tablet tablet = createTablet(); + createTimeSeries(); + try (ISession senderSession = senderEnv.getSessionConnection(); + ISession receiverSession = receiverEnv.getSessionConnection()) { + if (isTsFile) { + consumer.accept(senderSession, receiverSession, tablet); + senderSession.executeNonQueryStatement("flush"); + Thread.sleep(2000); + createPipe(senderSession, true); + } else { + createPipe(senderSession, false); + Thread.sleep(2000); + consumer.accept(senderSession, receiverSession, tablet); + senderSession.executeNonQueryStatement("flush"); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + verify(tablet); + } + + private void createPipe(ISession session, boolean isTsFile) + throws IoTDBConnectionException, StatementExecutionException { + session.executeNonQueryStatement( + String.format( + "create pipe test" + + " with source ('source'='iotdb-source','source.path'='root.test.**')" + + " with sink ('sink'='iotdb-thrift-sync-sink','node-urls'='%s:%s','batch.enable'='false','sink.format'='%s')", + receiverEnv.getIP(), receiverEnv.getPort(), isTsFile ? 
"tsfile" : "tablet")); + } + + private int[] createTestDataForInt32() { + int[] data = new int[generateDataSize]; + Random random = new Random(); + for (int i = 0; i < generateDataSize; i++) { + data[i] = random.nextInt(); + } + return data; + } + + private long[] createTestDataForInt64() { + long[] data = new long[generateDataSize]; + long time = System.currentTimeMillis(); + for (int i = 0; i < generateDataSize; i++) { + data[i] = time + i; + } + return data; + } + + private void verify(Tablet tablet) { + HashSet set = new HashSet<>(); + for (int i = 0; i < generateDataSize; i++) { + set.add( + String.format( + "%d,%d,%d,", + tablet.timestamps[i], ((int[]) tablet.values[0])[i], ((int[]) tablet.values[1])[i])); + } + TestUtils.assertDataEventuallyOnEnv( + receiverEnv, + "select * from root.test.** ORDER BY time ASC", + "Time,root.test.db.temperature,root.test.db.status,", + set, + 20); + } + + private void createTimeSeries() { + List timeSeriesCreation = + Arrays.asList( + "create timeseries root.test.db.status with datatype=INT32,encoding=PLAIN", + "create timeseries root.test.db.temperature with datatype=INT32,encoding=PLAIN"); + TestUtils.tryExecuteNonQueriesWithRetry(senderEnv, timeSeriesCreation); + TestUtils.tryExecuteNonQueriesWithRetry(receiverEnv, timeSeriesCreation); + } + + private Tablet createTablet() { + long[] timestamp = createTestDataForInt64(); + int[] temperature = createTestDataForInt32(); + int[] status = createTestDataForInt32(); + + Object[] objects = new Object[2]; + objects[0] = temperature; + objects[1] = status; + + List measurementSchemas = new ArrayList<>(2); + measurementSchemas.add(new MeasurementSchema("temperature", TSDataType.INT32)); + measurementSchemas.add(new MeasurementSchema("status", TSDataType.INT32)); + + BitMap[] bitMaps = new BitMap[2]; + for (int i = 0; i < bitMaps.length; i++) { + bitMaps[i] = new BitMap(generateDataSize); + } + + return new Tablet( + "root.test.db", measurementSchemas, timestamp, objects, bitMaps, generateDataSize); + } + + private List getTimestampList(Tablet tablet) { + long[] timestamps = tablet.timestamps; + List data = new ArrayList<>(timestamps.length); + for (long timestamp : timestamps) { + data.add(timestamp); + } + return data; + } + + private Pair>, List>> getMeasurementSchemasAndType( + Tablet tablet) { + List> schemaData = new ArrayList<>(tablet.rowSize); + List> typeData = new ArrayList<>(tablet.rowSize); + List measurementSchemas = new ArrayList<>(tablet.getSchemas().size()); + List types = new ArrayList<>(tablet.rowSize); + for (IMeasurementSchema measurementSchema : tablet.getSchemas()) { + measurementSchemas.add(measurementSchema.getMeasurementId()); + types.add(measurementSchema.getType()); + } + + for (int i = 0; i < tablet.rowSize; i++) { + schemaData.add(measurementSchemas); + typeData.add(types); + } + + return new Pair<>(schemaData, typeData); + } + + private List getDeviceID(Tablet tablet) { + List data = new ArrayList<>(tablet.rowSize); + for (int i = 0; i < tablet.rowSize; i++) { + data.add(tablet.deviceId); + } + return data; + } + + private List> generateTabletInsertRecordForTable(final Tablet tablet) { + List> insertRecords = new ArrayList<>(tablet.rowSize); + final List schemas = tablet.getSchemas(); + final Object[] values = tablet.values; + for (int i = 0; i < tablet.rowSize; i++) { + List insertRecord = new ArrayList<>(); + for (int j = 0; j < schemas.size(); j++) { + switch (schemas.get(j).getType()) { + case INT64: + case TIMESTAMP: + insertRecord.add(((long[]) values[j])[i]); + break; + 
case INT32: + insertRecord.add(((int[]) values[j])[i]); + break; + } + } + insertRecords.add(insertRecord); + } + + return insertRecords; + } + + private List> generateTabletInsertStrRecordForTable(Tablet tablet) { + List> insertRecords = new ArrayList<>(tablet.rowSize); + final List schemas = tablet.getSchemas(); + final Object[] values = tablet.values; + for (int i = 0; i < tablet.rowSize; i++) { + List insertRecord = new ArrayList<>(); + for (int j = 0; j < schemas.size(); j++) { + switch (schemas.get(j).getType()) { + case INT64: + insertRecord.add(String.valueOf(((long[]) values[j])[i])); + break; + case INT32: + insertRecord.add(String.valueOf(((int[]) values[j])[i])); + break; + } + } + insertRecords.add(insertRecord); + } + + return insertRecords; + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeTypeConversionISessionIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeTypeConversionISessionIT.java new file mode 100644 index 0000000000000..c4198e1a9fe70 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeTypeConversionISessionIT.java @@ -0,0 +1,815 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.pipe.it.manual; + +import org.apache.iotdb.commons.utils.function.CheckedTriConsumer; +import org.apache.iotdb.db.it.utils.TestUtils; +import org.apache.iotdb.db.pipe.receiver.transform.converter.ValueConverter; +import org.apache.iotdb.isession.ISession; +import org.apache.iotdb.isession.SessionDataSet; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2ManualCreateSchema; +import org.apache.iotdb.itbase.env.BaseEnv; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.RpcUtils; +import org.apache.iotdb.rpc.StatementExecutionException; + +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.tsfile.common.conf.TSFileConfig; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.read.common.Field; +import org.apache.tsfile.read.common.RowRecord; +import org.apache.tsfile.utils.Binary; +import org.apache.tsfile.utils.BitMap; +import org.apache.tsfile.utils.BytesUtils; +import org.apache.tsfile.utils.DateUtils; +import org.apache.tsfile.utils.Pair; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.IMeasurementSchema; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.time.LocalDate; +import java.time.ZoneOffset; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Random; +import java.util.concurrent.TimeUnit; + +import static org.awaitility.Awaitility.await; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2ManualCreateSchema.class}) +public class IoTDBPipeTypeConversionISessionIT extends AbstractPipeDualManualIT { + private static final int generateDataSize = 100; + + @Test + public void insertTablet() { + prepareTypeConversionTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + senderSession.insertTablet(tablet); + }, + false); + } + + @Test + @Ignore("The receiver conversion is currently banned, will ignore conflict") + public void insertTabletReceiveByTsFile() { + prepareTypeConversionTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + senderSession.insertTablet(tablet); + }, + true); + } + + @Test + public void insertAlignedTablet() { + prepareTypeConversionTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + senderSession.insertAlignedTablet(tablet); + }, + false); + } + + @Test + @Ignore("The receiver conversion is currently banned, will ignore conflict") + public void insertAlignedTabletReceiveByTsFile() { + prepareTypeConversionTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + senderSession.insertAlignedTablet(tablet); + }, + true); + } + + @Test + @Ignore("The receiver conversion is currently banned, will ignore conflict") + public void insertRecordsReceiveByTsFile() { + prepareTypeConversionTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertRecordForTable(tablet); + senderSession.insertRecords( + getDeviceID(tablet), timestamps, pair.left, pair.right, values); + }, + true); + } + + @Test 
+ public void insertRecord() { + prepareTypeConversionTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertRecordForTable(tablet); + for (int i = 0; i < values.size(); i++) { + senderSession.insertRecord( + tablet.deviceId, + timestamps.get(i), + pair.left.get(i), + pair.right.get(i), + values.get(i).toArray()); + } + }, + false); + } + + @Test + @Ignore("The receiver conversion is currently banned, will ignore conflict") + public void insertRecordReceiveByTsFile() { + prepareTypeConversionTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertRecordForTable(tablet); + for (int i = 0; i < values.size(); i++) { + senderSession.insertRecord( + tablet.deviceId, + timestamps.get(i), + pair.left.get(i), + pair.right.get(i), + values.get(i).toArray()); + } + }, + true); + } + + @Test + public void insertAlignedRecord() { + prepareTypeConversionTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertRecordForTable(tablet); + for (int i = 0; i < values.size(); i++) { + senderSession.insertAlignedRecord( + tablet.deviceId, + timestamps.get(i), + pair.left.get(i), + pair.right.get(i), + values.get(i)); + } + }, + false); + } + + @Test + @Ignore("The receiver conversion is currently banned, will ignore conflict") + public void insertAlignedRecordReceiveByTsFile() { + prepareTypeConversionTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertRecordForTable(tablet); + for (int i = 0; i < values.size(); i++) { + senderSession.insertAlignedRecord( + tablet.deviceId, + timestamps.get(i), + pair.left.get(i), + pair.right.get(i), + values.get(i)); + } + }, + true); + } + + @Test + public void insertRecords() { + prepareTypeConversionTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertRecordForTable(tablet); + senderSession.insertRecords( + getDeviceID(tablet), timestamps, pair.left, pair.right, values); + }, + false); + } + + @Test + public void insertAlignedRecords() { + prepareTypeConversionTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertRecordForTable(tablet); + senderSession.insertAlignedRecords( + getDeviceID(tablet), timestamps, pair.left, pair.right, values); + }, + false); + } + + @Test + @Ignore("The receiver conversion is currently banned, will ignore conflict") + public void insertAlignedRecordsReceiveByTsFile() { + prepareTypeConversionTest( + (ISession senderSession, ISession receiverSession, Tablet tablet) -> { + List timestamps = getTimestampList(tablet); + Pair>, List>> pair = + getMeasurementSchemasAndType(tablet); + List> values = generateTabletInsertRecordForTable(tablet); + 
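+ // Same payload as insertAlignedRecords() above; the trailing "true" makes
+ // prepareTypeConversionTest() ship the data as a TsFile ('sink.format'='tsfile'), so
+ // type conversion would have to happen while the receiver loads the file (hence the
+ // @Ignore while receiver-side conversion is disabled).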
senderSession.insertAlignedRecords(
+ getDeviceID(tablet), timestamps, pair.left, pair.right, values);
+ },
+ true);
+ }
+
+ @Test
+ public void insertStringRecordsOfOneDevice() {
+ prepareTypeConversionTest(
+ (ISession senderSession, ISession receiverSession, Tablet tablet) -> {
+ List<Long> timestamps = getTimestampList(tablet);
+ Pair<List<List<String>>, List<List<TSDataType>>> pair =
+ getMeasurementSchemasAndType(tablet);
+ List<List<String>> values = generateTabletInsertStrRecordForTable(tablet);
+ senderSession.insertStringRecordsOfOneDevice(
+ tablet.deviceId, timestamps, pair.left, values);
+ },
+ false);
+ }
+
+ @Test
+ @Ignore("The receiver conversion is currently banned, will ignore conflict")
+ public void insertStringRecordsOfOneDeviceReceiveByTsFile() {
+ prepareTypeConversionTest(
+ (ISession senderSession, ISession receiverSession, Tablet tablet) -> {
+ List<Long> timestamps = getTimestampList(tablet);
+ Pair<List<List<String>>, List<List<TSDataType>>> pair =
+ getMeasurementSchemasAndType(tablet);
+ List<List<String>> values = generateTabletInsertStrRecordForTable(tablet);
+ senderSession.insertStringRecordsOfOneDevice(
+ tablet.deviceId, timestamps, pair.left, values);
+ },
+ true);
+ }
+
+ @Test
+ public void insertAlignedStringRecordsOfOneDevice() {
+ prepareTypeConversionTest(
+ (ISession senderSession, ISession receiverSession, Tablet tablet) -> {
+ List<Long> timestamps = getTimestampList(tablet);
+ Pair<List<List<String>>, List<List<TSDataType>>> pair =
+ getMeasurementSchemasAndType(tablet);
+ List<List<String>> values = generateTabletInsertStrRecordForTable(tablet);
+ senderSession.insertAlignedStringRecordsOfOneDevice(
+ tablet.deviceId, timestamps, pair.left, values);
+ },
+ false);
+ }
+
+ @Test
+ @Ignore("The receiver conversion is currently banned, will ignore conflict")
+ public void insertAlignedStringRecordsOfOneDeviceReceiveByTsFile() {
+ prepareTypeConversionTest(
+ (ISession senderSession, ISession receiverSession, Tablet tablet) -> {
+ List<Long> timestamps = getTimestampList(tablet);
+ Pair<List<List<String>>, List<List<TSDataType>>> pair =
+ getMeasurementSchemasAndType(tablet);
+ List<List<String>> values = generateTabletInsertStrRecordForTable(tablet);
+ senderSession.insertAlignedStringRecordsOfOneDevice(
+ tablet.deviceId, timestamps, pair.left, values);
+ },
+ true);
+ }
+
+ private SessionDataSet query(
+ ISession session, List<IMeasurementSchema> measurementSchemas, String deviceId)
+ throws IoTDBConnectionException, StatementExecutionException {
+ String sql = "select ";
+ StringBuffer param = new StringBuffer();
+ for (IMeasurementSchema schema : measurementSchemas) {
+ param.append(schema.getMeasurementId());
+ param.append(',');
+ }
+ sql = sql + param.substring(0, param.length() - 1);
+ sql = sql + " from " + deviceId + " ORDER BY time ASC";
+ return session.executeQueryStatement(sql);
+ }
+
+ private void prepareTypeConversionTest(
+ CheckedTriConsumer<ISession, ISession, Tablet, Exception> consumer, boolean isTsFile) {
+ List<Pair<MeasurementSchema, MeasurementSchema>> measurementSchemas =
+ generateMeasurementSchemas();
+
+ // Create the time series on both the sender and the receiver
+ String uuid = RandomStringUtils.random(8, true, false);
+ for (Pair<MeasurementSchema, MeasurementSchema> pair : measurementSchemas) {
+ createTimeSeries(
+ uuid, pair.left.getMeasurementId(), pair.left.getType().name(), senderEnv);
+ createTimeSeries(
+ uuid, pair.right.getMeasurementId(), pair.right.getType().name(), receiverEnv);
+ }
+
+ try (ISession senderSession = senderEnv.getSessionConnection();
+ ISession receiverSession = receiverEnv.getSessionConnection()) {
+ Tablet tablet = generateTabletAndMeasurementSchema(measurementSchemas, "root.test." 
+ uuid);
+ if (isTsFile) {
+ // Send TsFile data to receiver
+ consumer.accept(senderSession, receiverSession, tablet);
+ Thread.sleep(2000);
+ createDataPipe(uuid, true);
+ senderSession.executeNonQueryStatement("flush");
+ } else {
+ // Send Tablet data to receiver
+ // Write once to create the data regions, guaranteeing that no TsFiles will be sent
+ consumer.accept(senderSession, receiverSession, tablet);
+ createDataPipe(uuid, false);
+ Thread.sleep(2000);
+ // The actual data insertion under test
+ consumer.accept(senderSession, receiverSession, tablet);
+ senderSession.executeNonQueryStatement("flush");
+ }
+
+ // Verify receiver data
+ long timeoutSeconds = 600;
+ List<List<Object>> expectedValues =
+ generateTabletResultSetForTable(tablet, measurementSchemas);
+ await()
+ .pollInSameThread()
+ .pollDelay(1L, TimeUnit.SECONDS)
+ .pollInterval(1L, TimeUnit.SECONDS)
+ .atMost(timeoutSeconds, TimeUnit.SECONDS)
+ .untilAsserted(
+ () -> {
+ try {
+ validateResultSet(
+ query(receiverSession, tablet.getSchemas(), tablet.deviceId),
+ expectedValues,
+ tablet.timestamps);
+ } catch (Exception e) {
+ fail();
+ }
+ });
+ senderSession.close();
+ receiverSession.close();
+ tablet.reset();
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail(e.getMessage());
+ }
+ }
+
+ private void createTimeSeries(String diff, String measurementID, String dataType, BaseEnv env) {
+ String timeSeriesCreation =
+ String.format(
+ "create timeseries root.test.%s.%s with datatype=%s,encoding=PLAIN",
+ diff, measurementID, dataType);
+ TestUtils.tryExecuteNonQueriesWithRetry(env, Collections.singletonList(timeSeriesCreation));
+ }
+
+ private void createDataPipe(String diff, boolean isTSFile) {
+ String sql =
+ String.format(
+ "create pipe test%s"
+ + " with source ('source'='iotdb-source','source.path'='root.test.**','realtime.mode'='%s','realtime.enable'='%s','history.enable'='%s')"
+ + " with processor ('processor'='do-nothing-processor')"
+ + " with sink ('node-urls'='%s:%s','batch.enable'='false','sink.format'='%s')",
+ diff,
+ isTSFile ? "file" : "forced-log",
+ !isTSFile,
+ isTSFile,
+ receiverEnv.getIP(),
+ receiverEnv.getPort(),
+ isTSFile ? 
"tsfile" : "tablet"); + TestUtils.tryExecuteNonQueriesWithRetry(senderEnv, Collections.singletonList(sql)); + } + + private void validateResultSet( + SessionDataSet dataSet, List> values, long[] timestamps) + throws IoTDBConnectionException, StatementExecutionException { + int index = 0; + while (dataSet.hasNext()) { + RowRecord record = dataSet.next(); + List fields = record.getFields(); + + assertEquals(record.getTimestamp(), timestamps[index]); + List rowValues = values.get(index++); + for (int i = 0; i < fields.size(); i++) { + Field field = fields.get(i); + switch (field.getDataType()) { + case INT64: + case TIMESTAMP: + assertEquals(field.getLongV(), (long) rowValues.get(i)); + break; + case DATE: + assertEquals(field.getDateV(), rowValues.get(i)); + break; + case BLOB: + assertEquals(field.getBinaryV(), rowValues.get(i)); + break; + case TEXT: + case STRING: + assertEquals(field.getStringValue(), rowValues.get(i)); + break; + case INT32: + assertEquals(field.getIntV(), (int) rowValues.get(i)); + break; + case DOUBLE: + assertEquals(0, Double.compare(field.getDoubleV(), (double) rowValues.get(i))); + break; + case FLOAT: + assertEquals(0, Float.compare(field.getFloatV(), (float) rowValues.get(i))); + break; + } + } + } + assertEquals(values.size(), index); + } + + private boolean[] createTestDataForBoolean() { + boolean[] data = new boolean[generateDataSize]; + Random random = new Random(); + for (int i = 0; i < data.length; i++) { + data[i] = random.nextBoolean(); + } + return data; + } + + private int[] createTestDataForInt32() { + int[] data = new int[generateDataSize]; + Random random = new Random(); + for (int i = 0; i < data.length; i++) { + data[i] = random.nextInt(); + } + return data; + } + + private long[] createTestDataForInt64() { + long[] data = new long[generateDataSize]; + Random random = new Random(); + for (int i = 0; i < data.length; i++) { + data[i] = random.nextLong(); + } + return data; + } + + private float[] createTestDataForFloat() { + float[] data = new float[generateDataSize]; + Random random = new Random(); + for (int i = 0; i < data.length; i++) { + data[i] = random.nextFloat(); + } + return data; + } + + private double[] createTestDataForDouble() { + double[] data = new double[generateDataSize]; + Random random = new Random(); + for (int i = 0; i < data.length; i++) { + data[i] = random.nextDouble(); + } + return data; + } + + private long[] createTestDataForTimestamp() { + long[] data = new long[generateDataSize]; + long time = new Date().getTime(); + for (int i = 0; i < data.length; i++) { + data[i] = time + i; + } + return data; + } + + private LocalDate[] createTestDataForDate() { + LocalDate[] data = new LocalDate[generateDataSize]; + int year = 2023; + int month = 1; + int day = 1; + for (int i = 0; i < data.length; i++) { + data[i] = DateUtils.parseIntToLocalDate(year * 10000 + (month * 100) + day); + // update + day++; + if (day > 28) { + day = 1; + month++; + if (month > 12) { + month = 1; + year++; + } + } + } + return data; + } + + private Binary[] createTestDataForString() { + String[] stringData = { + "Hello", + "Hello World!", + "This is a test.", + "IoTDB Hello World!!!!", + "IoTDB is an excellent time series database!!!!!!!!!", + "12345678910!!!!!!!!", + "123456", + "1234567.123213", + "21232131.21", + "enable = true", + "true", + "false", + "12345678910", + "123231232132131233213123123123123123131312", + "123231232132131233213123123123123123131312.212312321312312", + }; + Binary[] data = new Binary[generateDataSize]; + for (int i = 0; i < 
data.length; i++) { + data[i] = + new Binary(stringData[(i % stringData.length)].getBytes(TSFileConfig.STRING_CHARSET)); + } + return data; + } + + private List getTimestampList(Tablet tablet) { + long[] timestamps = tablet.timestamps; + List data = new ArrayList<>(timestamps.length); + for (long timestamp : timestamps) { + data.add(timestamp); + } + return data; + } + + private Pair>, List>> getMeasurementSchemasAndType( + Tablet tablet) { + List> schemaData = new ArrayList<>(tablet.rowSize); + List> typeData = new ArrayList<>(tablet.rowSize); + List measurementSchemas = new ArrayList<>(tablet.getSchemas().size()); + List types = new ArrayList<>(tablet.rowSize); + for (IMeasurementSchema measurementSchema : tablet.getSchemas()) { + measurementSchemas.add(measurementSchema.getMeasurementId()); + types.add(measurementSchema.getType()); + } + + for (int i = 0; i < tablet.rowSize; i++) { + schemaData.add(measurementSchemas); + typeData.add(types); + } + + return new Pair<>(schemaData, typeData); + } + + private List getDeviceID(Tablet tablet) { + List data = new ArrayList<>(tablet.rowSize); + for (int i = 0; i < tablet.rowSize; i++) { + data.add(tablet.deviceId); + } + return data; + } + + private List> generateTabletResultSetForTable( + final Tablet tablet, List> pairs) { + List> insertRecords = new ArrayList<>(tablet.rowSize); + final List schemas = tablet.getSchemas(); + final Object[] values = tablet.values; + for (int i = 0; i < tablet.rowSize; i++) { + List insertRecord = new ArrayList<>(); + for (int j = 0; j < schemas.size(); j++) { + TSDataType sourceType = pairs.get(j).left.getType(); + TSDataType targetType = pairs.get(j).right.getType(); + Object value = null; + switch (sourceType) { + case INT64: + case TIMESTAMP: + value = ValueConverter.convert(sourceType, targetType, ((long[]) values[j])[i]); + insertRecord.add(convert(value, targetType)); + break; + case INT32: + value = ValueConverter.convert(sourceType, targetType, ((int[]) values[j])[i]); + insertRecord.add(convert(value, targetType)); + break; + case DOUBLE: + value = ValueConverter.convert(sourceType, targetType, ((double[]) values[j])[i]); + insertRecord.add(convert(value, targetType)); + break; + case FLOAT: + value = ValueConverter.convert(sourceType, targetType, ((float[]) values[j])[i]); + insertRecord.add(convert(value, targetType)); + break; + case DATE: + value = + ValueConverter.convert( + sourceType, + targetType, + DateUtils.parseDateExpressionToInt(((LocalDate[]) values[j])[i])); + insertRecord.add(convert(value, targetType)); + break; + case TEXT: + case STRING: + value = ValueConverter.convert(sourceType, targetType, ((Binary[]) values[j])[i]); + insertRecord.add(convert(value, targetType)); + break; + case BLOB: + value = ValueConverter.convert(sourceType, targetType, ((Binary[]) values[j])[i]); + insertRecord.add(convert(value, targetType)); + break; + case BOOLEAN: + value = ValueConverter.convert(sourceType, targetType, ((boolean[]) values[j])[i]); + insertRecord.add(convert(value, targetType)); + break; + } + } + insertRecords.add(insertRecord); + } + + return insertRecords; + } + + private Object convert(Object value, TSDataType targetType) { + switch (targetType) { + case DATE: + return DateUtils.parseIntToLocalDate((Integer) value); + case TEXT: + case STRING: + return new String(((Binary) value).getValues(), TSFileConfig.STRING_CHARSET); + } + return value; + } + + private List> generateTabletInsertRecordForTable(final Tablet tablet) { + List> insertRecords = new ArrayList<>(tablet.rowSize); + 
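+ // Transpose the tablet's columnar value arrays into one row-major list per row,
+ // switching on each column's TSDataType to apply the matching primitive-array cast.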
final List schemas = tablet.getSchemas(); + final Object[] values = tablet.values; + for (int i = 0; i < tablet.rowSize; i++) { + List insertRecord = new ArrayList<>(); + for (int j = 0; j < schemas.size(); j++) { + switch (schemas.get(j).getType()) { + case INT64: + case TIMESTAMP: + insertRecord.add(((long[]) values[j])[i]); + break; + case INT32: + insertRecord.add(((int[]) values[j])[i]); + break; + case DOUBLE: + insertRecord.add(((double[]) values[j])[i]); + break; + case FLOAT: + insertRecord.add(((float[]) values[j])[i]); + break; + case DATE: + insertRecord.add(((LocalDate[]) values[j])[i]); + break; + case TEXT: + case STRING: + insertRecord.add( + new String(((Binary[]) values[j])[i].getValues(), TSFileConfig.STRING_CHARSET)); + break; + case BLOB: + insertRecord.add(((Binary[]) values[j])[i]); + break; + case BOOLEAN: + insertRecord.add(((boolean[]) values[j])[i]); + break; + } + } + insertRecords.add(insertRecord); + } + + return insertRecords; + } + + private List> generateTabletInsertStrRecordForTable(Tablet tablet) { + List> insertRecords = new ArrayList<>(tablet.rowSize); + final List schemas = tablet.getSchemas(); + final Object[] values = tablet.values; + for (int i = 0; i < tablet.rowSize; i++) { + List insertRecord = new ArrayList<>(); + for (int j = 0; j < schemas.size(); j++) { + switch (schemas.get(j).getType()) { + case INT64: + insertRecord.add(String.valueOf(((long[]) values[j])[i])); + break; + case TIMESTAMP: + insertRecord.add( + RpcUtils.formatDatetime("default", "ms", ((long[]) values[j])[i], ZoneOffset.UTC)); + break; + case INT32: + insertRecord.add(String.valueOf(((int[]) values[j])[i])); + break; + case DOUBLE: + insertRecord.add(String.valueOf(((double[]) values[j])[i])); + break; + case FLOAT: + insertRecord.add(String.valueOf(((float[]) values[j])[i])); + break; + case DATE: + insertRecord.add(((LocalDate[]) values[j])[i].toString()); + break; + case TEXT: + case STRING: + insertRecord.add( + new String(((Binary[]) values[j])[i].getValues(), TSFileConfig.STRING_CHARSET)); + break; + case BLOB: + String value = + BytesUtils.parseBlobByteArrayToString(((Binary[]) values[j])[i].getValues()) + .substring(2); + insertRecord.add(String.format("X'%s'", value)); + break; + case BOOLEAN: + insertRecord.add(String.valueOf(((boolean[]) values[j])[i])); + break; + } + } + insertRecords.add(insertRecord); + } + + return insertRecords; + } + + private Tablet generateTabletAndMeasurementSchema( + List> pairs, String deviceId) { + long[] timestamp = createTestDataForTimestamp(); + Object[] objects = new Object[pairs.size()]; + List measurementSchemas = new ArrayList<>(pairs.size()); + BitMap[] bitMaps = new BitMap[pairs.size()]; + for (int i = 0; i < bitMaps.length; i++) { + bitMaps[i] = new BitMap(generateDataSize); + } + for (int i = 0; i < objects.length; i++) { + MeasurementSchema schema = pairs.get(i).left; + measurementSchemas.add(schema); + switch (schema.getType()) { + case INT64: + objects[i] = createTestDataForInt64(); + break; + case INT32: + objects[i] = createTestDataForInt32(); + break; + case TIMESTAMP: + objects[i] = createTestDataForTimestamp(); + break; + case DOUBLE: + objects[i] = createTestDataForDouble(); + break; + case FLOAT: + objects[i] = createTestDataForFloat(); + break; + case DATE: + objects[i] = createTestDataForDate(); + break; + case STRING: + case BLOB: + case TEXT: + objects[i] = createTestDataForString(); + break; + case BOOLEAN: + objects[i] = createTestDataForBoolean(); + break; + } + } + return new Tablet(deviceId, 
measurementSchemas, timestamp, objects, bitMaps, generateDataSize); + } + + private List> generateMeasurementSchemas() { + TSDataType[] dataTypes = { + TSDataType.STRING, + TSDataType.TEXT, + TSDataType.BLOB, + TSDataType.TIMESTAMP, + TSDataType.BOOLEAN, + TSDataType.DATE, + TSDataType.DOUBLE, + TSDataType.FLOAT, + TSDataType.INT32, + TSDataType.INT64 + }; + List> pairs = new ArrayList<>(); + + for (TSDataType type : dataTypes) { + for (TSDataType dataType : dataTypes) { + String id = String.format("%s2%s", type.name(), dataType.name()); + pairs.add(new Pair<>(new MeasurementSchema(id, type), new MeasurementSchema(id, dataType))); + } + } + return pairs; + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeTypeConversionIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeTypeConversionIT.java new file mode 100644 index 0000000000000..24b40d2e0c9c6 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeTypeConversionIT.java @@ -0,0 +1,604 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.pipe.it.manual; + +import org.apache.iotdb.db.it.utils.TestUtils; +import org.apache.iotdb.db.pipe.receiver.transform.converter.ValueConverter; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2ManualCreateSchema; +import org.apache.iotdb.itbase.env.BaseEnv; + +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.utils.Binary; +import org.apache.tsfile.utils.BytesUtils; +import org.apache.tsfile.utils.DateUtils; +import org.apache.tsfile.utils.Pair; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.HashSet; +import java.util.List; +import java.util.Random; +import java.util.Set; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2ManualCreateSchema.class}) +public class IoTDBPipeTypeConversionIT extends AbstractPipeDualManualIT { + + private static final int generateDataSize = 100; + + // Test for converting BOOLEAN to OtherType + @Test + public void testBooleanToOtherTypeConversion() { + createDataPipe(); + executeAndVerifyTypeConversion(TSDataType.BOOLEAN, TSDataType.INT32); + executeAndVerifyTypeConversion(TSDataType.BOOLEAN, TSDataType.INT64); + executeAndVerifyTypeConversion(TSDataType.BOOLEAN, TSDataType.FLOAT); + executeAndVerifyTypeConversion(TSDataType.BOOLEAN, TSDataType.DOUBLE); + executeAndVerifyTypeConversion(TSDataType.BOOLEAN, TSDataType.TEXT); + executeAndVerifyTypeConversion(TSDataType.BOOLEAN, TSDataType.TIMESTAMP); + executeAndVerifyTypeConversion(TSDataType.BOOLEAN, TSDataType.BLOB); + executeAndVerifyTypeConversion(TSDataType.BOOLEAN, TSDataType.STRING); + executeAndVerifyTypeConversion(TSDataType.BOOLEAN, TSDataType.DATE); + } + + // Test for converting INT32 to OtherType + @Test + public void testInt32ToOtherTypeConversion() { + createDataPipe(); + executeAndVerifyTypeConversion(TSDataType.INT32, TSDataType.BOOLEAN); + executeAndVerifyTypeConversion(TSDataType.INT32, TSDataType.INT64); + executeAndVerifyTypeConversion(TSDataType.INT32, TSDataType.FLOAT); + executeAndVerifyTypeConversion(TSDataType.INT32, TSDataType.DOUBLE); + executeAndVerifyTypeConversion(TSDataType.INT32, TSDataType.TEXT); + executeAndVerifyTypeConversion(TSDataType.INT32, TSDataType.TIMESTAMP); + executeAndVerifyTypeConversion(TSDataType.INT32, TSDataType.BLOB); + executeAndVerifyTypeConversion(TSDataType.INT32, TSDataType.STRING); + executeAndVerifyTypeConversion(TSDataType.INT32, TSDataType.DATE); + } + + // Test for converting INT64 to OtherType + @Test + public void testInt64ToOtherTypeConversion() { + createDataPipe(); + executeAndVerifyTypeConversion(TSDataType.INT64, TSDataType.BOOLEAN); + executeAndVerifyTypeConversion(TSDataType.INT64, TSDataType.INT32); + executeAndVerifyTypeConversion(TSDataType.INT64, TSDataType.FLOAT); + executeAndVerifyTypeConversion(TSDataType.INT64, TSDataType.DOUBLE); + executeAndVerifyTypeConversion(TSDataType.INT64, TSDataType.TEXT); + executeAndVerifyTypeConversion(TSDataType.INT64, TSDataType.TIMESTAMP); + executeAndVerifyTypeConversion(TSDataType.INT64, TSDataType.BLOB); + executeAndVerifyTypeConversion(TSDataType.INT64, TSDataType.STRING); + executeAndVerifyTypeConversion(TSDataType.INT64, TSDataType.DATE); + } + + // Test for converting FLOAT to OtherType + @Test + public void testFloatToOtherTypeConversion() { + createDataPipe(); + 
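+    // covers FLOAT against every other supported target type; the identity
+    // FLOAT -> FLOAT case appears to be deliberately omitted, as in the sibling tests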
executeAndVerifyTypeConversion(TSDataType.FLOAT, TSDataType.BOOLEAN); + executeAndVerifyTypeConversion(TSDataType.FLOAT, TSDataType.INT32); + executeAndVerifyTypeConversion(TSDataType.FLOAT, TSDataType.INT64); + executeAndVerifyTypeConversion(TSDataType.FLOAT, TSDataType.DOUBLE); + executeAndVerifyTypeConversion(TSDataType.FLOAT, TSDataType.TEXT); + executeAndVerifyTypeConversion(TSDataType.FLOAT, TSDataType.TIMESTAMP); + executeAndVerifyTypeConversion(TSDataType.FLOAT, TSDataType.BLOB); + executeAndVerifyTypeConversion(TSDataType.FLOAT, TSDataType.STRING); + executeAndVerifyTypeConversion(TSDataType.FLOAT, TSDataType.DATE); + } + + // Test for converting DOUBLE to OtherType + @Test + public void testDoubleToOtherTypeConversion() { + createDataPipe(); + executeAndVerifyTypeConversion(TSDataType.DOUBLE, TSDataType.BOOLEAN); + executeAndVerifyTypeConversion(TSDataType.DOUBLE, TSDataType.INT32); + executeAndVerifyTypeConversion(TSDataType.DOUBLE, TSDataType.INT64); + executeAndVerifyTypeConversion(TSDataType.DOUBLE, TSDataType.FLOAT); + executeAndVerifyTypeConversion(TSDataType.DOUBLE, TSDataType.TEXT); + executeAndVerifyTypeConversion(TSDataType.DOUBLE, TSDataType.TIMESTAMP); + executeAndVerifyTypeConversion(TSDataType.DOUBLE, TSDataType.BLOB); + executeAndVerifyTypeConversion(TSDataType.DOUBLE, TSDataType.STRING); + executeAndVerifyTypeConversion(TSDataType.DOUBLE, TSDataType.DATE); + } + + // Test for converting TEXT to OtherType + @Test + public void testTextToOtherTypeConversion() { + createDataPipe(); + executeAndVerifyTypeConversion(TSDataType.TEXT, TSDataType.BLOB); + executeAndVerifyTypeConversion(TSDataType.TEXT, TSDataType.STRING); + executeAndVerifyTypeConversion(TSDataType.TEXT, TSDataType.BOOLEAN); + executeAndVerifyTypeConversion(TSDataType.TEXT, TSDataType.INT32); + executeAndVerifyTypeConversion(TSDataType.TEXT, TSDataType.INT64); + executeAndVerifyTypeConversion(TSDataType.TEXT, TSDataType.FLOAT); + executeAndVerifyTypeConversion(TSDataType.TEXT, TSDataType.DOUBLE); + executeAndVerifyTypeConversion(TSDataType.TEXT, TSDataType.TIMESTAMP); + executeAndVerifyTypeConversion(TSDataType.TEXT, TSDataType.DATE); + } + + // Test for converting TIMESTAMP to OtherType + @Test + public void testTimestampToOtherTypeConversion() { + createDataPipe(); + executeAndVerifyTypeConversion(TSDataType.TIMESTAMP, TSDataType.BOOLEAN); + executeAndVerifyTypeConversion(TSDataType.TIMESTAMP, TSDataType.INT32); + executeAndVerifyTypeConversion(TSDataType.TIMESTAMP, TSDataType.INT64); + executeAndVerifyTypeConversion(TSDataType.TIMESTAMP, TSDataType.FLOAT); + executeAndVerifyTypeConversion(TSDataType.TIMESTAMP, TSDataType.DOUBLE); + executeAndVerifyTypeConversion(TSDataType.TIMESTAMP, TSDataType.TEXT); + executeAndVerifyTypeConversion(TSDataType.TIMESTAMP, TSDataType.BLOB); + executeAndVerifyTypeConversion(TSDataType.TIMESTAMP, TSDataType.STRING); + executeAndVerifyTypeConversion(TSDataType.TIMESTAMP, TSDataType.DATE); + } + + // Test for converting DATE to OtherType + @Test + public void testDateToOtherTypeConversion() { + createDataPipe(); + executeAndVerifyTypeConversion(TSDataType.DATE, TSDataType.BOOLEAN); + executeAndVerifyTypeConversion(TSDataType.DATE, TSDataType.INT32); + executeAndVerifyTypeConversion(TSDataType.DATE, TSDataType.INT64); + executeAndVerifyTypeConversion(TSDataType.DATE, TSDataType.FLOAT); + executeAndVerifyTypeConversion(TSDataType.DATE, TSDataType.DOUBLE); + executeAndVerifyTypeConversion(TSDataType.DATE, TSDataType.TEXT); + executeAndVerifyTypeConversion(TSDataType.DATE, 
TSDataType.STRING);
+    executeAndVerifyTypeConversion(TSDataType.DATE, TSDataType.TIMESTAMP);
+  }
+
+  // Test for converting BLOB to OtherType
+  @Test
+  public void testBlobToOtherTypeConversion() {
+    createDataPipe();
+    executeAndVerifyTypeConversion(TSDataType.BLOB, TSDataType.TEXT);
+    executeAndVerifyTypeConversion(TSDataType.BLOB, TSDataType.STRING);
+    executeAndVerifyTypeConversion(TSDataType.BLOB, TSDataType.BOOLEAN);
+    executeAndVerifyTypeConversion(TSDataType.BLOB, TSDataType.INT32);
+    executeAndVerifyTypeConversion(TSDataType.BLOB, TSDataType.INT64);
+    executeAndVerifyTypeConversion(TSDataType.BLOB, TSDataType.FLOAT);
+    executeAndVerifyTypeConversion(TSDataType.BLOB, TSDataType.DOUBLE);
+    executeAndVerifyTypeConversion(TSDataType.BLOB, TSDataType.TIMESTAMP);
+    executeAndVerifyTypeConversion(TSDataType.BLOB, TSDataType.DATE);
+  }
+
+  // Test for converting STRING to OtherType
+  @Test
+  public void testStringToOtherTypeConversion() {
+    createDataPipe();
+    executeAndVerifyTypeConversion(TSDataType.STRING, TSDataType.TEXT);
+    executeAndVerifyTypeConversion(TSDataType.STRING, TSDataType.BLOB);
+    executeAndVerifyTypeConversion(TSDataType.STRING, TSDataType.BOOLEAN);
+    executeAndVerifyTypeConversion(TSDataType.STRING, TSDataType.INT32);
+    executeAndVerifyTypeConversion(TSDataType.STRING, TSDataType.INT64);
+    executeAndVerifyTypeConversion(TSDataType.STRING, TSDataType.FLOAT);
+    executeAndVerifyTypeConversion(TSDataType.STRING, TSDataType.DOUBLE);
+    executeAndVerifyTypeConversion(TSDataType.STRING, TSDataType.TIMESTAMP);
+  }
+
+  private void executeAndVerifyTypeConversion(TSDataType source, TSDataType target) {
+    List<Pair> pairs = prepareTypeConversionTest(source, target);
+    TestUtils.assertDataEventuallyOnEnv(
+        receiverEnv,
+        String.format("select status from root.test.%s2%s", source.name(), target.name()),
+        String.format("Time,root.test.%s2%s.status,", source.name(), target.name()),
+        createExpectedResultSet(pairs, source, target),
+        600);
+  }
+
+  private List<Pair> prepareTypeConversionTest(TSDataType sourceType, TSDataType targetType) {
+    String sourceTypeName = sourceType.name();
+    String targetTypeName = targetType.name();
+
+    createTimeSeries(sourceTypeName, targetTypeName, sourceTypeName, senderEnv);
+    createTimeSeries(sourceTypeName, targetTypeName, targetTypeName, receiverEnv);
+
+    List<Pair> pairs = createTestDataForType(sourceTypeName);
+
+    executeDataInsertions(pairs, sourceType, targetType);
+    return pairs;
+  }
+
+  private void createTimeSeries(
+      String sourceTypeName, String targetTypeName, String dataType, BaseEnv env) {
+    String timeSeriesCreationQuery =
+        String.format(
+            "create timeseries root.test.%s2%s.status with datatype=%s,encoding=PLAIN",
+            sourceTypeName, targetTypeName, dataType);
+    TestUtils.tryExecuteNonQueriesWithRetry(
+        env, Collections.singletonList(timeSeriesCreationQuery));
+  }
+
+  private void createDataPipe() {
+    String sql =
+        String.format(
+            "create pipe test"
+                + " with source ('source'='iotdb-source','source.path'='root.test.**','realtime.mode'='forced-log')"
+                + " with processor ('processor'='do-nothing-processor')"
+                + " with sink ('node-urls'='%s:%s','batch.enable'='false','sink.format'='tablet')",
+            receiverEnv.getIP(), receiverEnv.getPort());
+    TestUtils.tryExecuteNonQueriesWithRetry(senderEnv, Collections.singletonList(sql));
+  }
+
+  private List<Pair> createTestDataForType(String sourceType) {
+    switch (sourceType) {
+      case "BOOLEAN":
+        return createTestDataForBoolean();
+      case "INT32":
+        return createTestDataForInt32();
+      case "INT64":
+        return createTestDataForInt64();
+      case "FLOAT":
+        return createTestDataForFloat();
+      case "DOUBLE":
+        return createTestDataForDouble();
+      case "TEXT":
+        return createTestDataForText();
+      case "TIMESTAMP":
+        return createTestDataForTimestamp();
+      case "DATE":
+        return createTestDataForDate();
+      case "BLOB":
+        return createTestDataForBlob();
+      case "STRING":
+        return createTestDataForString();
+      default:
+        throw new UnsupportedOperationException("Unsupported data type: " + sourceType);
+    }
+  }
+
+  private void executeDataInsertions(
+      List<Pair> testData, TSDataType sourceType, TSDataType targetType) {
+    switch (sourceType) {
+      case STRING:
+      case TEXT:
+        TestUtils.tryExecuteNonQueriesWithRetry(
+            senderEnv,
+            createInsertStatementsForString(testData, sourceType.name(), targetType.name()));
+        return;
+      case TIMESTAMP:
+        TestUtils.tryExecuteNonQueriesWithRetry(
+            senderEnv,
+            createInsertStatementsForTimestamp(testData, sourceType.name(), targetType.name()));
+        return;
+      case DATE:
+        TestUtils.tryExecuteNonQueriesWithRetry(
+            senderEnv,
+            createInsertStatementsForLocalDate(testData, sourceType.name(), targetType.name()));
+        return;
+      case BLOB:
+        TestUtils.tryExecuteNonQueriesWithRetry(
+            senderEnv,
+            createInsertStatementsForBlob(testData, sourceType.name(), targetType.name()));
+        return;
+      default:
+        TestUtils.tryExecuteNonQueriesWithRetry(
+            senderEnv,
+            createInsertStatementsForNumeric(testData, sourceType.name(), targetType.name()));
+    }
+  }
+
+  private List<String> createInsertStatementsForString(
+      List<Pair> testData, String sourceType, String targetType) {
+    List<String> executes = new ArrayList<>();
+    for (Pair pair : testData) {
+      executes.add(
+          String.format(
+              "insert into root.test.%s2%s(timestamp,status) values (%s,'%s');",
+              sourceType,
+              targetType,
+              pair.left,
+              new String(((Binary) (pair.right)).getValues(), StandardCharsets.UTF_8)));
+    }
+    executes.add("flush");
+    return executes;
+  }
+
+  private List<String> createInsertStatementsForNumeric(
+      List<Pair> testData, String sourceType, String targetType) {
+    List<String> executes = new ArrayList<>();
+    for (Pair pair : testData) {
+      executes.add(
+          String.format(
+              "insert into root.test.%s2%s(timestamp,status) values (%s,%s);",
+              sourceType, targetType, pair.left, pair.right));
+    }
+    executes.add("flush");
+    return executes;
+  }
+
+  private List<String> createInsertStatementsForTimestamp(
+      List<Pair> testData, String sourceType, String targetType) {
+    List<String> executes = new ArrayList<>();
+    for (Pair pair : testData) {
+      executes.add(
+          String.format(
+              "insert into root.test.%s2%s(timestamp,status) values (%s,%s);",
+              sourceType, targetType, pair.left, pair.right));
+    }
+    executes.add("flush");
+    return executes;
+  }
+
+  private List<String> createInsertStatementsForLocalDate(
+      List<Pair> testData, String sourceType, String targetType) {
+    List<String> executes = new ArrayList<>();
+    for (Pair pair : testData) {
+      executes.add(
+          String.format(
+              "insert into root.test.%s2%s(timestamp,status) values (%s,'%s');",
+              sourceType, targetType, pair.left, DateUtils.formatDate((Integer) pair.right)));
+    }
+    executes.add("flush");
+    return executes;
+  }
+
+  private List<String> createInsertStatementsForBlob(
+      List<Pair> testData, String sourceType, String targetType) {
+    List<String> executes = new ArrayList<>();
+    for (Pair pair : testData) {
+      String value = BytesUtils.parseBlobByteArrayToString(((Binary) pair.right).getValues());
+      executes.add(
+          String.format(
+              "insert into root.test.%s2%s(timestamp,status) values (%s,X'%s');",
+              sourceType, targetType, pair.left, value.substring(2)));
+    }
+    executes.add("flush");
+    return executes;
+  }
+
+  private Set<String> createExpectedResultSet(
+      List<Pair> pairs, TSDataType sourceType, TSDataType targetType) {
+    switch (targetType) {
+      case TIMESTAMP:
+        return generateTimestampResultSet(pairs, sourceType, targetType);
+      case DATE:
+        return generateLocalDateResultSet(pairs, sourceType, targetType);
+      case BLOB:
+        return generateBlobResultSet(pairs, sourceType, targetType);
+      case TEXT:
+      case STRING:
+        return generateStringResultSet(pairs, sourceType, targetType);
+      default:
+        HashSet<String> resultSet = new HashSet<>();
+        for (Pair pair : pairs) {
+          resultSet.add(
+              String.format(
+                  "%s,%s,", pair.left, ValueConverter.convert(sourceType, targetType, pair.right)));
+        }
+        return resultSet;
+    }
+  }
+
+  private Set<String> generateTimestampResultSet(
+      List<Pair> pairs, TSDataType sourceType, TSDataType targetType) {
+    HashSet<String> resultSet = new HashSet<>();
+    for (Pair pair : pairs) {
+      resultSet.add(
+          String.format(
+              "%s,%s,", pair.left, ValueConverter.convert(sourceType, targetType, pair.right)));
+    }
+    return resultSet;
+  }
+
+  private Set<String> generateLocalDateResultSet(
+      List<Pair> pairs, TSDataType sourceType, TSDataType targetType) {
+    HashSet<String> resultSet = new HashSet<>();
+    for (Pair pair : pairs) {
+      resultSet.add(
+          String.format(
+              "%s,%s,",
+              pair.left,
+              DateUtils.formatDate(
+                  (Integer) ValueConverter.convert(sourceType, targetType, pair.right))));
+    }
+    return resultSet;
+  }
+
+  private Set<String> generateBlobResultSet(
+      List<Pair> pairs, TSDataType sourceType, TSDataType targetType) {
+    HashSet<String> resultSet = new HashSet<>();
+    for (Pair pair : pairs) {
+      resultSet.add(
+          String.format(
+              "%s,%s,",
+              pair.left,
+              BytesUtils.parseBlobByteArrayToString(
+                  ((Binary) ValueConverter.convert(sourceType, targetType, pair.right))
+                      .getValues())));
+    }
+    return resultSet;
+  }
+
+  private Set<String> generateStringResultSet(
+      List<Pair> pairs, TSDataType sourceType, TSDataType targetType) {
+    HashSet<String> resultSet = new HashSet<>();
+    for (Pair pair : pairs) {
+      resultSet.add(
+          String.format(
+              "%s,%s,",
+              pair.left,
+              new String(
+                  ((Binary) ValueConverter.convert(sourceType, targetType, pair.right)).getValues(),
+                  StandardCharsets.UTF_8)));
+    }
+    return resultSet;
+  }
+
+  private List<Pair> createTestDataForBoolean() {
+    List<Pair> pairs = new java.util.ArrayList<>();
+    Random random = new Random();
+    for (long i = 0; i < generateDataSize; i++) {
+      pairs.add(new Pair<>(i, random.nextBoolean()));
+    }
+    return pairs;
+  }
+
+  private List<Pair> createTestDataForInt32() {
+    List<Pair> pairs = new ArrayList<>();
+    Random random = new Random();
+    for (long i = 0; i < generateDataSize; i++) {
+      pairs.add(new Pair<>(i, random.nextInt()));
+    }
+    pairs.add(new Pair<>(generateDataSize + 1, -1));
+    pairs.add(new Pair<>(generateDataSize + 2, -2));
+    pairs.add(new Pair<>(generateDataSize + 3, -3));
+    return pairs;
+  }
+
+  private List<Pair> createTestDataForInt64() {
+    List<Pair> pairs = new ArrayList<>();
+    Random random = new Random();
+    for (long i = 0; i < generateDataSize; i++) {
+      pairs.add(new Pair<>(i, random.nextLong()));
+    }
+    pairs.add(new Pair<>(generateDataSize + 1, -1L));
+    pairs.add(new Pair<>(generateDataSize + 2, -2L));
+    pairs.add(new Pair<>(generateDataSize + 3, -3L));
+    return pairs;
+  }
+
+  private List<Pair> createTestDataForFloat() {
+    List<Pair> pairs = new ArrayList<>();
+    Random random = new Random();
+    for (long i = 0; i < generateDataSize; i++) {
+      pairs.add(new Pair<>(i, random.nextFloat()));
+    }
+    return pairs;
+  }
+
+  private List<Pair> createTestDataForDouble() {
+    List<Pair> pairs = new ArrayList<>();
+    Random random = new Random();
+    for (long i = 0; i < generateDataSize; i++) {
+      pairs.add(new Pair<>(i, random.nextDouble()));
+    }
+    return pairs;
+  }
+
+  private List<Pair> createTestDataForText() {
+    List<Pair> pairs = new ArrayList<>();
+    for (long i = 0; i < generateDataSize; i++) {
+      pairs.add(new Pair<>(i, new Binary((String.valueOf(i)).getBytes(StandardCharsets.UTF_8))));
+    }
+    pairs.add(new Pair(generateDataSize + 1, new Binary("Hello".getBytes(StandardCharsets.UTF_8))));
+    pairs.add(
+        new Pair(
+            generateDataSize + 2, new Binary("Hello World!".getBytes(StandardCharsets.UTF_8))));
+    pairs.add(
+        new Pair(
+            generateDataSize + 3, new Binary("This is a test.".getBytes(StandardCharsets.UTF_8))));
+    pairs.add(
+        new Pair(
+            generateDataSize + 4,
+            new Binary("IoTDB Hello World!!!!".getBytes(StandardCharsets.UTF_8))));
+    pairs.add(
+        new Pair(
+            generateDataSize + 5,
+            new Binary(
+                "IoTDB is an excellent time series database!!!!!!!!!"
+                    .getBytes(StandardCharsets.UTF_8))));
+    return pairs;
+  }
+
+  private List<Pair> createTestDataForTimestamp() {
+    List<Pair> pairs = new ArrayList<>();
+    for (long i = 0; i < generateDataSize; i++) {
+      pairs.add(new Pair<>(i, new Date().getTime() + i));
+    }
+    return pairs;
+  }
+
+  private List<Pair> createTestDataForDate() {
+    List<Pair> pairs = new ArrayList<>();
+    int year = 2023;
+    int month = 1;
+    int day = 1;
+    for (long i = 0; i < generateDataSize; i++) {
+      pairs.add(new Pair<>(i, year * 10000 + (month * 100) + day));
+
+      // advance the calendar date, capping every month at 28 days for simplicity
+      day++;
+      if (day > 28) {
+        day = 1;
+        month++;
+        if (month > 12) {
+          month = 1;
+          year++;
+        }
+      }
+    }
+    return pairs;
+  }
+
+  private List<Pair> createTestDataForBlob() {
+    List<Pair> pairs = new ArrayList<>();
+    for (long i = 0; i < generateDataSize; i++) {
+      pairs.add(new Pair<>(i, new Binary((String.valueOf(i)).getBytes(StandardCharsets.UTF_8))));
+    }
+    pairs.add(new Pair(generateDataSize + 1, new Binary("Hello".getBytes(StandardCharsets.UTF_8))));
+    pairs.add(
+        new Pair(
+            generateDataSize + 2, new Binary("Hello World!".getBytes(StandardCharsets.UTF_8))));
+    pairs.add(
+        new Pair(
+            generateDataSize + 3, new Binary("This is a test.".getBytes(StandardCharsets.UTF_8))));
+    pairs.add(
+        new Pair(
+            generateDataSize + 4,
+            new Binary("IoTDB Hello World!!!!".getBytes(StandardCharsets.UTF_8))));
+    pairs.add(
+        new Pair(
+            generateDataSize + 5,
+            new Binary(
+                "IoTDB is an excellent time series database!!!!!!!!!"
+                    .getBytes(StandardCharsets.UTF_8))));
+    return pairs;
+  }
+
+  private List<Pair> createTestDataForString() {
+    List<Pair> pairs = new ArrayList<>();
+    for (long i = 0; i < generateDataSize; i++) {
+      pairs.add(new Pair<>(i, new Binary((String.valueOf(i)).getBytes(StandardCharsets.UTF_8))));
+    }
+    pairs.add(new Pair(generateDataSize + 1, new Binary("Hello".getBytes(StandardCharsets.UTF_8))));
+    pairs.add(
+        new Pair(
+            generateDataSize + 2, new Binary("Hello World!".getBytes(StandardCharsets.UTF_8))));
+    pairs.add(
+        new Pair(
+            generateDataSize + 3, new Binary("This is a test.".getBytes(StandardCharsets.UTF_8))));
+    pairs.add(
+        new Pair(
+            generateDataSize + 4,
+            new Binary("IoTDB Hello World!!!!".getBytes(StandardCharsets.UTF_8))));
+    pairs.add(
+        new Pair(
+            generateDataSize + 5,
+            new Binary(
+                "IoTDB is an excellent time series database!!!!!!!!!"
+ .getBytes(StandardCharsets.UTF_8)))); + return pairs; + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/AbstractPipeSingleIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/AbstractPipeSingleIT.java index 70bb4df4a93e8..324fc529b5a47 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/AbstractPipeSingleIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/AbstractPipeSingleIT.java @@ -35,7 +35,11 @@ public void setUp() { env = MultiEnvFactory.getEnv(0); env.getConfig().getCommonConfig().setAutoCreateSchemaEnabled(true); // 10 min, assert that the operations will not time out - env.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); + env.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + env.getConfig() + .getCommonConfig() + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); env.initClusterEnvironment(); } diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/IoTDBPipeAggregateIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/IoTDBPipeAggregateIT.java index cbd9c08f4e5c2..78ad6b9bf3339 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/IoTDBPipeAggregateIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/IoTDBPipeAggregateIT.java @@ -66,7 +66,8 @@ public void testAggregator() throws Exception { processorAttributes.put("output.database", "root.testdb"); processorAttributes.put( "output.measurements", "Avg1, peak1, rms1, var1, skew1, kurt1, ff1, cf1, pf1"); - processorAttributes.put("operators", "avg, peak, rms, var, skew, kurt, ff, cf, pf, cE"); + processorAttributes.put( + "operators", "avg, peak, rms, var, skew, kurt, ff, cf, pf, cE, max, min"); processorAttributes.put("sliding.seconds", "60"); connectorAttributes.put("sink", "write-back-sink"); @@ -115,7 +116,7 @@ public void testAggregator() throws Exception { env, "select count(*) from root.testdb.** group by level=1", "count(root.testdb.*.*.*.*),", - Collections.singleton("20,")); + Collections.singleton("24,")); // Test manually renamed timeSeries count TestUtils.assertDataEventuallyOnEnv( diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/IoTDBPipeOPCUAIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/IoTDBPipeOPCUAIT.java new file mode 100644 index 0000000000000..8caabaf6f6263 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/IoTDBPipeOPCUAIT.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.pipe.it.single; + +import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient; +import org.apache.iotdb.confignode.rpc.thrift.TCreatePipeReq; +import org.apache.iotdb.db.it.utils.TestUtils; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT1; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT1.class}) +public class IoTDBPipeOPCUAIT extends AbstractPipeSingleIT { + @Test + public void testOPCUASink() throws Exception { + try (final SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) env.getLeaderConfigNodeConnection()) { + + if (!TestUtils.tryExecuteNonQueryWithRetry( + env, "insert into root.db.d1(time, s1) values (1, 1)")) { + return; + } + + final Map connectorAttributes = new HashMap<>(); + connectorAttributes.put("sink", "opc-ua-sink"); + connectorAttributes.put("opcua.model", "client-server"); + + Assert.assertEquals( + TSStatusCode.SUCCESS_STATUS.getStatusCode(), + client + .createPipe( + new TCreatePipeReq("testPipe", connectorAttributes) + .setExtractorAttributes(Collections.emptyMap()) + .setProcessorAttributes(Collections.emptyMap())) + .getCode()); + Assert.assertEquals( + TSStatusCode.SUCCESS_STATUS.getStatusCode(), client.dropPipe("testPipe").getCode()); + + // Test reconstruction + connectorAttributes.put("password", "test"); + Assert.assertEquals( + TSStatusCode.SUCCESS_STATUS.getStatusCode(), + client + .createPipe( + new TCreatePipeReq("testPipe", connectorAttributes) + .setExtractorAttributes(Collections.emptyMap()) + .setProcessorAttributes(Collections.emptyMap())) + .getCode()); + + // Test conflict + connectorAttributes.put("password", "conflict"); + Assert.assertEquals( + TSStatusCode.PIPE_ERROR.getStatusCode(), + client + .createPipe( + new TCreatePipeReq("testPipe", connectorAttributes) + .setExtractorAttributes(Collections.emptyMap()) + .setProcessorAttributes(Collections.emptyMap())) + .getCode()); + } + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/session/it/IoTDBSessionInsertNullIT.java b/integration-test/src/test/java/org/apache/iotdb/session/it/IoTDBSessionInsertNullIT.java index f101a833fbb3b..503b61f2d7cd7 100644 --- a/integration-test/src/test/java/org/apache/iotdb/session/it/IoTDBSessionInsertNullIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/session/it/IoTDBSessionInsertNullIT.java @@ -32,7 +32,10 @@ import org.apache.tsfile.file.metadata.enums.TSEncoding; import org.apache.tsfile.read.common.Field; import org.apache.tsfile.read.common.RowRecord; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -355,4 +358,43 @@ public void insertAlignedRecordsOfOneDeviceNullTest() { fail(e.getMessage()); } } + + @Test + public void insertTabletNullMeasurementTest() { + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + String deviceId = "root.sg1.clsu.aligned_d1"; + Tablet tablet = + new Tablet( + deviceId, + Arrays.asList( + new MeasurementSchema("s1", TSDataType.BOOLEAN), + new MeasurementSchema(null, 
TSDataType.INT32)), + 1); + tablet.addTimestamp(0, 300); + tablet.addValue("s1", 0, true); + tablet.addValue(null, 0, 1); + session.insertAlignedTablet(tablet); + fail(); + } catch (Exception e) { + Assert.assertEquals("measurement should be non null value", e.getMessage()); + } + + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + String deviceId = "root.sg1.clsu.aligned_d1"; + Tablet tablet = + new Tablet( + deviceId, + Arrays.asList( + new MeasurementSchema("s1", TSDataType.BOOLEAN), + new MeasurementSchema(null, TSDataType.INT32)), + 1); + tablet.addTimestamp(0, 300); + tablet.addValue("s1", 0, true); + // doesn't insert 2nd measurement + session.insertAlignedTablet(tablet); + fail(); + } catch (Exception e) { + Assert.assertEquals("measurement should be non null value", e.getMessage()); + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/session/it/IoTDBSessionSimpleIT.java b/integration-test/src/test/java/org/apache/iotdb/session/it/IoTDBSessionSimpleIT.java index 0a89ee56ded15..e2ddbcdf259f7 100644 --- a/integration-test/src/test/java/org/apache/iotdb/session/it/IoTDBSessionSimpleIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/session/it/IoTDBSessionSimpleIT.java @@ -20,9 +20,11 @@ import org.apache.iotdb.common.rpc.thrift.TAggregationType; import org.apache.iotdb.db.conf.IoTDBDescriptor; +import org.apache.iotdb.db.it.utils.TestUtils; import org.apache.iotdb.db.protocol.thrift.OperationType; import org.apache.iotdb.isession.ISession; import org.apache.iotdb.isession.SessionDataSet; +import org.apache.iotdb.isession.SessionDataSet.DataIterator; import org.apache.iotdb.it.env.EnvFactory; import org.apache.iotdb.it.framework.IoTDBTestRunner; import org.apache.iotdb.itbase.category.ClusterIT; @@ -36,24 +38,36 @@ import org.apache.tsfile.common.conf.TSFileConfig; import org.apache.tsfile.common.constant.TsFileConstant; import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.file.metadata.PlainDeviceID; import org.apache.tsfile.file.metadata.enums.CompressionType; import org.apache.tsfile.file.metadata.enums.TSEncoding; import org.apache.tsfile.read.common.Field; +import org.apache.tsfile.read.common.Path; import org.apache.tsfile.read.common.RowRecord; import org.apache.tsfile.utils.Binary; import org.apache.tsfile.utils.BitMap; +import org.apache.tsfile.utils.DateUtils; +import org.apache.tsfile.write.TsFileWriter; +import org.apache.tsfile.write.record.TSRecord; import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.record.datapoint.IntDataPoint; import org.apache.tsfile.write.schema.MeasurementSchema; import org.junit.After; +import org.junit.AfterClass; import org.junit.Assert; -import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.File; +import java.nio.charset.StandardCharsets; import java.security.SecureRandom; +import java.sql.Timestamp; +import java.time.LocalDate; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -62,26 +76,43 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static 
org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +@SuppressWarnings({"ThrowFromFinallyBlock", "ResultOfMethodCallIgnored"}) @RunWith(IoTDBTestRunner.class) public class IoTDBSessionSimpleIT { private static Logger LOGGER = LoggerFactory.getLogger(IoTDBSessionSimpleIT.class); - @Before - public void setUp() throws Exception { + private static final String[] databasesToClear = new String[] {"root.sg", "root.sg1"}; + + @BeforeClass + public static void setUpClass() throws Exception { EnvFactory.getEnv().initClusterEnvironment(); } @After - public void tearDown() throws Exception { + public void tearDown() { + for (String database : databasesToClear) { + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + session.executeNonQueryStatement("DELETE DATABASE " + database); + } catch (Exception ignored) { + + } + } + } + + @AfterClass + public static void tearDownClass() throws Exception { EnvFactory.getEnv().cleanClusterEnvironment(); } @@ -187,7 +218,7 @@ public void insertPartialTabletsTest() { } fail(); } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("data type of root.sg.d2.s2 is not consistent")); + assertTrue(e.getMessage().contains("data type of root.sg.d2.s2 is not consistent")); } } @@ -321,7 +352,7 @@ public void insertByObjAndNotInferTypeTest() { expected.add(TSDataType.TEXT.name()); Set actual = new HashSet<>(); - SessionDataSet dataSet = session.executeQueryStatement("show timeseries root.**"); + SessionDataSet dataSet = session.executeQueryStatement("show timeseries root.sg1.**"); while (dataSet.hasNext()) { actual.add(dataSet.next().getFields().get(3).getStringValue()); } @@ -464,7 +495,7 @@ public void insertTabletWithAlignedTimeseriesTest() { @Test @Category({LocalStandaloneIT.class, ClusterIT.class}) - public void insertTabletWithNullValuesTest() { + public void insertTabletWithNullValuesTest() throws InterruptedException { try (ISession session = EnvFactory.getEnv().getSessionConnection()) { List schemaList = new ArrayList<>(); schemaList.add(new MeasurementSchema("s0", TSDataType.DOUBLE, TSEncoding.RLE)); @@ -512,6 +543,38 @@ public void insertTabletWithNullValuesTest() { assertEquals(9L, field.getLongV()); } } + dataSet = session.executeQueryStatement("select s3 from root.sg1.d1"); + int result = 0; + assertTrue(dataSet.hasNext()); + while (dataSet.hasNext()) { + RowRecord rowRecord = dataSet.next(); + Field field = rowRecord.getFields().get(0); + // skip null value + if (result == 3) { + result++; + } + assertEquals(result++, field.getIntV()); + } + } catch (Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + TimeUnit.MILLISECONDS.sleep(2000); + + TestUtils.stopForciblyAndRestartDataNodes(); + + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + SessionDataSet dataSet = session.executeQueryStatement("select s3 from root.sg1.d1"); + int result = 0; + while (dataSet.hasNext()) { + RowRecord rowRecord = dataSet.next(); + Field field = rowRecord.getFields().get(0); + // skip null value + if (result == 3) { + result++; + } + assertEquals(result++, field.getIntV()); + } } catch (Exception e) { e.printStackTrace(); fail(e.getMessage()); @@ -648,7 +711,7 @@ public void insertTabletWithWrongTimestampPrecisionTest() { tablet.reset(); } } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("Current system timestamp precision is ms")); + assertTrue(e.getMessage().contains("Current system 
timestamp precision is ms")); } } @@ -676,7 +739,7 @@ public void insertTabletWithDuplicatedMeasurementsTest() { tablet.reset(); } } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("Insertion contains duplicated measurement: s0")); + assertTrue(e.getMessage().contains("Insertion contains duplicated measurement: s0")); } } @@ -733,7 +796,7 @@ public void createWrongTimeSeriesTest() { LOGGER.error("", e); } - final SessionDataSet dataSet = session.executeQueryStatement("SHOW TIMESERIES"); + final SessionDataSet dataSet = session.executeQueryStatement("SHOW TIMESERIES root.sg.**"); assertFalse(dataSet.hasNext()); session.deleteStorageGroup(storageGroup); @@ -1058,7 +1121,7 @@ public void insertOneDeviceRecordsWithDuplicatedMeasurementsTest() { Boolean.TRUE); session.insertRecordsOfOneDevice("root.sg.d1", times, measurements, datatypes, values); } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("Insertion contains duplicated measurement: s2")); + assertTrue(e.getMessage().contains("Insertion contains duplicated measurement: s2")); } } @@ -1113,7 +1176,43 @@ public void insertRecordsWithDuplicatedMeasurementsTest() { Boolean.TRUE); session.insertRecords(devices, times, measurements, datatypes, values); } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("Insertion contains duplicated measurement: s2")); + assertTrue(e.getMessage().contains("Insertion contains duplicated measurement: s2")); + } + } + + @Test + @Category({LocalStandaloneIT.class, ClusterIT.class}) + public void insertRecordsWithExpiredDataTest() + throws IoTDBConnectionException, StatementExecutionException { + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + List times = new ArrayList<>(); + List> measurements = new ArrayList<>(); + List> datatypes = new ArrayList<>(); + List> values = new ArrayList<>(); + List devices = new ArrayList<>(); + + devices.add("root.sg.d1"); + addLine( + times, + measurements, + datatypes, + values, + 3L, + "s1", + "s2", + TSDataType.INT32, + TSDataType.INT32, + 1, + 2); + session.executeNonQueryStatement("set ttl to root.sg.d1 1"); + try { + session.insertRecords(devices, times, measurements, datatypes, values); + } catch (Exception e) { + assertTrue(e.getMessage().contains("less than ttl time bound")); + } + session.executeNonQueryStatement("unset ttl to root.sg.d1"); + SessionDataSet dataSet = session.executeQueryStatement("select * from root.sg.d1"); + assertFalse(dataSet.hasNext()); } } @@ -1751,14 +1850,14 @@ public void illegalDatabaseNameTest() { session.createDatabase(""); fail(); } catch (StatementExecutionException e) { - Assert.assertTrue(e.getMessage().contains(" is not a legal path")); + assertTrue(e.getMessage().contains(" is not a legal path")); } try { session.deleteDatabases(Arrays.asList("root.db", "")); fail(); } catch (StatementExecutionException e) { - Assert.assertTrue(e.getMessage().contains(" is not a legal path")); + assertTrue(e.getMessage().contains(" is not a legal path")); } session.deleteDatabase("root.db"); @@ -1807,4 +1906,509 @@ public void convertRecordsToTabletsTest() { e.printStackTrace(); } } + + @Test + @Category({LocalStandaloneIT.class, ClusterIT.class}) + public void insertMinMaxTimeTest() throws IoTDBConnectionException, StatementExecutionException { + try { + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + try { + session.executeNonQueryStatement( + "SET CONFIGURATION \"timestamp_precision_check_enabled\"=\"false\""); + } catch (StatementExecutionException e) 
{ + // run in IDE will trigger this, ignore it + if (!e.getMessage().contains("Unable to find the configuration file")) { + throw e; + } + } + + session.executeNonQueryStatement( + String.format( + "INSERT INTO root.testInsertMinMax.d1(timestamp, s1) VALUES (%d, 1)", + Long.MIN_VALUE)); + session.executeNonQueryStatement( + String.format( + "INSERT INTO root.testInsertMinMax.d1(timestamp, s1) VALUES (%d, 1)", + Long.MAX_VALUE)); + + SessionDataSet dataSet = + session.executeQueryStatement("SELECT * FROM root.testInsertMinMax.d1"); + RowRecord record = dataSet.next(); + assertEquals(Long.MIN_VALUE, record.getTimestamp()); + record = dataSet.next(); + assertEquals(Long.MAX_VALUE, record.getTimestamp()); + assertFalse(dataSet.hasNext()); + + session.executeNonQueryStatement("FLUSH"); + dataSet = session.executeQueryStatement("SELECT * FROM root.testInsertMinMax.d1"); + record = dataSet.next(); + assertEquals(Long.MIN_VALUE, record.getTimestamp()); + record = dataSet.next(); + assertEquals(Long.MAX_VALUE, record.getTimestamp()); + assertFalse(dataSet.hasNext()); + } + } finally { + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + try { + session.executeNonQueryStatement( + "SET CONFIGURATION \"timestamp_precision_check_enabled\"=\"true\""); + } catch (StatementExecutionException e) { + // run in IDE will trigger this, ignore it + if (!e.getMessage().contains("Unable to find the configuration file")) { + throw e; + } + } + } + } + } + + @Test + @Category({LocalStandaloneIT.class, ClusterIT.class}) + public void loadMinMaxTimeNonAlignedTest() throws Exception { + File file = new File("target", "test.tsfile"); + try (TsFileWriter writer = new TsFileWriter(file)) { + IDeviceID deviceID = new PlainDeviceID("root.testLoadMinMax.d1"); + writer.registerTimeseries(new Path(deviceID), new MeasurementSchema("s1", TSDataType.INT32)); + TSRecord record = new TSRecord(Long.MIN_VALUE, deviceID); + record.addTuple(new IntDataPoint("s1", 1)); + writer.write(record); + record.setTime(Long.MAX_VALUE); + writer.write(record); + } + + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + try { + session.executeNonQueryStatement( + "SET CONFIGURATION \"timestamp_precision_check_enabled\"=\"false\""); + } catch (StatementExecutionException e) { + // run in IDE will trigger this, ignore it + if (!e.getMessage().contains("Unable to find the configuration file")) { + throw e; + } + } + session.executeNonQueryStatement("LOAD \"" + file.getAbsolutePath() + "\""); + + SessionDataSet dataSet = + session.executeQueryStatement("SELECT * FROM root.testLoadMinMax.d1"); + RowRecord record = dataSet.next(); + assertEquals(Long.MIN_VALUE, record.getTimestamp()); + record = dataSet.next(); + assertEquals(Long.MAX_VALUE, record.getTimestamp()); + assertFalse(dataSet.hasNext()); + } finally { + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + try { + session.executeNonQueryStatement( + "SET CONFIGURATION \"timestamp_precision_check_enabled\"=\"true\""); + } catch (StatementExecutionException e) { + // run in IDE will trigger this, ignore it + if (!e.getMessage().contains("Unable to find the configuration file")) { + throw e; + } + } + } + file.delete(); + } + } + + @Test + @Category({LocalStandaloneIT.class, ClusterIT.class}) + public void loadMinMaxTimeAlignedTest() throws Exception { + File file = new File("target", "test.tsfile"); + try (TsFileWriter writer = new TsFileWriter(file)) { + IDeviceID deviceID = new PlainDeviceID("root.testLoadMinMaxAligned.d1"); + 
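+      // register the aligned series before writing, then store two records at the extreme
+      // timestamps (Long.MIN_VALUE / Long.MAX_VALUE); loading them presumably requires the
+      // "timestamp_precision_check_enabled" check to be switched off first, as done below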
writer.registerAlignedTimeseries( + new Path(deviceID), + Collections.singletonList(new MeasurementSchema("s1", TSDataType.INT32))); + TSRecord record = new TSRecord(Long.MIN_VALUE, deviceID); + record.addTuple(new IntDataPoint("s1", 1)); + writer.writeAligned(record); + record.setTime(Long.MAX_VALUE); + writer.writeAligned(record); + } + + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + try { + session.executeNonQueryStatement( + "SET CONFIGURATION \"timestamp_precision_check_enabled\"=\"false\""); + } catch (StatementExecutionException e) { + // run in IDE will trigger this, ignore it + if (!e.getMessage().contains("Unable to find the configuration file")) { + throw e; + } + } + session.executeNonQueryStatement("LOAD \"" + file.getAbsolutePath() + "\""); + + SessionDataSet dataSet = + session.executeQueryStatement("SELECT * FROM root.testLoadMinMaxAligned.d1"); + RowRecord record = dataSet.next(); + assertEquals(Long.MIN_VALUE, record.getTimestamp()); + record = dataSet.next(); + assertEquals(Long.MAX_VALUE, record.getTimestamp()); + assertFalse(dataSet.hasNext()); + } finally { + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + try { + session.executeNonQueryStatement( + "SET CONFIGURATION \"timestamp_precision_check_enabled\"=\"true\""); + } catch (StatementExecutionException e) { + // run in IDE will trigger this, ignore it + if (!e.getMessage().contains("Unable to find the configuration file")) { + throw e; + } + } + } + file.delete(); + } + } + + @Test + @Category({LocalStandaloneIT.class, ClusterIT.class}) + public void testQueryAllDataType() throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = + new Tablet( + "root.sg.d1", + Arrays.asList( + new MeasurementSchema("s1", TSDataType.INT32), + new MeasurementSchema("s2", TSDataType.INT64), + new MeasurementSchema("s3", TSDataType.FLOAT), + new MeasurementSchema("s4", TSDataType.DOUBLE), + new MeasurementSchema("s5", TSDataType.TEXT), + new MeasurementSchema("s6", TSDataType.BOOLEAN), + new MeasurementSchema("s7", TSDataType.TIMESTAMP), + new MeasurementSchema("s8", TSDataType.BLOB), + new MeasurementSchema("s9", TSDataType.STRING), + new MeasurementSchema("s10", TSDataType.DATE), + new MeasurementSchema("s11", TSDataType.TIMESTAMP)), + 10); + tablet.addTimestamp(0, 0L); + tablet.addValue("s1", 0, 1); + tablet.addValue("s2", 0, 1L); + tablet.addValue("s3", 0, 0f); + tablet.addValue("s4", 0, 0d); + tablet.addValue("s5", 0, "text_value"); + tablet.addValue("s6", 0, true); + tablet.addValue("s7", 0, 1L); + tablet.addValue("s8", 0, new Binary(new byte[] {1})); + tablet.addValue("s9", 0, "string_value"); + tablet.addValue("s10", 0, DateUtils.parseIntToLocalDate(20250403)); + tablet.initBitMaps(); + tablet.bitMaps[10].mark(0); + tablet.rowSize = 1; + + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + session.insertTablet(tablet); + + try (SessionDataSet dataSet = session.executeQueryStatement("select * from root.sg.d1")) { + DataIterator iterator = dataSet.iterator(); + int count = 0; + while (iterator.next()) { + count++; + assertFalse(iterator.isNull("root.sg.d1.s1")); + assertEquals(1, iterator.getInt("root.sg.d1.s1")); + assertFalse(iterator.isNull("root.sg.d1.s2")); + assertEquals(1L, iterator.getLong("root.sg.d1.s2")); + assertFalse(iterator.isNull("root.sg.d1.s3")); + assertEquals(0, iterator.getFloat("root.sg.d1.s3"), 0.01); + assertFalse(iterator.isNull("root.sg.d1.s4")); + assertEquals(0, iterator.getDouble("root.sg.d1.s4"), 0.01); + 
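+          // the remaining columns cover the string-like and temporal types; s11 was never
+          // assigned and its bitmap slot is marked, so it must come back as null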
assertFalse(iterator.isNull("root.sg.d1.s5")); + assertEquals("text_value", iterator.getString("root.sg.d1.s5")); + assertFalse(iterator.isNull("root.sg.d1.s6")); + assertTrue(iterator.getBoolean("root.sg.d1.s6")); + assertFalse(iterator.isNull("root.sg.d1.s7")); + assertEquals(new Timestamp(1), iterator.getTimestamp("root.sg.d1.s7")); + assertFalse(iterator.isNull("root.sg.d1.s8")); + assertEquals(new Binary(new byte[] {1}), iterator.getBlob("root.sg.d1.s8")); + assertFalse(iterator.isNull("root.sg.d1.s9")); + assertEquals("string_value", iterator.getString("root.sg.d1.s9")); + assertFalse(iterator.isNull("root.sg.d1.s10")); + assertEquals(DateUtils.parseIntToLocalDate(20250403), iterator.getDate("root.sg.d1.s10")); + assertTrue(iterator.isNull("root.sg.d1.s11")); + assertNull(iterator.getTimestamp("root.sg.d1.s11")); + + assertEquals(new Timestamp(0), iterator.getTimestamp("Time")); + assertFalse(iterator.isNull("Time")); + } + Assert.assertEquals(tablet.rowSize, count); + } + } + } + + @Test + public void testInsertWrongTypeRecord() throws IoTDBConnectionException { + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + assertThrows( + ClassCastException.class, + () -> + session.insertRecord( + "root.db1.d1", + 0, + Collections.singletonList("s1"), + Collections.singletonList(TSDataType.INT32), + Collections.singletonList(1L))); + } + } + + @Test + public void testAlterDefaultCompression() + throws IoTDBConnectionException, StatementExecutionException { + // auto-create + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + List types = + Arrays.asList( + TSDataType.BOOLEAN, + TSDataType.INT32, + TSDataType.DATE, + TSDataType.INT64, + TSDataType.TIMESTAMP, + TSDataType.FLOAT, + TSDataType.DOUBLE, + TSDataType.TEXT, + TSDataType.STRING, + TSDataType.BLOB); + List measurements = + types.stream().map(dataType -> "__" + dataType.toString()).collect(Collectors.toList()); + List values = + Arrays.asList( + false, + 1, + LocalDate.of(1000, 1, 1), + 1L, + 1L, + 1.0f, + 1.0, + new Binary("1".getBytes(StandardCharsets.UTF_8)), + new Binary("1".getBytes(StandardCharsets.UTF_8)), + new Binary("1".getBytes(StandardCharsets.UTF_8))); + String device1 = "root.test.d1"; + session.insertRecord(device1, 0, measurements, types, values); + + try (SessionDataSet dataSet = + session.executeQueryStatement("SHOW TIMESERIES root.test.d1.**")) { + int compressionIndex = dataSet.getColumnNames().indexOf("Compression"); + while (dataSet.hasNext()) { + RowRecord rec = dataSet.next(); + Field compressionField = rec.getFields().get(compressionIndex); + assertEquals("LZ4", compressionField.getStringValue()); + } + } + + for (TSDataType type : types) { + String configName = null; + switch (type) { + case INT32: + case INT64: + case FLOAT: + case DOUBLE: + case TEXT: + case BOOLEAN: + configName = type.name().toLowerCase(); + break; + case STRING: + case BLOB: + configName = "text"; + break; + case DATE: + configName = "int32"; + break; + case TIMESTAMP: + configName = "int64"; + break; + } + session.executeNonQueryStatement( + String.format("SET CONFIGURATION '%s_compressor'='GZIP'", configName)); + } + + String device2 = "root.test.d2"; + session.insertRecord(device2, 0, measurements, types, values); + + try (SessionDataSet dataSet = + session.executeQueryStatement("SHOW TIMESERIES root.test.d2.**")) { + int compressionIndex = dataSet.getColumnNames().indexOf("Compression"); + while (dataSet.hasNext()) { + RowRecord rec = dataSet.next(); + Field compressionField = 
rec.getFields().get(compressionIndex); + assertEquals("GZIP", compressionField.getStringValue()); + } + } + } + + // manual create + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + List types = + Arrays.asList( + TSDataType.BOOLEAN, + TSDataType.INT32, + TSDataType.DATE, + TSDataType.INT64, + TSDataType.TIMESTAMP, + TSDataType.FLOAT, + TSDataType.DOUBLE, + TSDataType.TEXT, + TSDataType.STRING, + TSDataType.BLOB); + List measurements = + types.stream().map(dataType -> "__" + dataType.toString()).collect(Collectors.toList()); + + String device3 = "root.test.d3"; + for (int i = 0; i < types.size(); i++) { + session.executeNonQueryStatement( + String.format( + "CREATE TIMESERIES %s.%s WITH DATATYPE=%s", + device3, measurements.get(i), types.get(i))); + } + + try (SessionDataSet dataSet = + session.executeQueryStatement("SHOW TIMESERIES root.test.d3.**")) { + int compressionIndex = dataSet.getColumnNames().indexOf("Compression"); + while (dataSet.hasNext()) { + RowRecord rec = dataSet.next(); + Field compressionField = rec.getFields().get(compressionIndex); + assertEquals("GZIP", compressionField.getStringValue()); + } + } + + for (TSDataType type : types) { + String configName = null; + switch (type) { + case INT32: + case INT64: + case FLOAT: + case DOUBLE: + case TEXT: + case BOOLEAN: + configName = type.name().toLowerCase(); + break; + case STRING: + case BLOB: + configName = "text"; + break; + case DATE: + configName = "int32"; + break; + case TIMESTAMP: + configName = "int64"; + break; + } + session.executeNonQueryStatement( + String.format("SET CONFIGURATION '%s_compressor'='LZ4'", configName)); + } + + String device4 = "root.test.d4"; + for (int i = 0; i < types.size(); i++) { + session.executeNonQueryStatement( + String.format( + "CREATE TIMESERIES %s.%s WITH DATATYPE=%s", + device4, measurements.get(i), types.get(i))); + } + + try (SessionDataSet dataSet = + session.executeQueryStatement("SHOW TIMESERIES root.test.d4.**")) { + int compressionIndex = dataSet.getColumnNames().indexOf("Compression"); + while (dataSet.hasNext()) { + RowRecord rec = dataSet.next(); + Field compressionField = rec.getFields().get(compressionIndex); + assertEquals("LZ4", compressionField.getStringValue()); + } + } + } + + // template + try (ISession session = EnvFactory.getEnv().getSessionConnection()) { + List types = + Arrays.asList( + TSDataType.BOOLEAN, + TSDataType.INT32, + TSDataType.DATE, + TSDataType.INT64, + TSDataType.TIMESTAMP, + TSDataType.FLOAT, + TSDataType.DOUBLE, + TSDataType.TEXT, + TSDataType.STRING, + TSDataType.BLOB); + List measurements = + types.stream().map(dataType -> "__" + dataType.toString()).collect(Collectors.toList()); + List values = + Arrays.asList( + false, + 1, + LocalDate.of(1000, 1, 1), + 1L, + 1L, + 1.0f, + 1.0, + new Binary("1".getBytes(StandardCharsets.UTF_8)), + new Binary("1".getBytes(StandardCharsets.UTF_8)), + new Binary("1".getBytes(StandardCharsets.UTF_8))); + + String createTemplateSql = "CREATE DEVICE TEMPLATE t1 ("; + for (int i = 0; i < types.size(); i++) { + createTemplateSql += measurements.get(i) + " " + types.get(i).name(); + if (i != types.size() - 1) { + createTemplateSql += ","; + } + } + createTemplateSql += ")"; + session.executeNonQueryStatement(createTemplateSql); + + session.executeNonQueryStatement("SET DEVICE TEMPLATE t1 TO root.test.d5"); + String device5 = "root.test.d5"; + session.insertRecord(device5, 0, measurements, types, values); + + try (SessionDataSet dataSet = + session.executeQueryStatement("SHOW TIMESERIES 
root.test.d5.**")) { + int compressionIndex = dataSet.getColumnNames().indexOf("Compression"); + while (dataSet.hasNext()) { + RowRecord rec = dataSet.next(); + Field compressionField = rec.getFields().get(compressionIndex); + assertEquals("LZ4", compressionField.getStringValue()); + } + } + + for (TSDataType type : types) { + String configName = null; + switch (type) { + case INT32: + case INT64: + case FLOAT: + case DOUBLE: + case TEXT: + case BOOLEAN: + configName = type.name().toLowerCase(); + break; + case STRING: + case BLOB: + configName = "text"; + break; + case DATE: + configName = "int32"; + break; + case TIMESTAMP: + configName = "int64"; + break; + } + session.executeNonQueryStatement( + String.format("SET CONFIGURATION '%s_compressor'='GZIP'", configName)); + } + + createTemplateSql = createTemplateSql.replace("t1", "t2"); + session.executeNonQueryStatement(createTemplateSql); + session.executeNonQueryStatement("SET DEVICE TEMPLATE t2 TO root.test.d6"); + + String device6 = "root.test.d6"; + session.insertRecord(device6, 0, measurements, types, values); + + try (SessionDataSet dataSet = + session.executeQueryStatement("SHOW TIMESERIES root.test.d6.**")) { + int compressionIndex = dataSet.getColumnNames().indexOf("Compression"); + while (dataSet.hasNext()) { + RowRecord rec = dataSet.next(); + Field compressionField = rec.getFields().get(compressionIndex); + assertEquals("GZIP", compressionField.getStringValue()); + } + } + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/AbstractSubscriptionIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/AbstractSubscriptionIT.java index a168dbc78ba72..bae161ba24ffa 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/AbstractSubscriptionIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/AbstractSubscriptionIT.java @@ -24,19 +24,20 @@ import org.junit.After; import org.junit.Before; import org.junit.Rule; -import org.junit.rules.TestName; import org.junit.rules.TestRule; public abstract class AbstractSubscriptionIT { - @Rule public TestName testName = new TestName(); + @Rule public DisplayName testName = new DisplayName(); - @Rule public final TestRule skipOnSetUpFailure = new SkipOnSetUpFailure("setUp"); + @Rule + public final TestRule skipOnSetUpAndTearDownFailure = + new SkipOnSetUpAndTearDownFailure("setUp", "tearDown"); @Before - public void setUp() { + public void setUp() throws Exception { // set thread name - Thread.currentThread().setName(String.format("%s - main", testName.getMethodName())); + Thread.currentThread().setName(String.format("%s - main", testName.getDisplayName())); // set thread pools core size SubscriptionExecutorServiceManager.setControlFlowExecutorCorePoolSize(1); @@ -45,5 +46,5 @@ public void setUp() { } @After - public void tearDown() {} + public void tearDown() throws Exception {} } diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/DisplayName.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/DisplayName.java new file mode 100644 index 0000000000000..b1faae54287bd --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/DisplayName.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + package org.apache.iotdb.subscription.it; + + import org.junit.rules.TestWatcher; + import org.junit.runner.Description; + + public class DisplayName extends TestWatcher { + + private volatile String displayName; + + public DisplayName() {} + + protected void starting(final Description d) { + this.displayName = d.getDisplayName(); + } + + public String getDisplayName() { + return displayName; + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/IoTDBSubscriptionITConstant.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/IoTDBSubscriptionITConstant.java index 5b8ec39327414..3162139fb665e 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/IoTDBSubscriptionITConstant.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/IoTDBSubscriptionITConstant.java @@ -19,10 +19,15 @@ package org.apache.iotdb.subscription.it; +import org.apache.iotdb.consensus.ConsensusFactory; +import org.apache.iotdb.itbase.env.BaseEnv; +import org.apache.iotdb.session.Session; + import org.awaitility.Awaitility; import org.awaitility.core.ConditionFactory; import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; public class IoTDBSubscriptionITConstant { @@ -40,4 +45,27 @@ public class IoTDBSubscriptionITConstant { public static final long SLEEP_NS = 1_000_000_000L; public static final long POLL_TIMEOUT_MS = 10_000L; + + @FunctionalInterface + public interface WrappedVoidSupplier { + void get() throws Throwable; + } + + public static void AWAIT_WITH_FLUSH(final Session session, final WrappedVoidSupplier assertions) { + AWAIT.untilAsserted( + () -> { + session.executeNonQueryStatement("flush"); + assertions.get(); + }); + } + + public static Consumer<BaseEnv> FORCE_SCALABLE_SINGLE_NODE_MODE = + env -> + env.getConfig() + .getCommonConfig() + .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS) + .setSchemaReplicationFactor(1) + .setDataReplicationFactor(1); } diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/Retry.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/Retry.java new file mode 100644 index 0000000000000..0d748b019abbc --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/Retry.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** Marks a test method to specify how many times it should be retried (first run + retries). */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.METHOD) +public @interface Retry { + /** Total execution count = 1 (initial run) + number of retries */ + int times() default 3; +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/RetryRule.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/RetryRule.java new file mode 100644 index 0000000000000..5a577c21f36f2 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/RetryRule.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it; + +import org.junit.rules.TestRule; +import org.junit.runner.Description; +import org.junit.runners.model.Statement; + +/** Controls retry logic for test failures based on the {@link Retry} annotation. */ +public class RetryRule implements TestRule { + + @Override + public Statement apply(final Statement base, final Description description) { + // Read the annotation on the method; if absent, do not retry (times = 1) + final Retry retry = description.getAnnotation(Retry.class); + final int times = (retry != null ? 
retry.times() : 1); + return new RetryStatement(base, description, times); + } + + private static class RetryStatement extends Statement { + private final Statement base; + private final Description description; + private final int times; + + RetryStatement(final Statement base, final Description description, final int times) { + this.base = base; + this.description = description; + this.times = times; + } + + @Override + public void evaluate() throws Throwable { + Throwable lastThrowable; + for (int i = 1; i <= times; i++) { + try { + base.evaluate(); + return; // Return immediately on success + } catch (final Throwable t) { + lastThrowable = t; + System.err.printf( + "[%s] run %d/%d failed: %s%n", + description.getDisplayName(), i, times, t.getMessage()); + if (i == times) { + // If it's the last attempt, and it still fails, throw the exception + throw lastThrowable; + } + // Otherwise, continue to the next retry + } + } + } + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/SkipOnSetUpAndTearDownFailure.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/SkipOnSetUpAndTearDownFailure.java new file mode 100644 index 0000000000000..6c3891612198a --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/SkipOnSetUpAndTearDownFailure.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it; + +import org.checkerframework.checker.nullness.qual.NonNull; +import org.junit.Assume; +import org.junit.rules.TestRule; +import org.junit.runner.Description; +import org.junit.runners.model.MultipleFailureException; +import org.junit.runners.model.Statement; + +import java.lang.reflect.Method; + +public class SkipOnSetUpAndTearDownFailure implements TestRule { + + private final String setUpMethodName; + private final String tearDownMethodName; + + /** + * @param setUpMethodName Should be exactly the same as the method name decorated with @Before. + * @param tearDownMethodName Should be exactly the same as the method name decorated with @After. + */ + public SkipOnSetUpAndTearDownFailure( + @NonNull final String setUpMethodName, @NonNull final String tearDownMethodName) { + this.setUpMethodName = setUpMethodName; + this.tearDownMethodName = tearDownMethodName; + } + + @Override + public Statement apply(final Statement base, final Description description) { + return new Statement() { + @Override + public void evaluate() throws Throwable { + try { + base.evaluate(); + } catch (final Throwable e) { + // Pay attention to the situation of MultipleFailureException... 
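+ // (JUnit can bundle several failures, e.g. a test-body failure plus an @After failure, into a
+ // single MultipleFailureException; in that case the test is skipped only when every bundled
+ // failure can be traced back to setUp or tearDown, which is exactly what the check below does.)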
+ if ((e instanceof MultipleFailureException + && ((MultipleFailureException) e) + .getFailures().stream().allMatch(this::isExceptionInSetUpOrTearDown)) + || isExceptionInSetUpOrTearDown(e)) { + Assume.assumeTrue( + String.format( + "Skipping test due to setup or tearDown failure for %s#%s", + description.getClassName(), description.getMethodName()), + false); + } + + // Re-throw the exception (which means the test has failed). + throw e; + + // Regardless of the circumstances, the method decorated with @After will always be + // executed. + } + } + + private boolean isExceptionInSetUpOrTearDown(final Throwable e) { + // Trace back the exception stack to determine whether the exception was thrown during the + // setUp or tearDown phase. + for (final StackTraceElement stackTraceElement : e.getStackTrace()) { + if (setUpMethodName.equals(stackTraceElement.getMethodName()) + && description.getClassName().equals(stackTraceElement.getClassName()) + && isMethodAnnotationWithBefore(stackTraceElement.getMethodName())) { + e.printStackTrace(); + return true; + } + + if (tearDownMethodName.equals(stackTraceElement.getMethodName()) + && description.getClassName().equals(stackTraceElement.getClassName()) + && isMethodAnnotationWithAfter(stackTraceElement.getMethodName())) { + e.printStackTrace(); + return true; + } + } + return false; + } + + private boolean isMethodAnnotationWithBefore(final String methodName) { + try { + final Method method = description.getTestClass().getDeclaredMethod(methodName); + return method.isAnnotationPresent(org.junit.Before.class); + } catch (final Throwable ignored) { + return false; + } + } + + private boolean isMethodAnnotationWithAfter(final String methodName) { + try { + final Method method = description.getTestClass().getDeclaredMethod(methodName); + return method.isAnnotationPresent(org.junit.After.class); + } catch (final Throwable ignored) { + return false; + } + } + }; + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/SkipOnSetUpFailure.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/SkipOnSetUpFailure.java deleted file mode 100644 index add7b7c1e2b88..0000000000000 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/SkipOnSetUpFailure.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.subscription.it; - -import org.checkerframework.checker.nullness.qual.NonNull; -import org.junit.AssumptionViolatedException; -import org.junit.rules.TestRule; -import org.junit.runner.Description; -import org.junit.runners.model.Statement; - -import java.lang.reflect.Method; - -public class SkipOnSetUpFailure implements TestRule { - - private final String setUpMethodName; - - /** - * @param setUpMethodName Should be exactly the same as the method name decorated with @Before. - */ - public SkipOnSetUpFailure(@NonNull final String setUpMethodName) { - this.setUpMethodName = setUpMethodName; - } - - @Override - public Statement apply(final Statement base, final Description description) { - return new Statement() { - @Override - public void evaluate() throws Throwable { - try { - base.evaluate(); - } catch (final Throwable e) { - // Trace back the exception stack to determine whether the exception was thrown during the - // setUp phase. - for (final StackTraceElement stackTraceElement : e.getStackTrace()) { - if (setUpMethodName.equals(stackTraceElement.getMethodName()) - && description.getClassName().equals(stackTraceElement.getClassName()) - && isMethodAnnotationWithBefore(stackTraceElement.getMethodName())) { - e.printStackTrace(); - // Skip this test. - throw new AssumptionViolatedException( - String.format( - "Skipping test due to setup failure for %s#%s", - description.getClassName(), description.getMethodName())); - } - } - - // Re-throw the exception (which means the test has failed). - throw e; - - // Regardless of the circumstances, the method decorated with @After will always be - // executed. - } - } - - private boolean isMethodAnnotationWithBefore(final String methodName) { - try { - final Method method = description.getTestClass().getDeclaredMethod(methodName); - return method.isAnnotationPresent(org.junit.Before.class); - } catch (final Throwable ignored) { - return false; - } - } - }; - } -} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/cluster/IoTDBSubscriptionRestartIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/cluster/IoTDBSubscriptionRestartIT.java index 41dd654383d66..21490c2687668 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/cluster/IoTDBSubscriptionRestartIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/cluster/IoTDBSubscriptionRestartIT.java @@ -39,6 +39,7 @@ import org.apache.iotdb.subscription.it.AbstractSubscriptionIT; import org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant; +import org.apache.tsfile.utils.Pair; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -48,9 +49,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.LockSupport; @@ -65,9 +68,13 @@ public class IoTDBSubscriptionRestartIT extends AbstractSubscriptionIT { @Override @Before - public void setUp() { + public void setUp() throws Exception { super.setUp(); + // enable subscription + EnvFactory.getEnv().getConfig().getCommonConfig().setSubscriptionEnabled(true); + + // set cluster env EnvFactory.getEnv() .getConfig() .getCommonConfig() @@ -75,17 +82,19 @@ public void setUp() { .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) 
.setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS) .setSchemaReplicationFactor(3) - .setDataReplicationFactor(2); + .setDataReplicationFactor(2) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); EnvFactory.getEnv().initClusterEnvironment(3, 3); } @Override @After - public void tearDown() { - super.tearDown(); - + public void tearDown() throws Exception { EnvFactory.getEnv().cleanClusterEnvironment(); + + super.tearDown(); } @Test @@ -104,9 +113,10 @@ public void testSubscriptionAfterRestartCluster() throws Exception { } // Subscription - final SubscriptionPullConsumer consumer; + final SubscriptionPullConsumer consumer1; + final SubscriptionPullConsumer consumer2; try { - consumer = + consumer1 = new SubscriptionPullConsumer.Builder() .host(host) .port(port) @@ -116,8 +126,21 @@ public void testSubscriptionAfterRestartCluster() throws Exception { .heartbeatIntervalMs(1000) // narrow heartbeat interval .endpointsSyncIntervalMs(5000) // narrow endpoints sync interval .buildPullConsumer(); - consumer.open(); - consumer.subscribe(topicName); + consumer1.open(); + consumer1.subscribe(topicName); + + consumer2 = + new SubscriptionPullConsumer.Builder() + .host(host) + .port(port) + .consumerId("c2") + .consumerGroupId("cg2") + .autoCommit(true) + .heartbeatIntervalMs(1000) // narrow heartbeat interval + .endpointsSyncIntervalMs(5000) // narrow endpoints sync interval + .buildPullConsumer(); + consumer2.open(); + consumer2.subscribe(topicName); } catch (final Exception e) { e.printStackTrace(); fail(e.getMessage()); @@ -145,7 +168,7 @@ public void testSubscriptionAfterRestartCluster() throws Exception { client.showSubscription(new TShowSubscriptionReq()); Assert.assertEquals(RpcUtils.SUCCESS_STATUS.getCode(), showSubscriptionResp.status.getCode()); Assert.assertNotNull(showSubscriptionResp.subscriptionInfoList); - Assert.assertEquals(1, showSubscriptionResp.subscriptionInfoList.size()); + Assert.assertEquals(2, showSubscriptionResp.subscriptionInfoList.size()); } // Insert some historical data @@ -162,17 +185,18 @@ public void testSubscriptionAfterRestartCluster() throws Exception { } // Subscription again - final Map<Long, Long> timestamps = new HashMap<>(); + final Map<Pair<Long, String>, Long> timestamps = new ConcurrentHashMap<>(); final AtomicBoolean isClosed = new AtomicBoolean(false); - final Thread thread = + final List<Thread> threads = new ArrayList<>(); + threads.add( new Thread( () -> { - try (final SubscriptionPullConsumer consumerRef = consumer) { + try (final SubscriptionPullConsumer consumerRef1 = consumer1) { while (!isClosed.get()) { LockSupport.parkNanos(IoTDBSubscriptionITConstant.SLEEP_NS); // wait some time final List<SubscriptionMessage> messages; try { - messages = consumer.poll(IoTDBSubscriptionITConstant.POLL_TIMEOUT_MS); + messages = consumerRef1.poll(IoTDBSubscriptionITConstant.POLL_TIMEOUT_MS); } catch (final Exception e) { e.printStackTrace(); // Avoid failure @@ -183,13 +207,13 @@ public void testSubscriptionAfterRestartCluster() throws Exception { message.getSessionDataSetsHandler()) { while (dataSet.hasNext()) { final long timestamp = dataSet.next().getTimestamp(); - timestamps.put(timestamp, timestamp); + timestamps.put(new Pair<>(timestamp, consumerRef1.toString()), timestamp); } } } // Auto commit } - consumerRef.unsubscribe(topicName); + consumerRef1.unsubscribe(topicName); } catch (final Exception e) { e.printStackTrace(); // Avoid failure @@ -197,19 +221,57 @@ public void testSubscriptionAfterRestartCluster() throws Exception { LOGGER.info("consumer exiting..."); } }, -
String.format("%s - %s", testName.getMethodName(), consumer)); - thread.start(); + String.format("%s - %s", testName.getDisplayName(), consumer1))); + threads.add( + new Thread( + () -> { + try (final SubscriptionPullConsumer consumerRef2 = consumer2) { + while (!isClosed.get()) { + LockSupport.parkNanos(IoTDBSubscriptionITConstant.SLEEP_NS); // wait some time + final List messages; + try { + messages = consumerRef2.poll(IoTDBSubscriptionITConstant.POLL_TIMEOUT_MS); + } catch (final Exception e) { + e.printStackTrace(); + // Avoid failure + continue; + } + for (final SubscriptionMessage message : messages) { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + while (dataSet.hasNext()) { + final long timestamp = dataSet.next().getTimestamp(); + timestamps.put(new Pair<>(timestamp, consumerRef2.toString()), timestamp); + } + } + } + // Auto commit + } + consumerRef2.unsubscribe(topicName); + } catch (final Exception e) { + e.printStackTrace(); + // Avoid failure + } finally { + LOGGER.info("consumer exiting..."); + } + }, + String.format("%s - %s", testName.getDisplayName(), consumer2))); + for (final Thread thread : threads) { + thread.start(); + } // Check timestamps size try { // Keep retrying if there are execution failures - AWAIT.untilAsserted(() -> Assert.assertEquals(100, timestamps.size())); + AWAIT.untilAsserted(() -> Assert.assertEquals(200, timestamps.size())); } catch (final Exception e) { e.printStackTrace(); fail(e.getMessage()); } finally { isClosed.set(true); - thread.join(); + for (final Thread thread : threads) { + thread.join(); + } } } @@ -310,7 +372,7 @@ public void testSubscriptionAfterRestartDataNode() throws Exception { LOGGER.info("consumer exiting..."); } }, - String.format("%s - %s", testName.getMethodName(), consumer)); + String.format("%s - %s", testName.getDisplayName(), consumer)); thread.start(); // Start DN 1 & DN 2 @@ -437,7 +499,7 @@ public void testSubscriptionWhenConfigNodeLeaderChange() throws Exception { LOGGER.info("consumer exiting..."); } }, - String.format("%s - %s", testName.getMethodName(), consumer)); + String.format("%s - %s", testName.getDisplayName(), consumer)); thread.start(); // Shutdown leader CN diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/AbstractSubscriptionDualIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/AbstractSubscriptionDualIT.java index fb25cdc3a6c09..873334ea13909 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/AbstractSubscriptionDualIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/AbstractSubscriptionDualIT.java @@ -26,14 +26,14 @@ import org.junit.After; import org.junit.Before; -abstract class AbstractSubscriptionDualIT extends AbstractSubscriptionIT { +public abstract class AbstractSubscriptionDualIT extends AbstractSubscriptionIT { protected BaseEnv senderEnv; protected BaseEnv receiverEnv; @Override @Before - public void setUp() { + public void setUp() throws Exception { super.setUp(); MultiEnvFactory.createEnv(2); @@ -47,21 +47,36 @@ public void setUp() { } protected void setUpConfig() { + // enable subscription + senderEnv.getConfig().getCommonConfig().setSubscriptionEnabled(true); + receiverEnv.getConfig().getCommonConfig().setSubscriptionEnabled(true); + // enable auto create schema senderEnv.getConfig().getCommonConfig().setAutoCreateSchemaEnabled(true); receiverEnv.getConfig().getCommonConfig().setAutoCreateSchemaEnabled(true); // 10 min, 
assert that the operations will not time out - senderEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); - receiverEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); + senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + + senderEnv + .getConfig() + .getCommonConfig() + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); + receiverEnv + .getConfig() + .getCommonConfig() + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); } @Override @After - public void tearDown() { - super.tearDown(); - + public void tearDown() throws Exception { senderEnv.cleanClusterEnvironment(); receiverEnv.cleanClusterEnvironment(); + + super.tearDown(); } } diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionConsumerGroupIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionConsumerGroupIT.java index eccfc634e6b9e..63ec58d134d3f 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionConsumerGroupIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionConsumerGroupIT.java @@ -26,7 +26,7 @@ import org.apache.iotdb.isession.ISession; import org.apache.iotdb.it.env.cluster.node.DataNodeWrapper; import org.apache.iotdb.it.framework.IoTDBTestRunner; -import org.apache.iotdb.itbase.category.MultiClusterIT2Subscription; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionArchVerification; import org.apache.iotdb.rpc.TSStatusCode; import org.apache.iotdb.rpc.subscription.config.TopicConstant; import org.apache.iotdb.session.subscription.SubscriptionSession; @@ -69,7 +69,7 @@ import static org.junit.Assert.fail; @RunWith(IoTDBTestRunner.class) -@Category({MultiClusterIT2Subscription.class}) +@Category({MultiClusterIT2SubscriptionArchVerification.class}) public class IoTDBSubscriptionConsumerGroupIT extends AbstractSubscriptionDualIT { // Test dimensions: @@ -117,13 +117,24 @@ static final class SubscriptionInfo { protected void setUpConfig() { super.setUpConfig(); + senderEnv.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false); + // Enable air gap receiver - receiverEnv.getConfig().getCommonConfig().setPipeAirGapReceiverEnabled(true); + receiverEnv + .getConfig() + .getCommonConfig() + .setPipeMemoryManagementEnabled(false) + .setPipeAirGapReceiverEnabled(true); + receiverEnv + .getConfig() + .getCommonConfig() + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); } @Override @Before - public void setUp() { + public void setUp() throws Exception { super.setUp(); // Setup connector attributes @@ -1036,7 +1047,7 @@ private void pollMessagesAndCheck( LOGGER.info("consumer {} exiting...", consumers.get(index)); } }, - String.format("%s - %s", testName.getMethodName(), consumers.get(index).toString())); + String.format("%s - %s", testName.getDisplayName(), consumers.get(index).toString())); t.start(); threads.add(t); } @@ -1058,7 +1069,7 @@ private void pollMessagesAndCheck( for (final DataNodeWrapper wrapper : senderEnv.getDataNodeWrapperList()) { // wrapper.executeJstack(); wrapper.executeJstack( - String.format("%s_%s", testName.getMethodName(), currentTime[0])); + String.format("%s_%s", testName.getDisplayName(), currentTime[0])); } currentTime[0] = System.currentTimeMillis(); } diff --git 
a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTimePrecisionIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTimePrecisionIT.java index 39a9f2225f6bb..9a5886436992b 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTimePrecisionIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTimePrecisionIT.java @@ -22,7 +22,7 @@ import org.apache.iotdb.db.it.utils.TestUtils; import org.apache.iotdb.isession.ISession; import org.apache.iotdb.it.framework.IoTDBTestRunner; -import org.apache.iotdb.itbase.category.MultiClusterIT2Subscription; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionArchVerification; import org.apache.iotdb.rpc.subscription.config.TopicConstant; import org.apache.iotdb.session.subscription.SubscriptionSession; import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; @@ -30,6 +30,7 @@ import org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant; import org.apache.tsfile.write.record.Tablet; +import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; @@ -49,19 +50,35 @@ import static org.junit.Assert.fail; @RunWith(IoTDBTestRunner.class) -@Category({MultiClusterIT2Subscription.class}) +@Category({MultiClusterIT2SubscriptionArchVerification.class}) public class IoTDBSubscriptionTimePrecisionIT extends AbstractSubscriptionDualIT { private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBSubscriptionTimePrecisionIT.class); + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + } + @Override protected void setUpConfig() { super.setUpConfig(); // Set timestamp precision to nanosecond senderEnv.getConfig().getCommonConfig().setTimestampPrecision("ns"); + senderEnv + .getConfig() + .getCommonConfig() + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); receiverEnv.getConfig().getCommonConfig().setTimestampPrecision("ns"); + receiverEnv + .getConfig() + .getCommonConfig() + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); } @Test @@ -160,7 +177,7 @@ public void testTopicTimePrecision() throws Exception { LOGGER.info("consumer exiting..."); } }, - String.format("%s - consumer", testName.getMethodName())); + String.format("%s - consumer", testName.getDisplayName())); thread.start(); // Check data on receiver diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTopicIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTopicIT.java index ce8b46f2aead8..e701df2b42d5c 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTopicIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTopicIT.java @@ -27,7 +27,7 @@ import org.apache.iotdb.db.it.utils.TestUtils; import org.apache.iotdb.isession.ISession; import org.apache.iotdb.it.framework.IoTDBTestRunner; -import org.apache.iotdb.itbase.category.MultiClusterIT2Subscription; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionArchVerification; import org.apache.iotdb.rpc.RpcUtils; import org.apache.iotdb.rpc.subscription.config.TopicConstant; import org.apache.iotdb.session.subscription.SubscriptionSession; @@ -44,6 +44,7 @@ import 
org.apache.tsfile.read.query.dataset.QueryDataSet; import org.apache.tsfile.write.record.Tablet; import org.junit.Assert; +import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; @@ -68,11 +69,17 @@ import static org.junit.Assert.fail; @RunWith(IoTDBTestRunner.class) -@Category({MultiClusterIT2Subscription.class}) +@Category({MultiClusterIT2SubscriptionArchVerification.class}) public class IoTDBSubscriptionTopicIT extends AbstractSubscriptionDualIT { private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBSubscriptionTopicIT.class); + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + } + @Override protected void setUpConfig() { super.setUpConfig(); @@ -84,6 +91,17 @@ protected void setUpConfig() { .setPipeHeartbeatIntervalSecondsForCollectingPipeMeta(30); senderEnv.getConfig().getCommonConfig().setPipeMetaSyncerInitialSyncDelayMinutes(1); senderEnv.getConfig().getCommonConfig().setPipeMetaSyncerSyncIntervalMinutes(1); + senderEnv + .getConfig() + .getCommonConfig() + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); + + receiverEnv + .getConfig() + .getCommonConfig() + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); } @Test @@ -162,7 +180,7 @@ private void testTopicWithPathTemplate(final String topicFormat) throws Exceptio LOGGER.info("consumer exiting..."); } }, - String.format("%s - consumer", testName.getMethodName())); + String.format("%s - consumer", testName.getDisplayName())); thread.start(); // Check data on receiver @@ -263,7 +281,7 @@ private void testTopicWithTimeTemplate(final String topicFormat) throws Exceptio LOGGER.info("consumer exiting..."); } }, - String.format("%s - consumer", testName.getMethodName())); + String.format("%s - consumer", testName.getDisplayName())); thread.start(); // Check data on receiver @@ -360,7 +378,7 @@ private void testTopicWithProcessorTemplate(final String topicFormat) throws Exc LOGGER.info("consumer exiting..."); } }, - String.format("%s - consumer", testName.getMethodName())); + String.format("%s - consumer", testName.getDisplayName())); thread.start(); // Check data on receiver @@ -484,7 +502,7 @@ public void testTopicNameWithBackQuote() throws Exception { LOGGER.info("consumer exiting..."); } }, - String.format("%s - consumer", testName.getMethodName())); + String.format("%s - consumer", testName.getDisplayName())); thread.start(); // Check data on receiver @@ -658,7 +676,7 @@ private void testTopicWithSnapshotModeTemplate(final String topicFormat) throws LOGGER.info("consumer exiting..."); } }, - String.format("%s - consumer", testName.getMethodName())); + String.format("%s - consumer", testName.getDisplayName())); thread.start(); try { @@ -769,7 +787,7 @@ private void testTopicWithLooseRangeTemplate(final String topicFormat) throws Ex LOGGER.info("consumer exiting..."); } }, - String.format("%s - consumer", testName.getMethodName()))); + String.format("%s - consumer", testName.getDisplayName()))); // Insert some realtime data on sender threads.add( @@ -792,7 +810,7 @@ private void testTopicWithLooseRangeTemplate(final String topicFormat) throws Ex } dataPrepared.set(true); }, - String.format("%s - data inserter", testName.getMethodName()))); + String.format("%s - data inserter", testName.getDisplayName()))); for (final Thread thread : threads) { thread.start(); @@ -820,6 +838,78 @@ private void testTopicWithLooseRangeTemplate(final String topicFormat) throws Ex } } + 
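+ // A snapshot-mode topic over an empty dataset should still terminate cleanly: the consumer
+ // below polls until allTopicMessagesHaveBeenConsumed() reports completion, after which
+ // 'show subscription' on the sender is expected to be empty.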
@Test + public void testSnapshotModeWithEmptyData() throws Exception { + // Create topic + final String topicName = "topic11"; + final String host = senderEnv.getIP(); + final int port = Integer.parseInt(senderEnv.getPort()); + try (final SubscriptionSession session = new SubscriptionSession(host, port)) { + session.open(); + final Properties config = new Properties(); + config.put(TopicConstant.MODE_KEY, TopicConstant.MODE_SNAPSHOT_VALUE); + session.createTopic(topicName, config); + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + assertTopicCount(1); + + // Subscription + final Thread thread = + new Thread( + () -> { + try (final SubscriptionPullConsumer consumer = + new SubscriptionPullConsumer.Builder() + .host(host) + .port(port) + .consumerId("c1") + .consumerGroupId("cg1") + .autoCommit(true) + .buildPullConsumer()) { + consumer.open(); + consumer.subscribe(topicName); + + while (!consumer.allTopicMessagesHaveBeenConsumed()) { + LockSupport.parkNanos(IoTDBSubscriptionITConstant.SLEEP_NS); // wait some time + consumer.poll(IoTDBSubscriptionITConstant.POLL_TIMEOUT_MS); // poll and ignore + } + + // Exiting the loop represents passing the awaitility test, at this point the result + // of 'show subscription' is empty, so there is no need to explicitly unsubscribe. + } catch (final Exception e) { + e.printStackTrace(); + // Avoid failure + } finally { + LOGGER.info("consumer exiting..."); + } + }, + String.format("%s - consumer", testName.getDisplayName())); + thread.start(); + + try { + // Keep retrying if there are execution failures + AWAIT.untilAsserted( + () -> { + // Check empty subscription + try (final SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) senderEnv.getLeaderConfigNodeConnection()) { + final TShowSubscriptionResp showSubscriptionResp = + client.showSubscription(new TShowSubscriptionReq()); + Assert.assertEquals( + RpcUtils.SUCCESS_STATUS.getCode(), showSubscriptionResp.status.getCode()); + Assert.assertNotNull(showSubscriptionResp.subscriptionInfoList); + Assert.assertEquals(0, showSubscriptionResp.subscriptionInfoList.size()); + } + }); + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } finally { + thread.join(); + } + } + /////////////////////////////// utility /////////////////////////////// private void assertTopicCount(final int count) throws Exception { diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/AbstractSubscriptionLocalIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/AbstractSubscriptionLocalIT.java index bb248f745d4ed..fe667480c86bf 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/AbstractSubscriptionLocalIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/AbstractSubscriptionLocalIT.java @@ -25,21 +25,29 @@ import org.junit.After; import org.junit.Before; -abstract class AbstractSubscriptionLocalIT extends AbstractSubscriptionIT { +public abstract class AbstractSubscriptionLocalIT extends AbstractSubscriptionIT { @Override @Before - public void setUp() { + public void setUp() throws Exception { super.setUp(); + // enable subscription + EnvFactory.getEnv() + .getConfig() + .getCommonConfig() + .setSubscriptionEnabled(true) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); + EnvFactory.getEnv().initClusterEnvironment(); } @Override @After - public void tearDown() { - super.tearDown(); - + public void tearDown() 
throws Exception { EnvFactory.getEnv().cleanClusterEnvironment(); + + super.tearDown(); } } diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionBasicIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionBasicIT.java index 68287ab66193e..a6750b0ff1907 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionBasicIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionBasicIT.java @@ -19,10 +19,14 @@ package org.apache.iotdb.subscription.it.local; +import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient; +import org.apache.iotdb.confignode.rpc.thrift.TShowSubscriptionReq; +import org.apache.iotdb.confignode.rpc.thrift.TShowSubscriptionResp; import org.apache.iotdb.isession.ISession; import org.apache.iotdb.it.env.EnvFactory; import org.apache.iotdb.it.framework.IoTDBTestRunner; import org.apache.iotdb.itbase.category.LocalStandaloneIT; +import org.apache.iotdb.rpc.RpcUtils; import org.apache.iotdb.rpc.subscription.config.TopicConstant; import org.apache.iotdb.session.subscription.SubscriptionSession; import org.apache.iotdb.session.subscription.consumer.AckStrategy; @@ -30,6 +34,7 @@ import org.apache.iotdb.session.subscription.consumer.ConsumeResult; import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.session.subscription.model.Subscription; import org.apache.iotdb.session.subscription.payload.SubscriptionMessage; import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet; import org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant; @@ -39,6 +44,7 @@ import org.apache.tsfile.read.expression.QueryExpression; import org.apache.tsfile.read.query.dataset.QueryDataSet; import org.junit.Assert; +import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; @@ -50,6 +56,7 @@ import java.util.Collections; import java.util.List; import java.util.Properties; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -65,6 +72,12 @@ public class IoTDBSubscriptionBasicIT extends AbstractSubscriptionLocalIT { private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBSubscriptionBasicIT.class); + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + } + @Test public void testBasicPullConsumerWithCommitAsync() throws Exception { // Insert some historical data @@ -162,7 +175,7 @@ public void onFailure(final Throwable e) { LOGGER.info("consumer exiting..."); } }, - String.format("%s - consumer", testName.getMethodName())); + String.format("%s - consumer", testName.getDisplayName())); thread.start(); // Check row count @@ -391,7 +404,7 @@ public void testPollUnsubscribedTopics() throws Exception { LOGGER.info("consumer exiting..."); } }, - String.format("%s - consumer", testName.getMethodName())); + String.format("%s - consumer", testName.getDisplayName())); thread.start(); // Check row count @@ -545,4 +558,164 @@ public void testDataSetDeduplication() { fail(e.getMessage()); } } + + // Same as + // org.apache.iotdb.subscription.it.local.IoTDBSubscriptionBasicIT.testDataSetDeduplication, + // but without specifying consumer id & consumer group id when
building consumer + @Test + public void testMissingConsumerId() { + // Insert some historical data + try (final ISession session = EnvFactory.getEnv().getSessionConnection()) { + session.createDatabase("root.db"); + for (int i = 0; i < 100; ++i) { + session.executeNonQueryStatement( + String.format("insert into root.db.d1(time, s1, s2) values (%s, 1, 2)", i)); + session.executeNonQueryStatement( + String.format("insert into root.db.d2(time, s1, s2) values (%s, 3, 4)", i)); + } + // DO NOT FLUSH HERE + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + + // Create topic + final String topicName = "topic7"; + final String host = EnvFactory.getEnv().getIP(); + final int port = Integer.parseInt(EnvFactory.getEnv().getPort()); + try (final SubscriptionSession session = new SubscriptionSession(host, port)) { + session.open(); + final Properties config = new Properties(); + config.put(TopicConstant.PATTERN_KEY, "root.db.d1.s1"); + session.createTopic(topicName, config); + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + + // Subscription + final AtomicInteger rowCount = new AtomicInteger(); + try (final SubscriptionPushConsumer consumer = + new SubscriptionPushConsumer.Builder() + .host(host) + .port(port) + .ackStrategy(AckStrategy.AFTER_CONSUME) + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + while (dataSet.hasNext()) { + dataSet.next(); + rowCount.addAndGet(1); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer()) { + + consumer.open(); + consumer.subscribe(topicName); + + AWAIT.untilAsserted( + () -> { + Assert.assertEquals(100, rowCount.get()); + Assert.assertNotNull(consumer.getConsumerId()); + Assert.assertNotNull(consumer.getConsumerGroupId()); + }); + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + } + + @Test + public void testDropSubscriptionBySession() throws Exception { + // Insert some historical data + try (final ISession session = EnvFactory.getEnv().getSessionConnection()) { + for (int i = 0; i < 100; ++i) { + session.executeNonQueryStatement( + String.format("insert into root.db.d1(time, s1) values (%s, 1)", i)); + } + session.executeNonQueryStatement("flush"); + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + + // Create topic + final String topicName = "topic8"; + final String host = EnvFactory.getEnv().getIP(); + final int port = Integer.parseInt(EnvFactory.getEnv().getPort()); + try (final SubscriptionSession session = new SubscriptionSession(host, port)) { + session.open(); + session.createTopic(topicName); + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + + // Subscription + final Thread thread = + new Thread( + () -> { + try (final SubscriptionPullConsumer consumer = + new SubscriptionPullConsumer.Builder() + .host(host) + .port(port) + .consumerId("c1") + .consumerGroupId("cg1") + .autoCommit(true) + .buildPullConsumer()) { + consumer.open(); + consumer.subscribe(topicName); + + while (!consumer.allTopicMessagesHaveBeenConsumed()) { + LockSupport.parkNanos(IoTDBSubscriptionITConstant.SLEEP_NS); // wait some time + consumer.poll(IoTDBSubscriptionITConstant.POLL_TIMEOUT_MS); // poll and ignore + } + } catch (final Exception e) { + e.printStackTrace(); + // Avoid failure + } finally { + LOGGER.info("consumer exiting..."); + } + }, + String.format("%s - consumer", testName.getDisplayName())); + thread.start(); + + 
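+ // Give the consumer thread a few seconds to subscribe and start polling, so the drop below
+ // exercises an actively consuming subscription.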
// Drop Subscription + LockSupport.parkNanos(5_000_000_000L); // wait some time + try (final SubscriptionSession session = new SubscriptionSession(host, port)) { + session.open(); + final Set<Subscription> subscriptions = session.getSubscriptions(topicName); + Assert.assertEquals(1, subscriptions.size()); + session.dropSubscription(subscriptions.iterator().next().getSubscriptionId()); + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + + try { + // Keep retrying if there are execution failures + AWAIT.untilAsserted( + () -> { + // Check empty subscription + try (final SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) + EnvFactory.getEnv().getLeaderConfigNodeConnection()) { + final TShowSubscriptionResp showSubscriptionResp = + client.showSubscription(new TShowSubscriptionReq()); + Assert.assertEquals( + RpcUtils.SUCCESS_STATUS.getCode(), showSubscriptionResp.status.getCode()); + Assert.assertNotNull(showSubscriptionResp.subscriptionInfoList); + Assert.assertEquals(0, showSubscriptionResp.subscriptionInfoList.size()); + } + }); + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } finally { + thread.join(); + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionDataTypeIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionDataTypeIT.java index 413540a89c1f1..d102b2da081b9 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionDataTypeIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionDataTypeIT.java @@ -41,6 +41,7 @@ import org.apache.tsfile.utils.Binary; import org.apache.tsfile.write.record.Tablet; import org.junit.Assert; +import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; @@ -64,6 +65,12 @@ public class IoTDBSubscriptionDataTypeIT extends AbstractSubscriptionLocalIT { private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBSubscriptionDataTypeIT.class); + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + } + // ----------------------------- // // SessionDataSetsHandler format // // ----------------------------- // @@ -326,7 +333,7 @@ private void testPullConsumerSubscribeDataTemplate( LOGGER.info("consumer exiting..."); } }, - String.format("%s - consumer", testName.getMethodName())); + String.format("%s - consumer", testName.getDisplayName())); thread.start(); // Check row count diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionIdempotentIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionIdempotentIT.java index 85fff437f18e6..73b866ce6739b 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionIdempotentIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionIdempotentIT.java @@ -26,6 +26,7 @@ import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; import org.junit.Assert; +import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; @@ -40,6 +41,12 @@ public class IoTDBSubscriptionIdempotentIT extends AbstractSubscriptionLocalIT { private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBSubscriptionIdempotentIT.class); +
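+ // The overridden setUp below simply delegates to the parent; it is declared here so the
+ // subclass keeps an explicit @Before hook with the widened throws clause.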
@Override + @Before + public void setUp() throws Exception { + super.setUp(); + } + @Test public void testSubscribeOrUnsubscribeNonExistedTopicTest() { final String host = EnvFactory.getEnv().getIP(); diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionTopicIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionTopicIT.java new file mode 100644 index 0000000000000..270bf05f9102c --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionTopicIT.java @@ -0,0 +1,185 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.local; + +import org.apache.iotdb.it.env.EnvFactory; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.LocalStandaloneIT; +import org.apache.iotdb.session.subscription.SubscriptionSession; +import org.apache.iotdb.session.subscription.model.Topic; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.util.Optional; +import java.util.Properties; + +import static org.junit.Assert.fail; + +@RunWith(IoTDBTestRunner.class) +@Category({LocalStandaloneIT.class}) +public class IoTDBSubscriptionTopicIT extends AbstractSubscriptionLocalIT { + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + } + + @Test + public void testBasicCreateTopic() { + final String host = EnvFactory.getEnv().getIP(); + final int port = Integer.parseInt(EnvFactory.getEnv().getPort()); + + try (final SubscriptionSession session = new SubscriptionSession(host, port)) { + session.open(); + // create topic + String topicName = "topic1"; + session.createTopic(topicName); + Assert.assertTrue(session.getTopic(topicName).isPresent()); + Assert.assertEquals(topicName, session.getTopic(topicName).get().getTopicName()); + + // create topic + topicName = "topic2"; + Properties properties = new Properties(); + properties.put("path", "root.**"); + properties.put("start-time", "2023-01-01"); + properties.put("end-time", "2023-12-31"); + properties.put("format", "TsFileHandler"); + session.createTopic(topicName, properties); + Optional<Topic> topic = session.getTopic(topicName); + Assert.assertTrue(topic.isPresent()); + Assert.assertEquals(topicName, topic.get().getTopicName()); + // verify topic parameters + Assert.assertTrue(topic.get().getTopicAttributes().contains("path=root.**")); + Assert.assertTrue(topic.get().getTopicAttributes().contains("start-time=2023-01-01")); + Assert.assertTrue(topic.get().getTopicAttributes().contains("end-time=2023-12-31")); +
Assert.assertTrue(topic.get().getTopicAttributes().contains("format=TsFileHandler")); + + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + } + + @Test + public void testBasicCreateTopicIfNotExists() { + final String host = EnvFactory.getEnv().getIP(); + final int port = Integer.parseInt(EnvFactory.getEnv().getPort()); + + try (final SubscriptionSession session = new SubscriptionSession(host, port)) { + session.open(); + // create topic if not exists + String topicName = "topic3"; + session.createTopicIfNotExists(topicName); + Optional<Topic> topic = session.getTopic(topicName); + Assert.assertTrue(topic.isPresent()); + Assert.assertEquals(topicName, topic.get().getTopicName()); + + // create topic if not exists + session.createTopicIfNotExists(topicName); + topic = session.getTopic(topicName); + Assert.assertTrue(topic.isPresent()); + Assert.assertEquals(topicName, topic.get().getTopicName()); + + // create topic if not exists + topicName = "topic4"; + Properties properties = new Properties(); + properties.put("path", "root.**"); + properties.put("start-time", "2023-01-01"); + properties.put("end-time", "2023-12-31"); + properties.put("format", "TsFileHandler"); + session.createTopicIfNotExists(topicName, properties); + topic = session.getTopic(topicName); + Assert.assertTrue(topic.isPresent()); + Assert.assertEquals(topicName, topic.get().getTopicName()); + // verify topic parameters + Assert.assertTrue(topic.get().getTopicAttributes().contains("path=root.**")); + Assert.assertTrue(topic.get().getTopicAttributes().contains("start-time=2023-01-01")); + Assert.assertTrue(topic.get().getTopicAttributes().contains("end-time=2023-12-31")); + Assert.assertTrue(topic.get().getTopicAttributes().contains("format=TsFileHandler")); + + // create topic if not exists + properties.put("start-time", "2023-01-02"); + session.createTopicIfNotExists(topicName, properties); + topic = session.getTopic(topicName); + Assert.assertTrue(topic.isPresent()); + Assert.assertEquals(topicName, topic.get().getTopicName()); + // verify topic parameters + Assert.assertTrue(topic.get().getTopicAttributes().contains("path=root.**")); + Assert.assertTrue(topic.get().getTopicAttributes().contains("start-time=2023-01-01")); + Assert.assertFalse(topic.get().getTopicAttributes().contains("start-time=2023-01-02")); + Assert.assertTrue(topic.get().getTopicAttributes().contains("end-time=2023-12-31")); + Assert.assertTrue(topic.get().getTopicAttributes().contains("format=TsFileHandler")); + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + } + + @Test + public void testBasicDropTopic() { + final String host = EnvFactory.getEnv().getIP(); + final int port = Integer.parseInt(EnvFactory.getEnv().getPort()); + + try (final SubscriptionSession session = new SubscriptionSession(host, port)) { + session.open(); + // create topic + String topicName = "topic5"; + session.createTopic(topicName); + + // drop topic + session.dropTopic(topicName); + Assert.assertFalse(session.getTopic(topicName).isPresent()); + + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + } + + @Test + public void testBasicDropTopicIfExists() { + final String host = EnvFactory.getEnv().getIP(); + final int port = Integer.parseInt(EnvFactory.getEnv().getPort()); + + try (final SubscriptionSession session = new SubscriptionSession(host, port)) { + session.open(); + // create topic + String topicName = "topic6"; + session.createTopic(topicName); + + // drop topic if exists +
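+ // (the second dropTopicIfExists call below verifies that dropping an already-absent topic
+ // succeeds as a no-op rather than failing)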
session.dropTopicIfExists(topicName); + Assert.assertFalse(session.getTopic(topicName).isPresent()); + + // drop topic if exists + session.dropTopicIfExists(topicName); + Assert.assertFalse(session.getTopic(topicName).isPresent()); + + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/AbstractSubscriptionTripleIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/AbstractSubscriptionTripleIT.java index d0d23a0407f0e..8f43e934c2936 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/AbstractSubscriptionTripleIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/AbstractSubscriptionTripleIT.java @@ -27,7 +27,7 @@ import org.junit.After; import org.junit.Before; -abstract class AbstractSubscriptionTripleIT extends AbstractSubscriptionIT { +public abstract class AbstractSubscriptionTripleIT extends AbstractSubscriptionIT { protected BaseEnv sender; protected BaseEnv receiver1; @@ -35,7 +35,7 @@ abstract class AbstractSubscriptionTripleIT extends AbstractSubscriptionIT { @Override @Before - public void setUp() { + public void setUp() throws Exception { super.setUp(); // increase the number of threads to speed up testing @@ -55,24 +55,48 @@ public void setUp() { } protected void setUpConfig() { + // enable subscription + sender.getConfig().getCommonConfig().setSubscriptionEnabled(true); + receiver1.getConfig().getCommonConfig().setSubscriptionEnabled(true); + receiver2.getConfig().getCommonConfig().setSubscriptionEnabled(true); + // enable auto create schema sender.getConfig().getCommonConfig().setAutoCreateSchemaEnabled(true); receiver1.getConfig().getCommonConfig().setAutoCreateSchemaEnabled(true); receiver2.getConfig().getCommonConfig().setAutoCreateSchemaEnabled(true); // 10 min, assert that the operations will not time out - sender.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); - receiver1.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); - receiver2.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); + sender.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + receiver1.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + receiver2.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + + // reduce tsfile batch memory usage + sender + .getConfig() + .getCommonConfig() + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false) + .setSubscriptionPrefetchTsFileBatchMaxDelayInMs(500) + .setSubscriptionPrefetchTsFileBatchMaxSizeInBytes(32 * 1024); + receiver1 + .getConfig() + .getCommonConfig() + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); + receiver2 + .getConfig() + .getCommonConfig() + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); } @Override @After - public void tearDown() { - super.tearDown(); - + public void tearDown() throws Exception { sender.cleanClusterEnvironment(); receiver1.cleanClusterEnvironment(); receiver2.cleanClusterEnvironment(); + + super.tearDown(); } } diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/IoTDBSubscriptionSharingIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/IoTDBSubscriptionSharingIT.java index 0dc6edf1548c6..63f560a665f54 100644 --- 
a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/IoTDBSubscriptionSharingIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/IoTDBSubscriptionSharingIT.java @@ -22,7 +22,7 @@ import org.apache.iotdb.isession.ISession; import org.apache.iotdb.isession.SessionDataSet; import org.apache.iotdb.it.framework.IoTDBTestRunner; -import org.apache.iotdb.itbase.category.MultiClusterIT2Subscription; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionArchVerification; import org.apache.iotdb.itbase.env.BaseEnv; import org.apache.iotdb.rpc.IoTDBConnectionException; import org.apache.iotdb.rpc.StatementExecutionException; @@ -59,8 +59,12 @@ import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; import static org.junit.Assert.fail; +/** + * refer to {@link + * org.apache.iotdb.subscription.it.triple.regression.pushconsumer.multi.IoTDBMultiGroupVsMultiConsumerIT} + */ @RunWith(IoTDBTestRunner.class) -@Category({MultiClusterIT2Subscription.class}) +@Category({MultiClusterIT2SubscriptionArchVerification.class}) public class IoTDBSubscriptionSharingIT extends AbstractSubscriptionTripleIT { private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBSubscriptionSharingIT.class); @@ -334,7 +338,6 @@ private void preparePushConsumers() { return ConsumeResult.SUCCESS; }) .buildPushConsumer()); - consumers.add( new SubscriptionPushConsumer.Builder() .host(sender.getIP()) @@ -390,7 +393,7 @@ private void preparePushConsumers() { reader.query( QueryExpression.create( Collections.singletonList( - new Path(databasePrefix + 6 + ".d_0", "s_0", true)), + new Path(databasePrefix + "6.d_0", "s_0", true)), null)); while (dataset.hasNext()) { rowCount6.addAndGet(1); @@ -465,7 +468,7 @@ private void preparePushConsumers() { @Override @Before - public void setUp() { + public void setUp() throws Exception { super.setUp(); // prepare schemaList @@ -475,7 +478,7 @@ public void setUp() { @Override @After - public void tearDown() { + public void tearDown() throws Exception { // log some info try { LOGGER.info("[src] {} = {}", sql1, getCount(sender, sql1)); @@ -531,7 +534,8 @@ public void testSubscriptionSharing() { getCount(sender, sql1), getCount(receiver1, sql1) + getCount(receiver2, sql1)); // "c4,c6|topic2" - Assert.assertEquals(105, getCount(receiver1, sql2) + getCount(receiver2, sql2)); + Assert.assertEquals( + getCount(sender, sql2) - 400, getCount(receiver1, sql2) + getCount(receiver2, sql2)); // "c4,c5|c7,c9|topic3" final long topic3Total = getCount(receiver1, sql3) + getCount(receiver2, sql3); diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/AbstractSubscriptionRegressionIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/AbstractSubscriptionRegressionIT.java new file mode 100644 index 0000000000000..125195b5a95b9 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/AbstractSubscriptionRegressionIT.java @@ -0,0 +1,558 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression; + +import org.apache.iotdb.isession.SessionDataSet; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.ConsumerConstant; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.Session; +import org.apache.iotdb.session.subscription.SubscriptionSession; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionMessage; +import org.apache.iotdb.session.subscription.payload.SubscriptionTsFileHandler; +import org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.WrappedVoidSupplier; +import org.apache.iotdb.subscription.it.triple.AbstractSubscriptionTripleIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.read.TsFileReader; +import org.apache.tsfile.read.common.Path; +import org.apache.tsfile.read.common.RowRecord; +import org.apache.tsfile.read.expression.QueryExpression; +import org.apache.tsfile.read.query.dataset.QueryDataSet; +import org.apache.tsfile.write.record.Tablet; +import org.junit.After; +import org.junit.Assert; +import org.junit.Assume; +import org.junit.Before; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.text.SimpleDateFormat; +import java.time.Duration; +import java.time.ZoneId; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; +import java.util.Properties; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.POLL_TIMEOUT_MS; + +public abstract class AbstractSubscriptionRegressionIT extends AbstractSubscriptionTripleIT { + + private static final Logger LOGGER = + LoggerFactory.getLogger(AbstractSubscriptionRegressionIT.class); + private static final String DROP_DATABASE_SQL = "drop database "; + + protected static final SimpleDateFormat FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"); + + public String SRC_HOST; + public String DEST_HOST; + public String DEST_HOST2; + + public int SRC_PORT; + public int DEST_PORT; + public int DEST_PORT2; + + public SubscriptionSession subs; + + public Session session_src; + public Session session_dest; + public Session session_dest2; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + beforeSuite(); + } + + @Override + @After + public void tearDown() throws Exception { + afterSuite(); + super.tearDown(); + } + + public void beforeSuite() throws IoTDBConnectionException { + SRC_HOST = sender.getIP(); + DEST_HOST = receiver1.getIP(); + DEST_HOST2 = receiver2.getIP(); + + SRC_PORT = Integer.parseInt(sender.getPort()); + DEST_PORT = Integer.parseInt(receiver1.getPort()); + DEST_PORT2 = 
Integer.parseInt(receiver2.getPort()); + + session_src = + new Session.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .username("root") + .password("root") + .zoneId(ZoneId.of("Asia/Shanghai")) + .build(); + session_dest = + new Session.Builder() + .host(DEST_HOST) + .port(DEST_PORT) + .username("root") + .password("root") + .zoneId(ZoneId.of("Asia/Shanghai")) + .build(); + session_dest2 = + new Session.Builder() + .host(DEST_HOST2) + .port(DEST_PORT2) + .username("root") + .password("root") + .zoneId(ZoneId.of("Asia/Shanghai")) + .build(); + session_src.open(false); + session_dest.open(false); + session_dest2.open(false); + + subs = new SubscriptionSession(SRC_HOST, SRC_PORT); + subs.open(); + System.out.println("TestConfig beforeClass"); + } + + public void afterSuite() throws IoTDBConnectionException { + System.out.println("TestConfig afterClass"); + session_src.close(); + session_dest.close(); + session_dest2.close(); + subs.close(); + } + + public void createDB(String database) throws IoTDBConnectionException { + try { + session_src.createDatabase(database); + } catch (StatementExecutionException e) { + } + try { + session_dest.createDatabase(database); + } catch (StatementExecutionException e) { + } + try { + session_dest2.createDatabase(database); + } catch (StatementExecutionException e) { + } + } + + public void dropDB(String pattern) throws IoTDBConnectionException { + try { + session_src.executeNonQueryStatement(DROP_DATABASE_SQL + pattern + "*"); + } catch (StatementExecutionException e) { + System.out.println("### src:" + e); + } + try { + session_dest.executeNonQueryStatement(DROP_DATABASE_SQL + pattern + "*"); + } catch (StatementExecutionException e) { + System.out.println("### dest:" + e); + } + try { + session_dest2.executeNonQueryStatement(DROP_DATABASE_SQL + pattern + "*"); + } catch (StatementExecutionException e) { + System.out.println("### dest2:" + e); + } + } + + public SubscriptionPullConsumer create_pull_consumer( + String groupId, String consumerId, Boolean autoCommit, Long interval) + throws TException, IoTDBConnectionException, IOException, StatementExecutionException { + SubscriptionPullConsumer pullConsumer; + Properties properties = new Properties(); + properties.put(ConsumerConstant.HOST_KEY, SRC_HOST); + properties.put(ConsumerConstant.PORT_KEY, SRC_PORT); + if (groupId != null) { + properties.put(ConsumerConstant.CONSUMER_GROUP_ID_KEY, groupId); + } + if (consumerId != null) { + properties.put(ConsumerConstant.CONSUMER_ID_KEY, consumerId); + } + if (autoCommit != null) { + properties.put(ConsumerConstant.AUTO_COMMIT_KEY, autoCommit); + } + if (interval != null) { + properties.put(ConsumerConstant.AUTO_COMMIT_INTERVAL_MS_KEY, interval); + } + properties.put(ConsumerConstant.FILE_SAVE_DIR_KEY, "target/pull-subscription"); + pullConsumer = new SubscriptionPullConsumer(properties); + pullConsumer.open(); + return pullConsumer; + } + + public void createTopic_s( + String topicName, String pattern, String start, String end, boolean isTsfile) + throws IoTDBConnectionException, StatementExecutionException { + Properties properties = new Properties(); + if (pattern != null) { + properties.setProperty("path", pattern); + } + if (start != null) { + properties.setProperty("start-time", start); + } + if (end != null) { + properties.setProperty("end-time", end); + } + if (isTsfile) { + properties.setProperty("format", "TsFileHandler"); + } else { + properties.setProperty("format", "SessionDataSet"); + } + properties.setProperty("processor", "do-nothing-processor"); + 
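// "do-nothing-processor" keeps the processor stage of the underlying pipe a
+ // pass-through, so the tests observe exactly what the source extracts. For
+ // example, createTopic_s("t1", "root.db.**", "2023-01-01", "2023-12-31", true)
+ // would yield {path, start-time, end-time, format=TsFileHandler} plus this
+ // processor entry.
+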
subs.createTopic(topicName, properties);
+ }
+
+ public void createTopic_s(
+     String topicName,
+     String pattern,
+     String start,
+     String end,
+     boolean isTsfile,
+     String mode,
+     String loose_range)
+     throws IoTDBConnectionException, StatementExecutionException {
+   Properties properties = new Properties();
+   if (pattern != null) {
+     properties.setProperty(TopicConstant.PATH_KEY, pattern);
+   }
+   if (start != null) {
+     properties.setProperty(TopicConstant.START_TIME_KEY, start);
+   }
+   if (end != null) {
+     properties.setProperty(TopicConstant.END_TIME_KEY, end);
+   }
+   if (isTsfile) {
+     properties.setProperty(TopicConstant.FORMAT_KEY, "TsFileHandler");
+   } else {
+     properties.setProperty(TopicConstant.FORMAT_KEY, "SessionDataSet");
+   }
+   if (mode != null && !mode.isEmpty()) {
+     properties.setProperty(TopicConstant.MODE_KEY, mode);
+   }
+   if (loose_range != null && !loose_range.isEmpty()) {
+     properties.setProperty(TopicConstant.LOOSE_RANGE_KEY, loose_range);
+   }
+   subs.createTopic(topicName, properties);
+ }
+
+ public static long getCount(Session session, String sql)
+     throws IoTDBConnectionException, StatementExecutionException {
+   SessionDataSet dataSet = session.executeQueryStatement(sql);
+   while (dataSet.hasNext()) {
+     RowRecord rowRecord = dataSet.next();
+     // return the first (and only) aggregation column of the first row
+     return rowRecord.getFields().get(0).getLongV();
+   }
+   return 0;
+ }
+
+ public void check_count(int expect_count, String sql, String msg)
+     throws IoTDBConnectionException, StatementExecutionException {
+   assertEquals(getCount(session_dest, sql), expect_count, "Query count:" + msg);
+ }
+
+ public void check_count2(int expect_count, String sql, String msg)
+     throws IoTDBConnectionException, StatementExecutionException {
+   assertEquals(getCount(session_dest2, sql), expect_count, "Query count:" + msg);
+ }
+
+ public void consume_data(SubscriptionPullConsumer consumer, Session session)
+     throws TException,
+         IOException,
+         StatementExecutionException,
+         InterruptedException,
+         IoTDBConnectionException {
+   while (true) {
+     Thread.sleep(1000);
+
+     List<SubscriptionMessage> messages = consumer.poll(Duration.ofMillis(POLL_TIMEOUT_MS));
+     if (messages.isEmpty()) {
+       break;
+     }
+     for (final SubscriptionMessage message : messages) {
+       for (final Iterator<Tablet> it = message.getSessionDataSetsHandler().tabletIterator();
+           it.hasNext(); ) {
+         final Tablet tablet = it.next();
+         session.insertTablet(tablet);
+       }
+     }
+     consumer.commitSync(messages);
+   }
+ }
+
+ public int consume_tsfile(SubscriptionPullConsumer consumer, String device)
+     throws InterruptedException {
+   return consume_tsfile(consumer, Collections.singletonList(device)).get(0);
+ }
+
+ public List<Integer> consume_tsfile(SubscriptionPullConsumer consumer, List<String> devices)
+     throws InterruptedException {
+   List<AtomicInteger> rowCounts = new ArrayList<>(devices.size());
+   for (int i = 0; i < devices.size(); i++) {
+     rowCounts.add(new AtomicInteger(0));
+   }
+   AtomicInteger onReceived = new AtomicInteger(0);
+   while (true) {
+     Thread.sleep(1000);
+     // Note: poll keeps pulling until either a message is fetched or the timeout elapses.
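+     // The body below is the usual at-least-once pull pattern, in sketch:
+     //   List<SubscriptionMessage> msgs = consumer.poll(timeout);
+     //   ... count the rows in each returned tsfile ...
+     //   consumer.commitSync(msgs); // anything left uncommitted is redelivered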
+     List<SubscriptionMessage> messages = consumer.poll(Duration.ofMillis(POLL_TIMEOUT_MS));
+     if (messages.isEmpty()) {
+       break;
+     }
+     for (final SubscriptionMessage message : messages) {
+       onReceived.incrementAndGet();
+       // System.out.println(FORMAT.format(new Date()) + " onReceived=" + onReceived.get());
+       final SubscriptionTsFileHandler tsFileHandler = message.getTsFileHandler();
+       try (final TsFileReader tsFileReader = tsFileHandler.openReader()) {
+         for (int i = 0; i < devices.size(); i++) {
+           final Path path = new Path(devices.get(i), "s_0", true);
+           final QueryDataSet dataSet =
+               tsFileReader.query(QueryExpression.create(Collections.singletonList(path), null));
+           while (dataSet.hasNext()) {
+             RowRecord next = dataSet.next();
+             rowCounts.get(i).addAndGet(1);
+             System.out.println(next.getTimestamp() + "," + next.getFields());
+           }
+           // System.out.println(FORMAT.format(new Date()) + " consume tsfile " + i + ":" +
+           // rowCounts.get(i).get());
+         }
+       } catch (IOException e) {
+         throw new RuntimeException(e);
+       }
+     }
+     consumer.commitSync(messages);
+   }
+   List<Integer> results = new ArrayList<>(devices.size());
+   for (AtomicInteger rowCount : rowCounts) {
+     results.add(rowCount.get());
+   }
+   // the trailing element is the number of received tsfile messages
+   results.add(onReceived.get());
+   return results;
+ }
+
+ public void consume_data_await(
+     SubscriptionPullConsumer consumer, Session session, List<WrappedVoidSupplier> assertions) {
+   AWAIT.untilAsserted(
+       () -> {
+         List<SubscriptionMessage> messages = consumer.poll(Duration.ofMillis(POLL_TIMEOUT_MS));
+         if (messages.isEmpty()) {
+           session_src.executeNonQueryStatement("flush");
+         }
+         for (final SubscriptionMessage message : messages) {
+           for (final Iterator<Tablet> it = message.getSessionDataSetsHandler().tabletIterator();
+               it.hasNext(); ) {
+             final Tablet tablet = it.next();
+             session.insertTablet(tablet);
+           }
+         }
+         consumer.commitSync(messages);
+         for (final WrappedVoidSupplier assertion : assertions) {
+           assertion.get();
+         }
+       });
+ }
+
+ public void consume_tsfile_await(
+     SubscriptionPullConsumer consumer,
+     List<String> devices,
+     List<Integer> expected,
+     List<Boolean> allowGte) {
+   final List<AtomicInteger> counters = new ArrayList<>(devices.size());
+   for (int i = 0; i < devices.size(); i++) {
+     counters.add(new AtomicInteger(0));
+   }
+   AWAIT.untilAsserted(
+       () -> {
+         List<SubscriptionMessage> messages = consumer.poll(Duration.ofMillis(POLL_TIMEOUT_MS));
+         if (messages.isEmpty()) {
+           session_src.executeNonQueryStatement("flush");
+         }
+         for (final SubscriptionMessage message : messages) {
+           final SubscriptionTsFileHandler tsFileHandler = message.getTsFileHandler();
+           try (final TsFileReader tsFileReader = tsFileHandler.openReader()) {
+             for (int i = 0; i < devices.size(); i++) {
+               final Path path = new Path(devices.get(i), "s_0", true);
+               final QueryDataSet dataSet =
+                   tsFileReader.query(
+                       QueryExpression.create(Collections.singletonList(path), null));
+               while (dataSet.hasNext()) {
+                 dataSet.next();
+                 counters.get(i).addAndGet(1);
+               }
+             }
+           } catch (IOException e) {
+             throw new RuntimeException(e);
+           }
+         }
+         consumer.commitSync(messages);
+         for (int i = 0; i < devices.size(); i++) {
+           if (allowGte.get(i)) {
+             assertGte(counters.get(i).get(), expected.get(i));
+           } else {
+             assertEquals(counters.get(i).get(), expected.get(i));
+           }
+         }
+       });
+ }
+
+ public void consume_tsfile_await(
+     SubscriptionPullConsumer consumer, List<String> devices, List<Integer> expected) {
+   consume_tsfile_await(
+       consumer,
+       devices,
+       expected,
+       Stream.generate(() -> false).limit(devices.size()).collect(Collectors.toList()));
+ }
+
+ public void consume_tsfile_with_file_count_await(
+     SubscriptionPullConsumer consumer, List<String> devices, List<Integer> expected) {
+   final List<AtomicInteger>
counters = new ArrayList<>(devices.size());
+   for (int i = 0; i < devices.size(); i++) {
+     counters.add(new AtomicInteger(0));
+   }
+   AtomicInteger onReceived = new AtomicInteger(0);
+   AWAIT.untilAsserted(
+       () -> {
+         List<SubscriptionMessage> messages = consumer.poll(Duration.ofMillis(POLL_TIMEOUT_MS));
+         if (messages.isEmpty()) {
+           session_src.executeNonQueryStatement("flush");
+         }
+         for (final SubscriptionMessage message : messages) {
+           onReceived.incrementAndGet();
+           final SubscriptionTsFileHandler tsFileHandler = message.getTsFileHandler();
+           try (final TsFileReader tsFileReader = tsFileHandler.openReader()) {
+             for (int i = 0; i < devices.size(); i++) {
+               final Path path = new Path(devices.get(i), "s_0", true);
+               final QueryDataSet dataSet =
+                   tsFileReader.query(
+                       QueryExpression.create(Collections.singletonList(path), null));
+               while (dataSet.hasNext()) {
+                 dataSet.next();
+                 counters.get(i).addAndGet(1);
+               }
+             }
+           } catch (IOException e) {
+             throw new RuntimeException(e);
+           }
+         }
+         consumer.commitSync(messages);
+         for (int i = 0; i < devices.size(); i++) {
+           assertEquals(counters.get(i).get(), expected.get(i));
+         }
+         // the trailing element of `expected` is the expected tsfile message count
+         assertEquals(onReceived.get(), expected.get(devices.size()));
+       });
+ }
+
+ //////////////////////////// strict assertions ////////////////////////////
+
+ public static void assertEquals(int actual, int expected) {
+   Assert.assertEquals(expected, actual);
+ }
+
+ public static void assertEquals(Integer actual, int expected) {
+   Assert.assertEquals(expected, (Object) actual);
+ }
+
+ public static void assertEquals(int actual, Integer expected) {
+   Assert.assertEquals((Object) expected, actual);
+ }
+
+ public static void assertEquals(Integer actual, Integer expected) {
+   Assert.assertEquals(expected, actual);
+ }
+
+ public static void assertEquals(int actual, int expected, String message) {
+   Assert.assertEquals(message, expected, actual);
+ }
+
+ public static void assertEquals(Integer actual, int expected, String message) {
+   Assert.assertEquals(message, expected, (Object) actual);
+ }
+
+ public static void assertEquals(int actual, Integer expected, String message) {
+   Assert.assertEquals(message, (Object) expected, actual);
+ }
+
+ public static void assertEquals(Integer actual, Integer expected, String message) {
+   Assert.assertEquals(message, expected, actual);
+ }
+
+ public static void assertEquals(long actual, long expected, String message) {
+   Assert.assertEquals(message, expected, actual);
+ }
+
+ public static void assertEquals(boolean actual, boolean expected, String message) {
+   Assert.assertEquals(message, expected, actual);
+ }
+
+ public static void assertTrue(boolean condition) {
+   Assert.assertTrue(condition);
+ }
+
+ public static void assertTrue(boolean condition, String message) {
+   Assert.assertTrue(message, condition);
+ }
+
+ public static void assertFalse(boolean condition) {
+   Assert.assertFalse(condition);
+ }
+
+ public static void assertFalse(boolean condition, String message) {
+   Assert.assertFalse(message, condition);
+ }
+
+ //////////////////////////// non-strict assertions ////////////////////////////
+
+ public static void assertGte(int actual, int expected) {
+   assertGte(actual, expected, null);
+ }
+
+ public static void assertGte(int actual, int expected, String message) {
+   assertGte((long) actual, expected, message);
+ }
+
+ public static void assertGte(long actual, long expected, String message) {
+   assertTrue(actual >= expected, message);
+   if (actual != expected) {
+     String skipMessage = actual + " should be equal to " + expected;
+     if
(Objects.nonNull(message)) { + skipMessage += ", message: " + message; + } + LOGGER.warn(skipMessage); + Assume.assumeTrue(skipMessage, actual == expected); + } + } + + public void check_count_non_strict(int expect_count, String sql, String msg) + throws IoTDBConnectionException, StatementExecutionException { + assertGte(getCount(session_dest, sql), expect_count, "Query count: " + msg); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/auto_create_db/IoTDBDefaultPullConsumerDataSetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/auto_create_db/IoTDBDefaultPullConsumerDataSetIT.java new file mode 100644 index 0000000000000..0b36deed5eb08 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/auto_create_db/IoTDBDefaultPullConsumerDataSetIT.java @@ -0,0 +1,153 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.auto_create_db; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionMisc; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionMisc.class}) +public class IoTDBDefaultPullConsumerDataSetIT extends AbstractSubscriptionRegressionIT { + public static SubscriptionPullConsumer consumer; + private int deviceCount = 3; + private static final String databasePrefix = "root.DefaultPullConsumerDataSet"; + private static String topicName = "topic_autodb_DefaultPullConsumerDataSet"; + private static List schemaList = new ArrayList<>(); + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createTopic_s(topicName, null, null, null, false); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT32)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + 
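// note: s_0 (INT32) and s_1 (DOUBLE) must stay in sync with the columns
+ // written by insert_data below.
+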
subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + for (int i = 0; i < deviceCount; i++) { + session_src.executeNonQueryStatement("create database " + databasePrefix + i); + session_dest.executeNonQueryStatement("create database " + databasePrefix + i); + } + } + + @Override + @After + public void tearDown() throws Exception { + consumer.close(); + subs.dropTopic(topicName); + for (int i = 0; i < deviceCount; i++) { + session_src.executeNonQueryStatement("drop database " + databasePrefix + i); + session_dest.executeNonQueryStatement("drop database " + databasePrefix + i); + } + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20 + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + List devices = new ArrayList<>(deviceCount); + for (int i = 0; i < deviceCount; i++) { + devices.add(databasePrefix + i + ".d_0"); + } + consumer = create_pull_consumer("pull_auto_create_db", "default_pattern_dataset", false, null); + for (int i = 0; i < deviceCount; i++) { + // Write data before subscribing + insert_data(1706659200000L, devices.get(i)); // 2024-01-31 08:00:00+08:00 + } + // Subscribe + consumer.subscribe(topicName); + assertEquals( + subs.getSubscriptions(topicName).size(), 1, "show subscriptions after subscription"); + for (int i = 0; i < deviceCount; i++) { + insert_data(System.currentTimeMillis(), devices.get(i)); + } + String sql = "select count(s_0) from " + databasePrefix + "0.d_0"; + System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql)); + // Consumption data + AWAIT.untilAsserted( + () -> { + session_src.executeNonQueryStatement("flush"); + consume_data(consumer, session_dest); + for (int i = 0; i < deviceCount; i++) { + check_count( + 10, "select count(s_0) from " + devices.get(i), i + ":Consumption Data:s_0"); + } + }); + // Unsubscribe + consumer.unsubscribe(topicName); + // Unsubscribe and then write data + for (int i = 0; i < deviceCount; i++) { + insert_data(1707782400000L, devices.get(i)); // 2024-02-13 08:00:00+08:00 + } + consumer.subscribe(topicName); + assertEquals( + subs.getSubscriptions(topicName).size(), 1, "show subscriptions after re-subscribing"); + System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql)); + // Consumption data: Progress is not retained when re-subscribing after cancellation. Full + // synchronization. 
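+ // (the 5 rows per device written while unsubscribed arrive on top of the 10
+ // already synced, hence the expected count of 15 below)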
+ AWAIT.untilAsserted( + () -> { + session_src.executeNonQueryStatement("flush"); + consume_data(consumer, session_dest); + for (int i = 0; i < deviceCount; i++) { + check_count( + 15, "select count(s_0) from " + devices.get(i), i + ":consume data again:s_0"); + } + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/auto_create_db/IoTDBDefaultTsfilePushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/auto_create_db/IoTDBDefaultTsfilePushConsumerIT.java new file mode 100644 index 0000000000000..ac3ee07bd7fb3 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/auto_create_db/IoTDBDefaultTsfilePushConsumerIT.java @@ -0,0 +1,234 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.auto_create_db; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionMisc; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.read.TsFileReader; +import org.apache.tsfile.read.common.Path; +import org.apache.tsfile.read.common.RowRecord; +import org.apache.tsfile.read.expression.QueryExpression; +import org.apache.tsfile.read.query.dataset.QueryDataSet; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer + * pattern: root.** + * TsFile + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionMisc.class}) +public class IoTDBDefaultTsfilePushConsumerIT extends AbstractSubscriptionRegressionIT { + private SubscriptionPushConsumer consumer; + private int deviceCount = 3; + private static final 
String databasePrefix = "root.DefaultTsfilePushConsumer"; + private static String topicName = "topicDefaultTsfilePushConsumer"; + private static List schemaList = new ArrayList<>(); + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createTopic_s(topicName, null, null, null, true); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT32)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + for (int i = 0; i < deviceCount; i++) { + session_src.executeNonQueryStatement("create database " + databasePrefix + i); + } + } + + @Override + protected void setUpConfig() { + super.setUpConfig(); + + IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(sender); + IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(receiver1); + IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(receiver2); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + for (int i = 0; i < deviceCount; i++) { + session_src.executeNonQueryStatement("drop database " + databasePrefix + i); + } + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (1 + row) * 20 + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + List devices = new ArrayList<>(deviceCount); + List paths = new ArrayList<>(deviceCount); + for (int i = 0; i < deviceCount; i++) { + devices.add(databasePrefix + i + ".d_0"); + paths.add(new Path(devices.get(i), "s_0", true)); + } + System.out.println("### Before Subscription Write Data ###"); + for (int i = 0; i < deviceCount; i++) { + insert_data(1706659200000L, devices.get(i)); // 2024-01-31 08:00:00+08:00 + } + session_src.executeNonQueryStatement("flush;"); + final AtomicInteger onReceiveCount = new AtomicInteger(0); + List rowCounts = new ArrayList<>(deviceCount); + for (int i = 0; i < deviceCount; i++) { + rowCounts.add(new AtomicInteger(0)); + } + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("default_pattern_TsFile_consumer") + .consumerGroupId("push_auto_create_db") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + onReceiveCount.incrementAndGet(); + System.out.println( + FORMAT.format(new Date()) + " ######## onReceived: " + onReceiveCount.get()); + try { + TsFileReader reader = message.getTsFileHandler().openReader(); + for (int i = 0; i < deviceCount; i++) { + QueryDataSet dataset = + reader.query( + QueryExpression.create( + Collections.singletonList(paths.get(i)), null)); + while (dataset.hasNext()) { + rowCounts.get(i).addAndGet(1); + RowRecord next = dataset.next(); + // System.out.println(format.format(new + // Date())+" "+next.getTimestamp()+","+next.getFields()); + } + 
System.out.println( + FORMAT.format(new Date()) + + " rowCounts_" + + i + + ":" + + rowCounts.get(i).get()); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions(topicName).forEach(System.out::println); + assertEquals( + subs.getSubscriptions(topicName).size(), 1, "After subscribing: show subscriptions"); + for (int i = 0; i < deviceCount; i++) { + insert_data(System.currentTimeMillis(), devices.get(i)); + System.out.println( + FORMAT.format(new Date()) + + " src " + + i + + ":" + + getCount(session_src, "select count(s_0) from " + devices.get(i))); + } + session_src.executeNonQueryStatement("flush;"); + AWAIT.untilAsserted( + () -> { + for (int i = 0; i < deviceCount; i++) { + assertEquals(rowCounts.get(i).get(), 10, devices.get(i) + ".s_0"); + } + }); + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + System.out.println("### Subscribe and write data ###"); + assertEquals( + subs.getSubscriptions(topicName).size(), 1, "After subscribing again: show subscriptions"); + for (int i = 0; i < deviceCount; i++) { + insert_data(1707782400000L, devices.get(i)); // 2024-02-13 08:00:00+08:00 + System.out.println( + FORMAT.format(new Date()) + + " src " + + i + + ":" + + getCount(session_src, "select count(s_0) from " + devices.get(i))); + } + session_src.executeNonQueryStatement("flush;"); + + // Unsubscribe, then it will consume all again. + AWAIT.untilAsserted( + () -> { + for (int i = 0; i < deviceCount; i++) { + assertEquals(rowCounts.get(i).get(), 25, devices.get(i) + ".s_0"); + } + }); + System.out.println(FORMAT.format(new Date()) + " onReceived: " + onReceiveCount.get()); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/auto_create_db/IoTDBRootDatasetPushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/auto_create_db/IoTDBRootDatasetPushConsumerIT.java new file mode 100644 index 0000000000000..beec83bc6222f --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/auto_create_db/IoTDBRootDatasetPushConsumerIT.java @@ -0,0 +1,185 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.auto_create_db; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionMisc; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer + * pattern: root + * DataSet + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionMisc.class}) +public class IoTDBRootDatasetPushConsumerIT extends AbstractSubscriptionRegressionIT { + private String pattern = "root.**"; + public static SubscriptionPushConsumer consumer; + private int deviceCount = 3; + private static final String databasePrefix = "root.RootDatasetPushConsumer"; + private static final String database2 = "root.RootDatasetPushConsumer2.test"; + private static String topicName = "topicAutoCreateDB_RootDatasetPushConsumer"; + private static List schemaList = new ArrayList<>(); + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createTopic_s(topicName, pattern, null, null, false); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT32)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(databasePrefix + "*.*"); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20 + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + List devices = new ArrayList<>(deviceCount); + for (int i = 0; i < deviceCount - 1; i++) { + devices.add(databasePrefix + i + ".d_0"); + } + devices.add(database2 + ".d_2"); + for (int i = 0; i < deviceCount; i++) { + // Write data before subscribing + insert_data(1706659200000L, devices.get(i)); // 2024-01-31 08:00:00+08:00 + 
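// (each insert_data call writes 5 rows spaced 2 s apart from the given
+ // timestamp)
+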
} + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("root_dataset_consumer") + .consumerGroupId("push_auto_create_db") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions(topicName).size(), 1, "subscribe:show subscriptions"); + for (int i = 0; i < deviceCount; i++) { + insert_data(System.currentTimeMillis(), devices.get(i)); + } + String sql = "select count(s_0) from " + databasePrefix + "0.d_0"; + System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql)); + AWAIT.untilAsserted( + () -> { + check_count(10, "select count(s_0) from " + devices.get(0), "0:consume data:s_0"); + for (int i = 1; i < deviceCount; i++) { + check_count(10, "select count(s_0) from " + devices.get(i), i + ":consume data:s_0"); + } + }); + + // Unsubscribe + consumer.unsubscribe(topicName); + for (int i = 0; i < deviceCount; i++) { + insert_data(1707782400000L, devices.get(i)); // 2024-02-13 08:00:00+08:00 + } + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals( + subs.getSubscriptions(topicName).size(), 1, "After subscribing again: show subscriptions"); + System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql)); + // Consumption data: Progress is not retained after unsubscribing and re-subscribing. Full + // synchronization. + AWAIT.untilAsserted( + () -> { + check_count( + 15, + "select count(s_0) from " + devices.get(0), + "0:After subscribing again:consume data:s_0"); + for (int i = 1; i < deviceCount; i++) { + check_count( + 15, + "select count(s_0) from " + devices.get(i), + i + ":After subscribing again:consume data:s_0"); + } + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/auto_create_db/IoTDBRootPullConsumeTsfileIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/auto_create_db/IoTDBRootPullConsumeTsfileIT.java new file mode 100644 index 0000000000000..169cbd7305f90 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/auto_create_db/IoTDBRootPullConsumeTsfileIT.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.auto_create_db; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionMisc; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/*** + * PullConsumer + * pattern: db + * Tsfile + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionMisc.class}) +public class IoTDBRootPullConsumeTsfileIT extends AbstractSubscriptionRegressionIT { + private static final String pattern = "root.**"; + private static final String device = "root.auto_create_db.RootPullConsumeTsfile.d_0"; + private static final String device2 = "root.RootPullConsumeTsfile.d_1"; + public static SubscriptionPullConsumer consumer; + private static String topicName = "topicAutoCreateDB_RootPullConsumeTsfile"; + private static List schemaList = new ArrayList<>(); + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createTopic_s(topicName, pattern, null, null, true); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT32)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + session_src.executeNonQueryStatement("create database root.auto_create_db"); + session_src.executeNonQueryStatement("create database root.RootPullConsumeTsfile"); + } + + @Override + protected void setUpConfig() { + super.setUpConfig(); + + IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(sender); + IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(receiver1); + IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(receiver2); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + session_src.executeNonQueryStatement("drop database root.auto_create_db"); + session_src.executeNonQueryStatement("drop database root.RootPullConsumeTsfile"); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException, InterruptedException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20 + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + 
IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L, device); // 2024-01-31 08:00:00+08:00 + insert_data(1706659200000L, device2); // 2024-01-31 08:00:00+08:00 + consumer = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("root_tsfile") + .consumerGroupId("pull_auto_create_db") + .autoCommit(false) + .fileSaveDir("target/pull-subscription") // hack for license check + .buildPullConsumer(); + consumer.open(); + consumer.subscribe(topicName); + subs.getSubscriptions(topicName).forEach(System.out::println); + assertEquals(subs.getSubscriptions(topicName).size(), 1, "subscribe:show subscriptions"); + insert_data(System.currentTimeMillis(), device); + insert_data(System.currentTimeMillis(), device2); + List devices = new ArrayList<>(2); + devices.add(device); + devices.add(device2); + consume_tsfile_await(consumer, devices, Arrays.asList(10, 10)); + consumer.unsubscribe(topicName); + assertEquals(subs.getSubscriptions(topicName).size(), 0, "unsubscribe:show subscriptions"); + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "subscribe again:show subscriptions"); + insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00 + insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00 + consume_tsfile_await(consumer, devices, Arrays.asList(15, 15)); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/mix/IoTDBPushConsumerPullConsumerWith1TopicShareProcessMixIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/mix/IoTDBPushConsumerPullConsumerWith1TopicShareProcessMixIT.java new file mode 100644 index 0000000000000..4df5ee114498f --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/mix/IoTDBPushConsumerPullConsumerWith1TopicShareProcessMixIT.java @@ -0,0 +1,195 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.mix; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionMisc; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer + * pattern: db + * Dataset + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionMisc.class}) +public class IoTDBPushConsumerPullConsumerWith1TopicShareProcessMixIT + extends AbstractSubscriptionRegressionIT { + private static String topicName = "`1-group.1-consumer.db`"; + private static List schemaList = new ArrayList<>(); + private final String database = "root.PushConsumerPullConsumerWith1TopicShareProcessMix"; + private final String device = database + ".d_0"; + private final String pattern = database + ".**"; + private SubscriptionPushConsumer consumer; + private SubscriptionPullConsumer consumer2; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createTopic_s(topicName, pattern, null, null, false); + createDB(database); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest2.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest2.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + consumer.close(); + consumer2.close(); + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 
10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + Thread thread = + new Thread( + () -> { + long timestamp = 1706659200000L; // 2024-01-31 08:00:00+08:00 + for (int i = 0; i < 100; i++) { + try { + insert_data(timestamp); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } + timestamp += 20000; + } + }); + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("dataset_push_consumer_1") + .consumerGroupId("db_pull_push_mix") + .ackStrategy(AckStrategy.BEFORE_CONSUME) + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + consumer.subscribe(topicName); + + consumer2 = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("dataset_pull_consumer_2") + .consumerGroupId("db_pull_push_mix") + .buildPullConsumer(); + consumer2.open(); + consumer2.subscribe(topicName); + + thread.start(); + thread.join(); + consume_data(consumer2, session_dest2); + System.out.println("After subscribing:"); + subs.getSubscriptions().forEach((System.out::println)); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions"); + + Thread.sleep(3000); + // The first 5 entries may have duplicate data + String sql = "select count(s_0) from " + device; + System.out.println("src push consumer: " + getCount(session_src, sql)); + System.out.println("dest push consumer: " + getCount(session_dest, sql)); + System.out.println("dest2 pull consumer: " + getCount(session_dest2, sql)); + AWAIT.untilAsserted( + () -> { + assertEquals( + getCount(session_dest, sql) + getCount(session_dest2, sql), + getCount(session_src, sql), + "share process"); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/param/IoTDBTestParamPullConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/param/IoTDBTestParamPullConsumerIT.java new file mode 100644 index 0000000000000..28ea92d3d0f4f --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/param/IoTDBTestParamPullConsumerIT.java @@ -0,0 +1,535 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.param; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionMisc; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.exception.SubscriptionConnectionException; +import org.apache.iotdb.rpc.subscription.exception.SubscriptionException; +import org.apache.iotdb.rpc.subscription.exception.SubscriptionIdentifierSemanticException; +import org.apache.iotdb.rpc.subscription.exception.SubscriptionRuntimeCriticalException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionMessage; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.read.TsFileReader; +import org.apache.tsfile.read.common.Path; +import org.apache.tsfile.read.common.RowRecord; +import org.apache.tsfile.read.expression.QueryExpression; +import org.apache.tsfile.read.query.dataset.QueryDataSet; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.atomic.AtomicInteger; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionMisc.class}) +public class IoTDBTestParamPullConsumerIT extends AbstractSubscriptionRegressionIT { + private static SubscriptionPullConsumer consumer; + private static final String topicName = "TestParamPullConsumerTopic1"; + private static final String database = "root.TestParamPullConsumer"; + private static final String device = database + ".d_0"; + private final String pattern = "root.**"; + private static List schemaList = new ArrayList<>(); + + static { + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + } + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + consumer = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_1") + .consumerGroupId("TestParamPullConsumer_1") + .buildPullConsumer(); + consumer.open(); + createDB(database); + createTopic_s(topicName, null, null, null, false); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, 
CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + subs.getTopics().forEach(System.out::println); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + session_src.executeNonQueryStatement("create user user02 'user02';"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + try { + session_src.executeNonQueryStatement("drop user user02"); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += row * 2000; + } + session_src.insertTablet(tablet); + } + + private void run_single(SubscriptionPullConsumer consumer, int index) + throws TException, + IoTDBConnectionException, + IOException, + StatementExecutionException, + InterruptedException { + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "subscribe then show subscriptions:" + index); + Long timestamp = 1706659200000L + 8000 * index; + // Write data + insert_data(timestamp); + // consume + consume_data(consumer, session_dest); + // Unsubscribe + consumer.unsubscribe(topicName); + consumer.close(); + check_count( + 4, + "select count(s_0) from " + device + " where time >= " + timestamp, + "Consumption data:" + pattern); + } + + @Test + public void testUnsetGroup() + throws TException, + IoTDBConnectionException, + IOException, + StatementExecutionException, + InterruptedException { + SubscriptionPullConsumer consumer1 = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_1") + .autoCommit(true) + .autoCommitIntervalMs(1000L) + .buildPullConsumer(); + run_single(consumer1, 1); + } + + @Test + public void testUnsetConsumer() + throws TException, + IoTDBConnectionException, + IOException, + StatementExecutionException, + InterruptedException { + SubscriptionPullConsumer consumer1 = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerGroupId("TestParamPullConsumer_group_id_1") + .autoCommit(true) + .autoCommitIntervalMs(1000L) + .buildPullConsumer(); + run_single(consumer1, 2); + } + + @Test + public void testUnsetConsumerGroup() + throws TException, + IoTDBConnectionException, + IOException, + StatementExecutionException, + InterruptedException { + SubscriptionPullConsumer consumer1 = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .autoCommit(true) + .autoCommitIntervalMs(1000L) + .buildPullConsumer(); + run_single(consumer1, 3); + } + + @Test + public void testAutoCommitIntervalNegative() + throws TException, + IoTDBConnectionException, + IOException, + StatementExecutionException, + InterruptedException { + SubscriptionPullConsumer consumer1 = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_1") + .consumerGroupId("TestParamPullConsumer") + 
.autoCommitIntervalMs(-1) + .buildPullConsumer(); + run_single(consumer1, 5); + } + + @Test + public void testDuplicateConsumerId() { + SubscriptionPullConsumer consumer = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("testDuplicateConsumerId") + .consumerGroupId("TestParamPullConsumer_1") + .autoCommitIntervalMs(-1) + .buildPullConsumer(); + SubscriptionPullConsumer consumer2 = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("testDuplicateConsumerId") + .consumerGroupId("TestParamPullConsumer_2") + .autoCommitIntervalMs(-1) + .buildPullConsumer(); + SubscriptionPullConsumer consumer3 = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("testDuplicateConsumerId") + .consumerGroupId("TestParamPullConsumer_1") + .autoCommitIntervalMs(-1) + .buildPullConsumer(); + consumer.open(); + consumer2.open(); + consumer3.open(); + consumer.close(); + consumer2.close(); + consumer3.close(); + } + + @Test + public void testNodeUrls() + throws TException, + IoTDBConnectionException, + IOException, + StatementExecutionException, + InterruptedException { + SubscriptionPullConsumer consumer1 = + new SubscriptionPullConsumer.Builder() + .nodeUrls(Collections.singletonList(SRC_HOST + ":" + SRC_PORT)) + .consumerId("testNodeUrls") + .consumerGroupId("TestParamPullConsumer") + .autoCommitIntervalMs(500L) + .buildPullConsumer(); + run_single(consumer1, 5); + } + + @Test(expected = NullPointerException.class) + public void testCreateConsumer_null() { + new SubscriptionPullConsumer(null).open(); + } + + @Test( + expected = + SubscriptionConnectionException.class) // connect to TEndPoint(ip:localhost, port:6667) + public void testCreateConsumer_empty() { + new SubscriptionPullConsumer(new Properties()).open(); + } + + @Test(expected = SubscriptionConnectionException.class) + public void testCreateConsumer_empty2() { + new SubscriptionPullConsumer.Builder().buildPullConsumer().open(); + } + + @Test(expected = SubscriptionIdentifierSemanticException.class) + public void testSubscribe_null() { + consumer.subscribe((String) null); + } + + @Test(expected = SubscriptionIdentifierSemanticException.class) + public void testSubscribe_empty() { + consumer.subscribe(""); + } + + @Test(expected = SubscriptionRuntimeCriticalException.class) + public void testSubscribe_notTopic() { + consumer.subscribe("topic_notCreate"); + } + + @Test + public void testSubscribe_dup() { + SubscriptionPullConsumer consumer1 = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("testSubscribe_dup") + .consumerGroupId("TestParamPullConsumer") + .buildPullConsumer(); + consumer1.open(); + consumer1.subscribe(topicName); + consumer1.subscribe(topicName); + consumer1.close(); + } + + @Test(expected = SubscriptionIdentifierSemanticException.class) + public void testUnSubscribe_null() { + consumer.unsubscribe((String) null); + } + + @Test(expected = SubscriptionIdentifierSemanticException.class) + public void testUnSubscribe_empty() { + consumer.unsubscribe(""); + } + + @Test(expected = SubscriptionRuntimeCriticalException.class) + public void testUnSubscribe_notTopic() { + consumer.unsubscribe("topic_notCreate"); + } + + @Test + public void testUnSubscribe_dup() { + SubscriptionPullConsumer consumer1 = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("testUnSubscribe_dup") + .consumerGroupId("TestParamPullConsumer") + .buildPullConsumer(); + 
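+    // Duplicate unsubscribe: the second unsubscribe call below is expected to be an
+    // idempotent no-op (this test declares no expected exception).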
consumer1.open(); + consumer1.subscribe(topicName); + consumer1.unsubscribe(topicName); + consumer1.unsubscribe(topicName); + consumer1.close(); + } + + @Test + public void testUnSubscribe_notSubs() + throws StatementExecutionException, IoTDBConnectionException { + subs.createTopic("t"); + SubscriptionPullConsumer consumer1 = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("testUnSubscribe_notSubs") + .consumerGroupId("TestParamPullConsumer") + .buildPullConsumer(); + consumer1.open(); + // No subscription, unsubscribe directly + consumer1.unsubscribe("t"); + consumer1.close(); + subs.dropTopic("t"); + } + + @Test(expected = SubscriptionException.class) + public void testSubscribe_AfterClose() { + SubscriptionPullConsumer consumer1 = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("testSubscribe_AfterClose") + .consumerGroupId("TestParamPullConsumer") + .buildPullConsumer(); + consumer1.open(); + consumer1.subscribe(topicName); + consumer1.close(); + consumer1.subscribe(topicName); + } + + @Test(expected = SubscriptionException.class) + public void testUnSubscribe_AfterClose() { + SubscriptionPullConsumer consumer1 = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("testUnSubscribe_AfterClose") + .consumerGroupId("TestParamPullConsumer") + .buildPullConsumer(); + consumer1.open(); + consumer1.close(); + consumer1.unsubscribe(topicName); + } + + @Test(expected = SubscriptionConnectionException.class) + public void testNoUser() { + String userName = "user01"; + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .username(userName) + .buildPullConsumer() + .open(); + } + + @Test(expected = SubscriptionConnectionException.class) + public void testErrorPasswd() { + String userName = "user02"; + SubscriptionPullConsumer consumer1 = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .username(userName) + .buildPullConsumer(); + consumer1.open(); + consumer1.close(); + } + + @Test + public void testTsfile_ts() + throws IoTDBConnectionException, + StatementExecutionException, + InterruptedException, + IOException { + String t1 = "tsTopic"; + createTopic_s(t1, database + ".d_0.s_0", null, null, true); + try (SubscriptionPullConsumer consumer1 = + new SubscriptionPullConsumer.Builder().host(SRC_HOST).port(SRC_PORT).buildPullConsumer()) { + consumer1.open(); + consumer1.subscribe(t1); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_0(time,s_0,s_1) values (1,10,20),(1000,30,60);"); + session_src.executeNonQueryStatement("flush;"); + Thread.sleep(3000L); + AtomicInteger rowCount = new AtomicInteger(0); + while (true) { + Thread.sleep(1000L); + List messages = consumer1.poll(Duration.ofMillis(1000)); + if (messages.isEmpty()) { + break; + } + } + } + subs.dropTopic(t1); + } + + @Test + public void testTsfile_ts_normal() + throws IoTDBConnectionException, + StatementExecutionException, + InterruptedException, + IOException { + String t1 = "tsTopicNormal"; + String device = database + ".d_1"; + createTopic_s(t1, device + ".s_0", null, null, true); + try (SubscriptionPullConsumer consumer1 = + new SubscriptionPullConsumer.Builder().host(SRC_HOST).port(SRC_PORT).buildPullConsumer()) { + consumer1.open(); + consumer1.subscribe(t1); + session_src.executeNonQueryStatement( + "insert into " + device + "(time,s_0) values (1,10),(1000,30);"); + session_src.executeNonQueryStatement("flush;"); + Thread.sleep(3000L); + AtomicInteger rowCount = new 
AtomicInteger(0);
+      while (true) {
+        Thread.sleep(1000L);
+        List<SubscriptionMessage> messages = consumer1.poll(Duration.ofMillis(1000));
+        if (messages.isEmpty()) {
+          break;
+        }
+        for (final SubscriptionMessage message : messages) {
+          TsFileReader reader = message.getTsFileHandler().openReader();
+          QueryDataSet dataset =
+              reader.query(
+                  QueryExpression.create(
+                      Collections.singletonList(new Path(device, "s_0", true)), null));
+          while (dataset.hasNext()) {
+            rowCount.addAndGet(1);
+            RowRecord next = dataset.next();
+            System.out.println(device + ":" + next.getTimestamp() + "," + next.getFields());
+          }
+        }
+        consumer1.commitSync(messages);
+      }
+    }
+    subs.dropTopic(t1);
+  }
+
+  @Test
+  public void testTsfile_device()
+      throws IoTDBConnectionException,
+          StatementExecutionException,
+          InterruptedException,
+          IOException {
+    String t1 = "DeviceTopic";
+    String device = database + ".d2";
+    createTopic_s(t1, device + ".**", null, null, true);
+    try (SubscriptionPullConsumer consumer1 =
+        new SubscriptionPullConsumer.Builder().host(SRC_HOST).port(SRC_PORT).buildPullConsumer()) {
+      consumer1.open();
+      consumer1.subscribe(t1);
+      session_src.executeNonQueryStatement(
+          "insert into " + device + "(time,s_0,s_1) values (1,10,20),(1000,30,60);");
+      session_src.executeNonQueryStatement("flush;");
+      Thread.sleep(3000L);
+      AtomicInteger rowCount = new AtomicInteger(0);
+      while (true) {
+        Thread.sleep(1000L);
+        List<SubscriptionMessage> messages = consumer1.poll(Duration.ofMillis(1000));
+        if (messages.isEmpty()) {
+          break;
+        }
+        for (final SubscriptionMessage message : messages) {
+          TsFileReader reader = message.getTsFileHandler().openReader();
+          QueryDataSet dataset =
+              reader.query(
+                  QueryExpression.create(
+                      Collections.singletonList(new Path(device, "s_0", true)), null));
+          while (dataset.hasNext()) {
+            rowCount.addAndGet(1);
+            RowRecord next = dataset.next();
+            System.out.println(device + ":" + next.getTimestamp() + "," + next.getFields());
+          }
+        }
+        consumer1.commitSync(messages);
+      }
+    }
+    subs.dropTopic(t1);
+  }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/param/IoTDBTestParamPushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/param/IoTDBTestParamPushConsumerIT.java
new file mode 100644
index 0000000000000..67213cd0ef4b3
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/param/IoTDBTestParamPushConsumerIT.java
@@ -0,0 +1,350 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.param; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionMisc; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.exception.SubscriptionConnectionException; +import org.apache.iotdb.rpc.subscription.exception.SubscriptionException; +import org.apache.iotdb.rpc.subscription.exception.SubscriptionIdentifierSemanticException; +import org.apache.iotdb.rpc.subscription.exception.SubscriptionRuntimeCriticalException; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionMisc.class}) +public class IoTDBTestParamPushConsumerIT extends AbstractSubscriptionRegressionIT { + private static SubscriptionPushConsumer consumer; + private static final String topicName = "TestParamPushConsumerTopic1"; + private static final String database = "root.TestParamPushConsumer"; + private static final String device = database + ".d_0"; + private static List schemaList = new ArrayList<>(); + private String sql = "select count(s_0) from " + device; + + static { + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + } + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s(topicName, null, null, null, false); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + subs.getTopics().forEach(System.out::println); + session_src.executeNonQueryStatement("create user user02 'user02';"); + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_default") + .consumerGroupId("TestParamPushConsumer") + .ackStrategy(AckStrategy.BEFORE_CONSUME) + .consumeListener( + message -> { + System.out.println(message.getMessageType()); + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + consumer.subscribe(topicName); + } + + @Override + @After + public void 
tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + try { + session_src.executeNonQueryStatement("drop user user02"); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush"); + } + + @Test + public void testUnsetGroupConsumer() + throws IoTDBConnectionException, StatementExecutionException { + long count = getCount(session_src, sql); + insert_data(1706659200000L); + try (final SubscriptionPushConsumer consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .ackStrategy(AckStrategy.BEFORE_CONSUME) + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + // System.out.println("#### " + dataSet.getTablet().rowSize); + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer()) { + consumer.open(); + consumer.subscribe(topicName); + AWAIT.untilAsserted( + () -> { + check_count(5, sql, "before count=" + count); + }); + } + } + + @Test + public void testAlterPollTime() throws IoTDBConnectionException, StatementExecutionException { + String sql = "select count(s_0) from " + device; + long count = getCount(session_src, sql); + insert_data(1706669800000L); + try (final SubscriptionPushConsumer consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .ackStrategy(AckStrategy.BEFORE_CONSUME) + .autoPollTimeoutMs(1000) + .autoPollIntervalMs(10) + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer()) { + consumer.open(); + consumer.subscribe(topicName); + AWAIT.untilAsserted( + () -> { + System.out.println(getCount(session_dest, sql)); + check_count(5, sql, " would sync all data including deleted, before count=" + count); + }); + } + } + + @Test(expected = NullPointerException.class) + public void testCreateConsumer_null() { + new SubscriptionPushConsumer(null).open(); + } + + @Test( + expected = + SubscriptionConnectionException.class) // connect to TEndPoint(ip:localhost, port:6667) + public void testCreateConsumer_empty() { + new SubscriptionPushConsumer(new Properties()).open(); + } + + @Test(expected = SubscriptionConnectionException.class) + public void testCreateConsumer_empty2() { + new SubscriptionPushConsumer.Builder().buildPushConsumer().open(); + } + + @Test(expected = SubscriptionIdentifierSemanticException.class) + public void testSubscribe_null() { + consumer.subscribe((String) null); + } + + @Test(expected = 
SubscriptionIdentifierSemanticException.class) + public void testSubscribe_empty() { + consumer.subscribe(""); + } + + @Test(expected = SubscriptionRuntimeCriticalException.class) + public void testSubscribe_notTopic() { + consumer.subscribe("topic_notCreate"); + } + + @Test + public void testSubscribe_dup() { + SubscriptionPushConsumer consumer1 = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_default") + .consumerGroupId("TestParamPushConsumer") + .buildPushConsumer(); + consumer1.open(); + consumer1.subscribe(topicName); + consumer1.subscribe(topicName); + consumer1.close(); + } + + @Test(expected = SubscriptionIdentifierSemanticException.class) + public void testUnSubscribe_null() { + consumer.unsubscribe((String) null); + } + + @Test(expected = SubscriptionIdentifierSemanticException.class) + public void testUnSubscribe_empty() { + consumer.unsubscribe(""); + } + + @Test(expected = SubscriptionRuntimeCriticalException.class) + public void testUnSubscribe_notTopic() { + consumer.unsubscribe("topic_notCreate"); + } + + @Test + public void testUnSubscribe_dup() { + SubscriptionPushConsumer consumer1 = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_1") + .consumerGroupId("push_param_group_id_1") + .buildPushConsumer(); + consumer1.open(); + consumer1.subscribe(topicName); + consumer1.unsubscribe(topicName); + consumer1.unsubscribe(topicName); + consumer1.close(); + } + + @Test + public void testUnSubscribe_notSubs() + throws IoTDBConnectionException, StatementExecutionException { + subs.createTopic("t"); + SubscriptionPushConsumer consumer1 = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_1") + .consumerGroupId("push_param_group_id_1") + .buildPushConsumer(); + consumer1.open(); + // No subscription, unsubscribe directly + consumer1.unsubscribe("t"); + consumer1.close(); + subs.dropTopic("t"); + } + + @Test(expected = SubscriptionException.class) + public void testSubscribe_AfterClose() { + SubscriptionPushConsumer consumer1 = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_1") + .consumerGroupId("push_param_group_id_1") + .buildPushConsumer(); + consumer1.open(); + consumer1.subscribe(topicName); + consumer1.close(); + consumer1.subscribe(topicName); + } + + @Test(expected = SubscriptionException.class) + public void testUnSubscribe_AfterClose() { + SubscriptionPushConsumer consumer1 = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_1") + .consumerGroupId("push_param_group_id_1") + .buildPushConsumer(); + consumer1.open(); + consumer1.close(); + consumer1.unsubscribe(topicName); + } + + @Test(expected = SubscriptionConnectionException.class) + public void testNoUser() { + String userName = "user01"; + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .username(userName) + .buildPushConsumer() + .open(); + } + + @Test(expected = SubscriptionConnectionException.class) + public void testErrorPasswd() { + String userName = "user02"; + SubscriptionPushConsumer consumer1 = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .username(userName) + .buildPushConsumer(); + consumer1.open(); + consumer1.close(); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/param/IoTDBTestParamSubscriptionSessionIT.java 
b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/param/IoTDBTestParamSubscriptionSessionIT.java new file mode 100644 index 0000000000000..f148129d610ca --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/param/IoTDBTestParamSubscriptionSessionIT.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.param; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionMisc; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.session.subscription.SubscriptionSession; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionMisc.class}) +public class IoTDBTestParamSubscriptionSessionIT extends AbstractSubscriptionRegressionIT { + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + } + + @Test + public void testCreateSession_null_host() { + new SubscriptionSession.Builder().host(null).build(); + } + + @Test(expected = IoTDBConnectionException.class) + public void testCreateSession_error_port() throws IoTDBConnectionException { + new SubscriptionSession(SRC_HOST, SRC_PORT + 1).open(); + } + + @Test(expected = IoTDBConnectionException.class) + public void testCreateSession_ErrorHostname() throws IoTDBConnectionException { + new SubscriptionSession.Builder().host("noName").build().open(); + } + + @Test(expected = IoTDBConnectionException.class) + public void testCreateSession_ErrorUsername() throws IoTDBConnectionException { + new SubscriptionSession.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .username("admin") + .build() + .open(); + } + + @Test(expected = IoTDBConnectionException.class) + public void testCreateSession_ErrorPassword() throws IoTDBConnectionException { + new SubscriptionSession.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .password("admin") + .build() + .open(); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/param/IoTDBTestParamTopicIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/param/IoTDBTestParamTopicIT.java new file mode 100644 index 0000000000000..bad7f39bda17d --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/param/IoTDBTestParamTopicIT.java @@ -0,0 +1,249 @@ +/* + * Licensed to the Apache Software Foundation (ASF) 
under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.param; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionMisc; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.rpc.subscription.exception.SubscriptionIdentifierSemanticException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.Properties; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionMisc.class}) +public class IoTDBTestParamTopicIT extends AbstractSubscriptionRegressionIT { + private static SubscriptionPullConsumer consumer; + private static final String topicName = "TopicParam"; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + consumer = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerGroupId("g_TestParamTopic") + .consumerId("c1") + .buildPullConsumer(); + consumer.open(); + } + + @Override + @After + public void tearDown() throws Exception { + consumer.close(); + subs.getTopics() + .forEach( + topic -> { + try { + subs.dropTopic(topic.getTopicName()); + } catch (Exception ignored) { + } + }); + super.tearDown(); + } + + private void printTopics(String msg) + throws IoTDBConnectionException, StatementExecutionException { + System.out.println(msg); + subs.getTopics().forEach(System.out::println); + } + + @Test(expected = SubscriptionIdentifierSemanticException.class) + public void testCreateTopic_null() throws IoTDBConnectionException, StatementExecutionException { + subs.createTopic(null); + printTopics("testCreateTopic_null"); + } + + @Test(expected = SubscriptionIdentifierSemanticException.class) + public void testCreateTopic_emptyString() + throws IoTDBConnectionException, StatementExecutionException { + subs.createTopic(""); + printTopics("testCreateTopic_emptyString"); + } + + @Test(expected = StatementExecutionException.class) + public void testCreateTopic_dup() throws IoTDBConnectionException, StatementExecutionException { + subs.createTopic(topicName); + subs.createTopic(topicName); + printTopics("testCreateTopic_dup"); + } + + @Test(expected = SubscriptionIdentifierSemanticException.class) + public void testCreateTopic_invalid() + throws 
IoTDBConnectionException, StatementExecutionException { + subs.createTopic("Topic-1"); + printTopics("testCreateTopic_invalid"); + } + + @Test(expected = StatementExecutionException.class) // path filter conditions are not checked + public void testCreateTopic_invalidPath_no_root() + throws IoTDBConnectionException, StatementExecutionException { + Properties properties = new Properties(); + properties.put(TopicConstant.PATH_KEY, "abc"); + subs.createTopic("topic_error_path1", properties); + printTopics("testCreateTopic_invalidPath_no_root"); + } + + @Test(expected = StatementExecutionException.class) + public void testCreateTopic_invalidPath_endWithPoint() + throws IoTDBConnectionException, StatementExecutionException { + Properties properties = new Properties(); + properties.put(TopicConstant.PATH_KEY, "root."); + subs.createTopic("topic_error_path2", properties); + printTopics("testCreateTopic_invalidPath_endWithPoint"); + } + + @Test // Need to pay attention to the default value + public void testCreateTopic_invalidFormat() + throws IoTDBConnectionException, StatementExecutionException { + Properties properties = new Properties(); + properties.put(TopicConstant.FORMAT_KEY, "abd"); + subs.createTopic("topic_error_path3", properties); + printTopics("testCreateTopic_invalidFormat"); + subs.dropTopic("topic_error_path3"); + } + + @Test(expected = StatementExecutionException.class) + public void testCreateTopic_invalidTime() + throws IoTDBConnectionException, StatementExecutionException { + Properties properties = new Properties(); + properties.put(TopicConstant.START_TIME_KEY, "abd"); + subs.createTopic("topic_error_path4", properties); + printTopics("testCreateTopic_invalidTime"); + } + + @Test + public void testCreateTopic_invalidTime2() + throws IoTDBConnectionException, StatementExecutionException { + Properties properties = new Properties(); + properties.put(TopicConstant.START_TIME_KEY, "now"); + properties.put(TopicConstant.END_TIME_KEY, "now"); + subs.createTopic("topic_error_path5", properties); + printTopics("testCreateTopic_invalidTime2"); + subs.dropTopic("topic_error_path5"); + } + + @Test(expected = StatementExecutionException.class) + public void testCreateTopic_invalidTime3() + throws IoTDBConnectionException, StatementExecutionException { + Properties properties = new Properties(); + properties.put(TopicConstant.START_TIME_KEY, "2024-01-01"); + properties.put(TopicConstant.END_TIME_KEY, "2023-01-01"); + subs.createTopic("topic_error_path6", properties); + printTopics("testCreateTopic_invalidTime3"); + } + + @Test(expected = StatementExecutionException.class) + public void testCreateTopic_invalidTime4() + throws IoTDBConnectionException, StatementExecutionException { + Properties properties = new Properties(); + properties.put(TopicConstant.START_TIME_KEY, "now"); + properties.put(TopicConstant.END_TIME_KEY, "2024-01-01"); + subs.createTopic("topic_error_path7", properties); + printTopics("testCreateTopic_invalidTime4"); + } + + @Test(expected = StatementExecutionException.class) + public void testCreateTopic_invalidTime5() + throws IoTDBConnectionException, StatementExecutionException { + Properties properties = new Properties(); + properties.put(TopicConstant.START_TIME_KEY, "2023-01-32"); + properties.put(TopicConstant.END_TIME_KEY, "now"); + subs.createTopic("topic_error_path7", properties); + printTopics("testCreateTopic_invalidTime5"); + } + + @Test(expected = StatementExecutionException.class) + public void testCreateTopic_invalidTime6() + throws 
IoTDBConnectionException, + StatementExecutionException, + TException, + IOException, + InterruptedException { + Properties properties = new Properties(); + properties.put(TopicConstant.START_TIME_KEY, "2023-02-29"); + properties.put(TopicConstant.END_TIME_KEY, "now"); + subs.createTopic("topic_error_path8", properties); + printTopics("testCreateTopic_invalidTime6"); + consumer.subscribe("topic_error_path8"); + String database = "root.testClient"; + createDB(database); + session_src.executeNonQueryStatement("create timeseries " + database + ".d_1.s_0 int32;"); + session_dest.executeNonQueryStatement("create timeseries " + database + ".d_1.s_0 int32;"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time, s_0) values(1677628800000,33);"); + consume_data(consumer, session_dest); + check_count(1, "select count(s_0) from " + database + ".d_1;", "invalid date"); + consumer.unsubscribe("topic_error_path8"); + subs.dropTopic("topic_error_path8"); + dropDB(database); + } + + @Test(expected = SubscriptionIdentifierSemanticException.class) + public void testDropTopic_null() throws IoTDBConnectionException, StatementExecutionException { + subs.dropTopic(null); + } + + @Test(expected = SubscriptionIdentifierSemanticException.class) + public void testDropTopic_empty() throws IoTDBConnectionException, StatementExecutionException { + subs.dropTopic(""); + } + + @Test(expected = StatementExecutionException.class) // drop non-existent topic + public void testDropTopic_notCreate() + throws IoTDBConnectionException, StatementExecutionException { + subs.dropTopic("abab"); + } + + @Test(expected = StatementExecutionException.class) + public void testDropTopic_dup() throws IoTDBConnectionException, StatementExecutionException { + String dropName = "`topic-1*.`"; + subs.createTopic(dropName); + subs.dropTopic(dropName); + subs.dropTopic(dropName); + } + + @Test + public void testGetTopic_nonExist() throws IoTDBConnectionException, StatementExecutionException { + System.out.println(subs.getTopic("xxx")); + assertFalse(subs.getTopic("xxx").isPresent()); + } + + @Test + public void testGetTopic_exist() throws IoTDBConnectionException, StatementExecutionException { + subs.createTopic("exist_topic_name"); + assertTrue(subs.getTopic("exist_topic_name").isPresent()); + subs.dropTopic("exist_topic_name"); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/autocommit/IoTDBTestAutoCommitFalseDataSetPullConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/autocommit/IoTDBTestAutoCommitFalseDataSetPullConsumerIT.java new file mode 100644 index 0000000000000..a8211c8875251 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/autocommit/IoTDBTestAutoCommitFalseDataSetPullConsumerIT.java @@ -0,0 +1,190 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.autocommit;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.Session;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer;
+import org.apache.iotdb.session.subscription.payload.SubscriptionMessage;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.Iterator;
+import java.util.List;
+
+/***
+ * If autoCommit is false and consumption progress is never committed, data will be consumed
+ * repeatedly.
+ * If autoCommit is true, consumption is generally not duplicated (though the underlying
+ * subscription pipe provides at-least-once semantics, so duplicate data is still possible).
+ * autoCommit is only an auxiliary feature on top of these at-least-once semantics; the commit
+ * itself matters only for restart recovery. In normal, bug-free operation (pure log/batch),
+ * data is delivered once whether or not it is committed.
+ * The progress information of a data subscription and the progress information of the Pipe are
+ * the same concept.
+ * Some supplementary notes on the semantics of subscription commit:
+ * In the current implementation, after a batch of messages is polled by a consumer, if it is not
+ * committed within a certain (hardcoded) period of time, the consumers under this subscription
+ * may poll this batch again. This mechanism prevents messages from accumulating on the server
+ * side when a consumer never explicitly commits while autoCommit is false. It is not part of the
+ * functional definition (it is not exposed to users); it only covers exceptional client-side
+ * cases, such as a consumer crashing after polling a batch, or forgetting to commit.
+ * Tests can therefore focus on the autoCommit=true case: even without explicit commits, all
+ * messages successfully polled by a consumer are expected to be committed before the consumer is
+ * closed (reflected in the Pipe as no accumulated resources).
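+ *
+ * A minimal sketch of the commit interplay (hypothetical host/port/topic values; the builder
+ * and consumer calls are the same ones used by the tests below):
+ *
+ *   SubscriptionPullConsumer c =
+ *       new SubscriptionPullConsumer.Builder()
+ *           .host("127.0.0.1")
+ *           .port(6667)
+ *           .autoCommit(false) // progress is persisted only by an explicit commit
+ *           .buildPullConsumer();
+ *   c.open();
+ *   c.subscribe("topic1");
+ *   List<SubscriptionMessage> messages = c.poll(Duration.ofMillis(10000));
+ *   // ... consume the messages ...
+ *   c.commitSync(messages); // without this, the batch may be re-polled after the timeout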
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBTestAutoCommitFalseDataSetPullConsumerIT
+    extends AbstractSubscriptionRegressionIT {
+  private static final String database = "root.TestAutoCommitFalseDataSetPullConsumer";
+  private static final String device = database + ".d_0";
+  private static final String topicName = "Topic_auto_commit_false";
+  private String pattern = device + ".**";
+  private static SubscriptionPullConsumer consumer;
+  private static List<MeasurementSchema> schemaList = new ArrayList<>();
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+
+    createTopic_s(topicName, pattern, null, null, false);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest2.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest2.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach((System.out::println));
+    assertTrue(subs.getTopic(topicName).isPresent(), "create show topics");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += row * 2000;
+    }
+    session_src.insertTablet(tablet);
+  }
+
+  private void consume_data_noCommit(SubscriptionPullConsumer consumer, Session session)
+      throws InterruptedException,
+          TException,
+          IOException,
+          StatementExecutionException,
+          IoTDBConnectionException {
+    while (true) {
+      Thread.sleep(1000);
+      List<SubscriptionMessage> messages = consumer.poll(Duration.ofMillis(10000));
+      if (messages.isEmpty()) {
+        break;
+      }
+      for (final SubscriptionMessage message : messages) {
+        for (final Iterator<Tablet> it = message.getSessionDataSetsHandler().tabletIterator();
+            it.hasNext(); ) {
+          final Tablet tablet = it.next();
+          session.insertTablet(tablet);
+          System.out.println(
+              FORMAT.format(new Date()) + " consume data no commit:" + tablet.rowSize);
+        }
+      }
+    }
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    consumer = create_pull_consumer("pull_commit", "Auto_commit_FALSE", false, null);
+    // Write data before subscribing
+    insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00
+    // Subscribe
+    assertEquals(subs.getSubscriptions().size(), 0, "Before subscription show subscriptions");
+    consumer.subscribe(topicName);
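+    // The rest of this test consumes three times without ever committing; because autoCommit
+    // is false, previously consumed batches can be delivered again (see the class comment).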
subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + String sql1 = "select count(s_0) from " + device; + String sql2 = "select count(s_1) from " + device; + consume_data_noCommit(consumer, session_dest); + System.out.println(FORMAT.format(new Date()) + " src sql1: " + getCount(session_src, sql1)); + System.out.println("dest sql1: " + getCount(session_dest, sql1)); + System.out.println("dest2 sql1: " + getCount(session_dest2, sql1)); + check_count(4, sql1, "dest consume subscription before data:s_0"); + check_count(4, sql2, "dest consume subscription before data:s_1"); + check_count2(0, sql2, "dest2 consumption subscription previous data: s_1"); + + // Subscribe and then write data + insert_data(System.currentTimeMillis()); + consume_data_noCommit(consumer, session_dest2); + System.out.println("src sql1: " + getCount(session_src, sql1)); + System.out.println("dest sql1: " + getCount(session_dest, sql1)); + System.out.println("dest2 sql1: " + getCount(session_dest2, sql1)); + check_count(4, sql1, "dest consume subscription data 2:s_0"); + check_count2(4, sql1, "dest2 consumption subscription data 2:s_0"); + check_count2(4, sql2, "dest2 consumption subscription data 2:s_1"); + + // insert_data(1706659300000L); //2024-01-31 08:00:00+08:00 + // Will consume again + consume_data_noCommit(consumer, session_dest); + System.out.println("src sql1: " + getCount(session_src, sql1)); + System.out.println("dest sql1: " + getCount(session_dest, sql1)); + System.out.println("dest2 sql1: " + getCount(session_dest2, sql1)); + check_count(4, sql1, "dest consumption subscription before data3:s_0"); + check_count(4, sql2, "dest consume subscription before data3:s_1"); + check_count2(4, sql2, "dest2 consumption subscription before count 3 data:s_1"); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/autocommit/IoTDBTestAutoCommitTrueDataSetPullConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/autocommit/IoTDBTestAutoCommitTrueDataSetPullConsumerIT.java new file mode 100644 index 0000000000000..0c1d184545e8a --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/autocommit/IoTDBTestAutoCommitTrueDataSetPullConsumerIT.java @@ -0,0 +1,174 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.autocommit; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.Session; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionMessage; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Date; +import java.util.Iterator; +import java.util.List; + +/*** + * PullConsumer DataSet + * pattern: device + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBTestAutoCommitTrueDataSetPullConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.TestAutoCommitTrueDataSetPullConsumer"; + private static final String device = database + ".d_0"; + private static final String topicName = "topic_auto_commit_true"; + private String pattern = device + ".**"; + private static SubscriptionPullConsumer consumer; + private static List schemaList = new ArrayList<>(); + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s(topicName, pattern, null, null, false); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest2.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest2.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } finally { + subs.dropTopic(topicName); + dropDB(database); + } + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, 
+ row * 20L + row);
+ tablet.addValue("s_1", rowIndex, row + 2.45);
+ // row * 2000 adds 0 on the first pass, so rows 0 and 1 share a timestamp:
+ // each call persists only 4 distinct points, which is what do_test asserts.
+ timestamp += row * 2000;
+ }
+ session_src.insertTablet(tablet);
+ }
+
+ private void consume_data_noCommit(SubscriptionPullConsumer consumer, Session session)
+ throws InterruptedException,
+ TException,
+ IOException,
+ StatementExecutionException,
+ IoTDBConnectionException {
+ while (true) {
+ Thread.sleep(1000);
+ List<SubscriptionMessage> messages = consumer.poll(Duration.ofMillis(10000));
+ if (messages.isEmpty()) {
+ break;
+ }
+ for (final SubscriptionMessage message : messages) {
+ for (final Iterator<Tablet> it = message.getSessionDataSetsHandler().tabletIterator();
+ it.hasNext(); ) {
+ final Tablet tablet = it.next();
+ session.insertTablet(tablet);
+ System.out.println(
+ FORMAT.format(new Date()) + " consume data no commit:" + tablet.rowSize);
+ }
+ }
+ }
+ }
+
+ @Test
+ public void do_test()
+ throws InterruptedException,
+ TException,
+ IoTDBConnectionException,
+ IOException,
+ StatementExecutionException {
+ String sql = "select count(s_0) from " + device;
+ consumer = create_pull_consumer("pull_commit", "Auto_commit_true", true, 1000L);
+ // Write data before subscribing
+ insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00
+ // Subscribe
+ assertEquals(subs.getSubscriptions().size(), 0, "no subscriptions before subscribing");
+ consumer.subscribe(topicName);
+ subs.getSubscriptions().forEach(System.out::println);
+ assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+
+ System.out.println("First consumption");
+ consume_data_noCommit(consumer, session_dest);
+ System.out.println(FORMAT.format(new Date()) + ", " + getCount(session_src, sql));
+ check_count(4, sql, "dest consumes pre-subscription data: s_0");
+ check_count(
+ 4, "select count(s_1) from " + device, "dest consumes pre-subscription data: s_1");
+ // Subscribe and then write data
+ insert_data(System.currentTimeMillis());
+ consume_data_noCommit(consumer, session_dest2);
+ System.out.println("second consumption");
+ check_count2(
+ 4, "select count(s_0) from " + device, "dest2 consumes post-subscription data: s_0");
+ check_count2(
+ 4, "select count(s_1) from " + device, "dest2 consumes post-subscription data: s_1");
+ // Data already consumed (and auto-committed) is not delivered again
+ consume_data_noCommit(consumer, session_dest);
+ System.out.println("Third consumption");
+ check_count(
+ 4, "select count(s_0) from " + device, "dest unchanged after third consumption: s_0");
+ check_count(
+ 4, "select count(s_1) from " + device, "dest unchanged after third consumption: s_1");
+ }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/format/IoTDBDBDataSetPullConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/format/IoTDBDBDataSetPullConsumerIT.java
new file mode 100644
index 0000000000000..49c51a56ffe55
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/format/IoTDBDBDataSetPullConsumerIT.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.format; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/*** + * PullConsumer DataSet + * pattern: db + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBDBDataSetPullConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.DBDataSetPullConsumer"; + private static final String device = database + ".d_0"; + private static final String topicName = "topic_format_pull_dataset"; + private static final String pattern = database + ".**"; + private static SubscriptionPullConsumer consumer; + private static List schemaList = new ArrayList<>(); + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s(topicName, pattern, null, null, false); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + 
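// NB: "timestamp += row * 2000" below adds 0 on the first pass, so rows 0 and 1 share a
+ // timestamp and each insert_data call persists only 4 distinct points; the counts that
+ // do_test expects (8 after two calls, 12 after three) follow from that.
+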
tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += row * 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + consumer = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("ts") + .consumerGroupId("pull_mode_dataset") + .autoCommit(false) + .fileSaveDir("target/pull-subscription") + .buildPullConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis()); + // Consumption data + consume_data(consumer, session_dest); + + String sql = "select count(s_0) from " + device; + System.out.println("src " + getCount(session_src, sql)); + System.out.println("dest " + getCount(session_dest, sql)); + check_count(8, sql, "consume after subscription"); + // Unsubscribe + consumer.unsubscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 0, "After cancellation, show subscriptions"); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + // Consumption data: Progress is not retained when re-subscribing after cancellation. Full + // synchronization. + consume_data(consumer, session_dest2); + System.out.println("src " + getCount(session_src, sql)); + System.out.println("dest " + getCount(session_dest, sql)); + System.out.println("dest2 " + getCount(session_dest2, sql)); + check_count2( + 12, sql, "Unsubscribe and resubscribe, progress is not retained. Full synchronization."); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/format/IoTDBDBTsfilePullConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/format/IoTDBDBTsfilePullConsumerIT.java new file mode 100644 index 0000000000000..a034a4080627d --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/format/IoTDBDBTsfilePullConsumerIT.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.format; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +/*** + * PullConsumer Tsfile + * pattern: db + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBDBTsfilePullConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.DBTsfilePullConsumer"; + private static final String device = database + ".d_0"; + private static final String topicName = "topic_format_pull_tsfile"; + private static final String pattern = database + ".**"; + private static SubscriptionPullConsumer consumer; + private static List schemaList = new ArrayList<>(); + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createTopic_s(topicName, pattern, null, null, true); + createDB(database); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException, InterruptedException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + 
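// The topic is created with no time bounds, so rows written before subscribe() are still
+ // delivered: the first await below expects 10 rows for d_0 (5 historical + 5 written after
+ // subscribing), the trailing 2 presumably being the tsfile count (each insert_data flushes).
+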
insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + consumer = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("db") + .consumerGroupId("mode_pull_tsfile") + .autoCommit(false) + .fileSaveDir("target/pull-subscription") // hack for license check + .buildPullConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + // insert_data(1706659200000L); //2024-01-31 08:00:00+08:00 + insert_data(System.currentTimeMillis()); + // Consumption data + consume_tsfile_with_file_count_await( + consumer, Collections.singletonList(device), Arrays.asList(10, 2)); + // Unsubscribe + consumer.unsubscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 0, "Show subscriptions after unsubscription"); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + // Consumption data: Progress is not retained when re-subscribing after cancellation. Full + // synchronization. + consume_tsfile_with_file_count_await( + consumer, Collections.singletonList(device), Arrays.asList(15, 3)); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBAllTsDatasetPullConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBAllTsDatasetPullConsumerIT.java new file mode 100644 index 0000000000000..0f6ac69ebd14c --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBAllTsDatasetPullConsumerIT.java @@ -0,0 +1,207 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.loose_range; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/*** + * pull consumer + * pattern: ts + * accurate time + * format: dataset + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBAllTsDatasetPullConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.AllTsDatasetPullConsumer"; + private static final String database2 = "root.test.AllTsDatasetPullConsumer"; + private static final String device = database + ".d_0"; + private String device2 = database + ".d_1"; + private static final String pattern = device + ".s_0"; + private static final String topicName = "topic_loose_range_all_pull_dataset"; + private List schemaList = new ArrayList<>(); + private SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s( + topicName, + pattern, + "2024-01-01T00:00:00+08:00", + "2024-03-31T23:59:59+08:00", + false, + TopicConstant.MODE_LIVE_VALUE, + TopicConstant.LOOSE_RANGE_ALL_VALUE); + session_src.createTimeseries( + pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + 
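// The aligned d_1 device and the d_2 series under the second database are negative
+ // controls: they do not match the single-timeseries pattern, so do_test expects d_1's
+ // destination counts to stay at 0 (d_2 only contributes unrelated noise data).
+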
subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "create topics"); + } + + @Override + @After + public void tearDown() throws Exception { + consumer.close(); + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (1 + row) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + String sql = + "select count(s_0) from " + + device + + " where time >= 2024-01-01T00:00:00+08:00 and time <= 2024-03-31T23:59:59+08:00"; + consumer = create_pull_consumer("ts_accurate_dataset_pull", "loose_range_all", false, null); + // Write data before subscribing + insert_data(1704038396000L, device); // 2023-12-31 23:59:56+08:00 + insert_data(1704038396000L, device2); // 2023-12-31 23:59:56+08:00 + System.out.println("src filter:" + getCount(session_src, sql)); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + + // Before consumption subscription data + consume_data(consumer, session_dest); + check_count_non_strict( + 3, "select count(s_0) from " + device, "Start time boundary data: s_0 " + device); + check_count(0, "select count(s_1) from " + device, "Start time boundary data: s_1 " + device); + check_count(0, "select count(s_0) from " + device2, "Start time boundary data: s_0 " + device2); + check_count(0, "select count(s_1) from " + device2, "Start time boundary data: s_1 " + device2); + + insert_data(System.currentTimeMillis(), device); // now + insert_data(System.currentTimeMillis(), device2); // now + System.out.println("src filter:" + getCount(session_src, sql)); + consume_data(consumer, session_dest); + check_count_non_strict( + 3, "select count(s_0) from " + device, "Write some real-time data later:s_0 " + device); + check_count( + 0, "select count(s_1) from " + device, "Write some real-time data later: s_1 " + device); + check_count(0, "select count(s_0) from " + device2, "not in range: s_0 " + device2); + check_count(0, "select count(s_1) from " + device2, "not in range: s_1 " + device2); + + insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00 + insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00 + consume_data(consumer, session_dest); + System.out.println("src filter:" + getCount(session_src, sql)); + check_count_non_strict( + 8, "select count(s_0) from " + device, "data within the time range: s_0 " + device); + check_count(0, "select count(s_1) from " + device, "data within the time range: s_1 " + device); + check_count( + 0, "select count(s_0) from " + device2, "data within the time range: s_0 " + device2); + check_count( + 0, "select count(s_1) from " + device2, "data within the time range: s_1 " + device2); + + insert_data(1711814398000L, device); // 2024-03-30 23:59:58+08:00 + insert_data(1711814398000L, device2); // 2024-03-30 23:59:58+08:00 + System.out.println("src 
filter:" + getCount(session_src, sql)); + consume_data(consumer, session_dest); + check_count_non_strict( + 13, "select count(s_0) from " + device, "End time limit data: s_0 " + device); + check_count(0, "select count(s_1) from " + device, "End time limit data: s_1" + device); + check_count(0, "select count(s_0) from " + device2, "End time limit data: s_0" + device2); + check_count(0, "select count(s_1) from " + device2, "End time limit data: s_1" + device2); + + insert_data(1711900798000L, device); // 2024-03-31 23:59:58+08:00 + insert_data(1711900798000L, device2); // 2024-03-31 23:59:58+08:00 + System.out.println("src filter:" + getCount(session_src, sql)); + consume_data(consumer, session_dest); + check_count_non_strict( + 14, "select count(s_0) from " + device, "End time limit data 2:s_0 " + device); + check_count(0, "select count(s_1) from " + device, "End time limit data 2:s_1" + device); + check_count(0, "select count(s_0) from " + device2, "End time limit data 2: s_0" + device2); + check_count(0, "select count(s_1) from " + device2, "End time limit data 2: s_1" + device2); + + consumer.unsubscribe(topicName); + consumer.subscribe(topicName); + consume_data(consumer, session_dest); + check_count_non_strict( + 14, "select count(s_0) from " + device, "End time limit data 2:s_0 " + device); + check_count(0, "select count(s_1) from " + device, "End time limit data 2:s_1" + device); + check_count(0, "select count(s_0) from " + device2, "End time limit data 2: s_0" + device2); + check_count(0, "select count(s_1) from " + device2, "End time limit data 2: s_1" + device2); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBAllTsTsfilePullConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBAllTsTsfilePullConsumerIT.java new file mode 100644 index 0000000000000..1e70c9a9ac94c --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBAllTsTsfilePullConsumerIT.java @@ -0,0 +1,192 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.loose_range; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Date; +import java.util.List; + +/*** + * PullConsumer + * pattern: ts + * format: tsfile + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBAllTsTsfilePullConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.AllTsTsfilePullConsumer"; + private static final String database2 = "root.AllTsTsfilePullConsumer"; + private static final String topicName = "TopicAllTsTsfilePullConsumer"; + private static final String device = database + ".d_0"; + private static final String pattern = device + ".s_0"; + private static final String device2 = database + ".d_1"; + private static SubscriptionPullConsumer consumer; + private List schemaList = new ArrayList<>(); + private long nowTimestamp; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + nowTimestamp = System.currentTimeMillis(); + createTopic_s( + topicName, + pattern, + null, + String.valueOf(nowTimestamp), + true, + TopicConstant.MODE_LIVE_VALUE, + TopicConstant.LOOSE_RANGE_ALL_VALUE); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + device2 + 
"(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException, InterruptedException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L, device); // 2024-01-31 08:00:00+08:00 + insert_data(1706659200000L, device2); // 2024-01-31 08:00:00+08:00 + + consumer = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("ts_pattern_history_tsfile_pull") + .consumerGroupId("loose_range_all") + .autoCommit(false) + .fileSaveDir("target/pull-subscription") // hack for license check + .buildPullConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + + insert_data(nowTimestamp - 4000, device); + insert_data(nowTimestamp - 4000, device2); + System.out.println( + FORMAT.format(new Date()) + + " src filter:" + + getCount( + session_src, "select count(s_0) from " + device + " where time <=" + nowTimestamp)); + + // Consumption data + List paths = new ArrayList<>(3); + paths.add(device); + paths.add(device2); + paths.add(database2 + ".d_2"); + consume_tsfile_await( + consumer, paths, Arrays.asList(8, 0, 0), Arrays.asList(true, false, false)); + + // Unsubscribe + consumer.unsubscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 0, "Show subscriptions after cancellation"); + + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00 + insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00 + + // Consumption data: Progress is not retained after unsubscribing and resubscribing. Full + // synchronization. 
+ consume_tsfile_await( + consumer, paths, Arrays.asList(13, 0, 0), Arrays.asList(true, false, false)); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBAllTsfilePullConsumerSnapshotIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBAllTsfilePullConsumerSnapshotIT.java new file mode 100644 index 0000000000000..03a4956ce45f2 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBAllTsfilePullConsumerSnapshotIT.java @@ -0,0 +1,192 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.loose_range; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Date; +import java.util.List; + +/*** + * PullConsumer + * pattern: ts + * format: tsfile + * loose-range: all + * mode: snapshot + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBAllTsfilePullConsumerSnapshotIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.AllTsfilePullConsumerSnapshot"; + private static final String database2 = "root.test.AllTsfilePullConsumerSnapshot"; + private static final String device = database + ".d_0"; + private static final String topicName = "TopicAllTsfilePullConsumerSnapshot"; + private static final String pattern = device + ".s_0"; + private static final String device2 = database + ".d_1"; + private static SubscriptionPullConsumer consumer; + private List schemaList = new ArrayList<>(); + private long nowTimestamp; + + @Override + @Before + public 
void setUp() throws Exception { + super.setUp(); + createDB(database); + nowTimestamp = System.currentTimeMillis(); + createTopic_s( + topicName, + pattern, + null, + String.valueOf(nowTimestamp), + true, + TopicConstant.MODE_SNAPSHOT_VALUE, + TopicConstant.LOOSE_RANGE_ALL_VALUE); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + device2 + "(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException, InterruptedException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L, device); // 2024-01-31 08:00:00+08:00 + insert_data(1706659200000L, device2); // 2024-01-31 08:00:00+08:00 + + consumer = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("snapshot_ts_pattern_tsfile_pull") + .consumerGroupId("loose_range_all") + .autoCommit(false) + .fileSaveDir("target/pull-subscription") // hack for license check + .buildPullConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + + insert_data(nowTimestamp - 4000, device); + insert_data(nowTimestamp - 4000, device2); + System.out.println( 
+ FORMAT.format(new Date()) + + " src filter:" + + getCount( + session_src, "select count(s_0) from " + device + " where time <=" + nowTimestamp)); + + // Consumption data + List paths = new ArrayList<>(3); + paths.add(device); + paths.add(device2); + paths.add(database2 + ".d_2"); + // Subscribe and write without consuming + consume_tsfile_await(consumer, paths, Arrays.asList(5, 0, 0)); + + // Unsubscribe + consumer.unsubscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 0, "After cancellation, show subscriptions"); + + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00 + insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00 + + // Consumption data: Progress is not retained after unsubscribing and re-subscribing. Full + // synchronization. + consume_tsfile_await(consumer, paths, Arrays.asList(10, 0, 0)); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBPathDeviceDataSetPullConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBPathDeviceDataSetPullConsumerIT.java new file mode 100644 index 0000000000000..21f423f37379d --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBPathDeviceDataSetPullConsumerIT.java @@ -0,0 +1,169 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.loose_range; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/*** + * pull consumer + * format: dataset + * loose-range: path + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBPathDeviceDataSetPullConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.PathDeviceDataSetPullConsumer"; + private static final String database2 = "root.PathDeviceDataSetPullConsumer"; + private static final String topicName = "TopicPathDeviceDataSetPullConsumer"; + private static final String device = database + ".d_0"; + private String pattern = device + ".**"; + private String device2 = database + ".d_1"; + private static SubscriptionPullConsumer consumer; + private List schemaList = new ArrayList<>(); + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s( + topicName, + pattern, + null, + "now", + false, + TopicConstant.MODE_LIVE_VALUE, + TopicConstant.LOOSE_RANGE_PATH_VALUE); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 float;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 float;"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new 
MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + consumer.close(); + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + String sql = "select count(s_0) from " + device; + consumer = create_pull_consumer("device_pattern_dataset_pull", "loose_range_path", false, null); + // Write data before subscribing + insert_data(1706659200000L, device); // 2024-01-31 08:00:00+08:00 + insert_data(1706659200000L, device2); // 2024-01-31 08:00:00+08:00 + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis() - 30000L, device); + insert_data(System.currentTimeMillis() - 30000L, device2); + // Consumption data + consume_data(consumer, session_dest); + System.out.println("src: " + getCount(session_src, sql)); + check_count(10, sql, "Consumption data: s_0 " + device); + check_count(10, "select count(s_1) from " + device, "Consumption data: s_1"); + check_count(0, "select count(s_0) from " + device2, "Consumption data:d_1"); + check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + insert_data(System.currentTimeMillis(), device); + insert_data(System.currentTimeMillis(), device2); + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00 + insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00 + System.out.println("src: " + getCount(session_src, sql)); + // Consumption data: Progress is not retained when re-subscribing after cancellation. Full + // synchronization. 
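+ // Expected 15 rows for d_0: the 2024-01-31 batch, the (now - 30 s) batch (written with a
+ // margin that presumably keeps it inside the topic's "now" end bound), and the 2024-02-13
+ // batch; the batch written at the current time falls outside that bound, and d_1/d_2 stay
+ // at 0 because the pattern covers d_0 only.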
+ consume_data(consumer, session_dest); + check_count(15, "select count(s_0) from " + device, "Consume data again:" + pattern); + check_count(15, "select count(s_1) from " + device, "Consumption data: s_1"); + check_count(0, "select count(s_0) from " + device2, "Consumption data:d_1"); + check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBPathDeviceTsfilePullConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBPathDeviceTsfilePullConsumerIT.java new file mode 100644 index 0000000000000..49dae7dd72653 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBPathDeviceTsfilePullConsumerIT.java @@ -0,0 +1,177 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.loose_range; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/*** + * PullConsumer + * pattern: device + * format: tsfile + * loose-range: time + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBPathDeviceTsfilePullConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.PathDeviceTsfilePullConsumer"; + private static final String database2 = "root.PathDeviceTsfilePullConsumer"; + private static final String topicName = "TopicPathDeviceTsfilePullConsumer"; + private static final String device = database + ".d_0"; + private static final String pattern = device + 
".**"; + private static final String device2 = database + ".d_1"; + private static SubscriptionPullConsumer consumer; + private List schemaList = new ArrayList<>(); + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s( + topicName, + pattern, + null, + null, + true, + TopicConstant.MODE_LIVE_VALUE, + TopicConstant.LOOSE_RANGE_PATH_VALUE); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException, InterruptedException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L, device); // 2024-01-31 08:00:00+08:00 + insert_data(1706659200000L, device2); // 2024-01-31 08:00:00+08:00 + consumer = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("pull_path_device_tsfile") + .consumerGroupId("loose_range_path") + .autoCommit(false) + .fileSaveDir("target/pull-subscription") // hack for license check + .buildPullConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show 
subscriptions after subscription"); + insert_data(System.currentTimeMillis(), device); + insert_data(System.currentTimeMillis(), device2); + // Consumption data + List devices = new ArrayList<>(3); + devices.add(device); + devices.add(device2); + devices.add(database2 + ".d_2"); + consume_tsfile_with_file_count_await(consumer, devices, Arrays.asList(10, 0, 0, 2)); + // Unsubscribe + consumer.unsubscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 0, "show subscriptions after cancellation"); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00 + insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00 + // Consumption data: Progress is not retained after unsubscribing and re-subscribing. Full + // synchronization. + consume_tsfile_with_file_count_await(consumer, devices, Arrays.asList(15, 0, 0, 3)); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBTimeTsDatasetPullConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBTimeTsDatasetPullConsumerIT.java new file mode 100644 index 0000000000000..1be5951a8c6f6 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBTimeTsDatasetPullConsumerIT.java @@ -0,0 +1,211 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.loose_range;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.rpc.subscription.config.TopicConstant;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.IMeasurementSchema;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/***
+ * pull consumer
+ * pattern: ts
+ * loose-range: time
+ * accurate time range
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBTimeTsDatasetPullConsumerIT extends AbstractSubscriptionRegressionIT {
+  private static final String database = "root.test.TimeTsDatasetPullConsumer";
+  private static final String database2 = "root.TimeTsDatasetPullConsumer";
+  private static final String topicName = "TopicTimeTsDatasetPullConsumer";
+  private String device = database + ".d_0";
+  private String device2 = database + ".d_1";
+  private String device3 = database2 + ".d_2";
+  private String pattern = device + ".s_0";
+  private List<IMeasurementSchema> schemaList = new ArrayList<>();
+  private SubscriptionPullConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createDB(database2);
+    createTopic_s(
+        topicName,
+        pattern,
+        "2024-01-01T00:00:00+08:00",
+        "2024-03-31T23:59:59+08:00",
+        false,
+        TopicConstant.MODE_LIVE_VALUE,
+        TopicConstant.LOOSE_RANGE_TIME_VALUE);
+    session_src.createTimeseries(
+        pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_src.executeNonQueryStatement(
+        "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);");
+    session_dest.executeNonQueryStatement(
+        "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);");
+    session_src.executeNonQueryStatement("create timeseries " + device3 + ".s_0 int32;");
+    session_dest.executeNonQueryStatement("create timeseries " + device3 + ".s_0 int32;");
+    session_src.executeNonQueryStatement("create timeseries " + device3 + ".s_1 double;");
+    session_dest.executeNonQueryStatement("create timeseries " + device3 + ".s_1 double;");
+    session_src.executeNonQueryStatement(
+        "insert into " + device3 + "(time,s_0,s_1)values(1000,132,4567.89);");
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
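+    // The topic above pins an exact window (2024-01-01 to 2024-03-31, +08:00) on a single
+    // series and sets LOOSE_RANGE_TIME, so only the time bound may be filtered loosely;
+    // this is why do_test() below uses check_count_non_strict at the window edges for s_0
+    // but expects strict zero counts for s_1 and for the other devices.
+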
subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + consumer.close(); + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (1 + row) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + String sql = + "select count(s_0) from " + + device + + " where time >= 2024-01-01T00:00:00+08:00 and time <= 2024-03-31T23:59:59+08:00"; + consumer = + create_pull_consumer("pull_ts_pattern_accurate_dataset", "loose_range_time", false, null); + + // Write data before subscribing + insert_data(1704038396000L, device); // 2023-12-31 23:59:56+08:00 + insert_data(1704038396000L, device2); // 2023-12-31 23:59:56+08:00 + System.out.println("src filter:" + getCount(session_src, sql)); + + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + + // Before consumption subscription data + consume_data(consumer, session_dest); + check_count_non_strict(3, sql, "Start time boundary data: s_0 " + device); + check_count(0, "select count(s_1) from " + device, "Start time boundary data: s_1 " + device); + check_count(0, "select count(s_0) from " + device2, "Start time boundary data: s_0 " + device2); + check_count(0, "select count(s_1) from " + device2, "Start time limit data: s_1 " + device2); + + insert_data(System.currentTimeMillis(), device); // not in range + insert_data(System.currentTimeMillis(), device2); // not in range + System.out.println("src filter:" + getCount(session_src, sql)); + + consume_data(consumer, session_dest); + check_count_non_strict(3, sql, "After writing some real-time data: s_0 " + device); + check_count( + 0, "select count(s_1) from " + device, "Write some real-time data later:s_1 " + device); + check_count(0, "select count(s_0) from " + device2, "not in range: s_0 " + device2); + check_count(0, "select count(s_1) from " + device2, "not in range: s_1 " + device2); + + insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00 + insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00 + consume_data(consumer, session_dest); + System.out.println("src filter:" + getCount(session_src, sql)); + + check_count_non_strict(8, sql, "Data within time range: s_0 " + device); + check_count(0, "select count(s_1) from " + device, "data within the time range: s_1 " + device); + check_count( + 0, "select count(s_0) from " + device2, "Data within the time range: s_0 " + device2); + check_count( + 0, "select count(s_1) from " + device2, "Data within the time range: s_1 " + device2); + + insert_data(1711814398000L, device); // 2024-03-30 23:59:58+08:00 + insert_data(1711814398000L, device2); // 2024-03-30 23:59:58+08:00 + System.out.println("src filter:" + getCount(session_src, sql)); + + consume_data(consumer, 
session_dest); + check_count_non_strict(13, sql, "End time limit data: s_0 " + device); + check_count(0, "select count(s_1) from " + device, "End time limit data: s_1 " + device); + check_count(0, "select count(s_0) from " + device2, "End time limit data: s_0 " + device2); + check_count(0, "select count(s_1) from " + device2, "End time limit data: s_1 " + device2); + + insert_data(1711900798000L, device); // 2024-03-31 23:59:58+08:00 + insert_data(1711900798000L, device2); // 2024-03-31 23:59:58+08:00 + System.out.println("src filter:" + getCount(session_src, sql)); + + consume_data(consumer, session_dest); + check_count_non_strict( + 14, "select count(s_0) from " + device, "End time limit data 2:s_0 " + device); + check_count(0, "select count(s_1) from " + device, "End time limit data 2:s_1 " + device); + check_count(0, "select count(s_0) from " + device2, "End time limit data 2: s_0 " + device2); + check_count(0, "select count(s_1) from " + device2, "End time limit data 2: s_1 " + device2); + + consumer.unsubscribe(topicName); + consumer.subscribe(topicName); + consume_data(consumer, session_dest); + check_count_non_strict( + 14, "select count(s_0) from " + device, "End time limit data 2:s_0 " + device); + check_count(0, "select count(s_1) from " + device, "End time limit data 2:s_1 " + device); + check_count(0, "select count(s_0) from " + device2, "End time limit data 2: s_0 " + device2); + check_count(0, "select count(s_1) from " + device2, "End time limit data 2: s_1 " + device2); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBTimeTsTsfilePullConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBTimeTsTsfilePullConsumerIT.java new file mode 100644 index 0000000000000..404ac75f99e19 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/loose_range/IoTDBTimeTsTsfilePullConsumerIT.java @@ -0,0 +1,195 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.loose_range; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/*** + * PullConsumer + * pattern: ts + * format: tsfile + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBTimeTsTsfilePullConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.TimeTsTsfilePullConsumer"; + private static final String database2 = "root.TimeTsTsfilePullConsumer"; + private static final String topicName = "TopicTimeTsTsfilePullConsumer"; + private static final String device = database + ".d_0"; + private static final String pattern = device + ".s_0"; + private static final String device2 = database + ".d_1"; + private static final String device3 = database2 + ".d_2"; + private static SubscriptionPullConsumer consumer; + private static List schemaList = new ArrayList<>(); + private static List rowCountList; + private long nowTimestamp; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + nowTimestamp = System.currentTimeMillis(); + createTopic_s( + topicName, + pattern, + null, + String.valueOf(nowTimestamp), + true, + TopicConstant.MODE_LIVE_VALUE, + TopicConstant.LOOSE_RANGE_TIME_VALUE); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement("create timeseries " + device3 + ".s_0 int32;"); + session_dest.executeNonQueryStatement("create timeseries " + device3 + ".s_0 int32;"); + session_src.executeNonQueryStatement("create timeseries " + device3 + ".s_1 double;"); + session_dest.executeNonQueryStatement("create timeseries " + device3 + ".s_1 double;"); + session_src.executeNonQueryStatement( + "insert into " + device3 + "(time,s_0,s_1)values(1000,132,4567.89);"); + 
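// The insert above (d_2) and the one below (d_1) seed one historical row on devices the
+    // single-series pattern does not match, so the tsfile assertions in do_test() can show
+    // that those devices are never delivered (their expected row counts stay 0).
+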
session_src.executeNonQueryStatement( + "insert into " + device2 + "(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException, InterruptedException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L, device); // 2024-01-31 08:00:00+08:00 + insert_data(1706659200000L, device2); // 2024-01-31 08:00:00+08:00 + consumer = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("ts_pattern_tsfile_pull") + .consumerGroupId("loose_range_time") + .autoCommit(false) + .fileSaveDir("target/pull-subscription") // hack for license check + .buildPullConsumer(); + consumer.open(); + + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions(topicName).forEach(System.out::println); + assertEquals( + subs.getSubscriptions(topicName).size(), 1, "show subscriptions after subscription"); + insert_data(nowTimestamp - 4000, device); + insert_data(nowTimestamp - 4000, device2); + + String sql = "select count(s_0) from " + device + " where time <=" + nowTimestamp; + System.out.println("TimeTsTsfilePullConsumer src1 filter:" + getCount(session_src, sql)); + + // Consumption data + List paths = new ArrayList<>(3); + paths.add(device); + paths.add(device2); + paths.add(device3); + + consume_tsfile_await( + consumer, paths, Arrays.asList(8, 0, 0), Arrays.asList(true, false, false)); + + // Unsubscribe + consumer.unsubscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 0, "Show subscriptions after unsubscription"); + + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals( + subs.getSubscriptions(topicName).size(), 1, "show subscriptions after re-subscribing"); + + insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00 + insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00 + // Consumption data: Progress is not retained when re-subscribing after cancellation. Full + // synchronization. 
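+    // Count check for the final await: the first round delivered 8 rows (5 historical plus
+    // the 3 of the 5 near-"now" rows stamped at or before the topic's end bound), and since
+    // progress is dropped on re-subscribe, all 8 are re-sent together with the 5 rows from
+    // 2024-02-13, which also precede the end bound, giving 13.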
+ System.out.println("TimeTsTsfilePullConsumer src2 filter:" + getCount(session_src, sql)); + + consume_tsfile_await( + consumer, paths, Arrays.asList(13, 0, 0), Arrays.asList(true, false, false)); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/mode/IoTDBSnapshotDevicePullConsumerDataSetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/mode/IoTDBSnapshotDevicePullConsumerDataSetIT.java new file mode 100644 index 0000000000000..aca980b88e07a --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/mode/IoTDBSnapshotDevicePullConsumerDataSetIT.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.mode; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBSnapshotDevicePullConsumerDataSetIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.SnapshotDevicePullConsumerDataSet"; + private static final String database2 = "root.SnapshotDevicePullConsumerDataSet"; + private static final String topicName = "topicSnapshotDevicePullConsumerDataSet"; + private String device = database + ".d_0"; + + private String pattern = device + ".**"; + private static SubscriptionPullConsumer consumer; + private List schemaList = new ArrayList<>(); + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s(topicName, pattern, null, "now", false, 
TopicConstant.MODE_SNAPSHOT_VALUE, null); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 float;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 float;"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + consumer.close(); + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += row * 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + consumer = create_pull_consumer("pull_mode", "device_snapshot_dataset", false, null); + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + Thread.sleep(1000); + insert_data(System.currentTimeMillis() - 30000L); + // Consumption data + consume_data(consumer, session_dest); + String sql = "select count(s_0) from " + device; + System.out.println("src: " + getCount(session_src, sql)); + check_count(4, sql, "Consumption data:" + pattern); + check_count(4, "select count(s_1) from " + device, "Consumption data: s_1"); + check_count(0, "select count(s_0) from " + database + ".d_1", "Consumption Data: d_1"); + check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + insert_data(System.currentTimeMillis()); + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + 
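// Note: insert_data advances the timestamp by row * 2000, so rows 0 and 1 collide and
+    // each call lands only 4 distinct points; hence the expected counts of 4 above and 8
+    // below once the snapshot is re-taken.
+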
assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + // Consumption data: Progress is not retained after unsubscribing and re-subscribing. Full + // synchronization. + consume_data(consumer, session_dest); + check_count(8, "select count(s_0) from " + device, "Consume data again:" + pattern); + check_count(8, "select count(s_1) from " + device, "Consumption data: s_1"); + while (!consumer.allTopicMessagesHaveBeenConsumed()) { + Thread.sleep(1000); + } + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/mode/IoTDBSnapshotDevicePullConsumerTsfileIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/mode/IoTDBSnapshotDevicePullConsumerTsfileIT.java new file mode 100644 index 0000000000000..386ab0595453c --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/mode/IoTDBSnapshotDevicePullConsumerTsfileIT.java @@ -0,0 +1,171 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.mode; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/*** + * PullConsumer + * pattern: device + * tsfile + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBSnapshotDevicePullConsumerTsfileIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.SnapshotDevicePullConsumerTsfile"; + private static final String database2 = "root.SnapshotDevicePullConsumerTsfile"; + private static final String device = database + ".d_0"; + private static final String topicName = "topicSnapshotDevicePullConsumerTsfile"; + private static final String pattern = device + ".**"; + private static SubscriptionPullConsumer consumer; + private static List schemaList = new ArrayList<>(); + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s(topicName, pattern, null, null, true, TopicConstant.MODE_SNAPSHOT_VALUE, null); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + 
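// The topic is created with MODE_SNAPSHOT and no time bounds; the behaviour exercised in
+    // do_test() is that every subscribe() takes a fresh one-shot snapshot, so re-subscribing
+    // re-delivers all matching rows (5 tsfile rows first, 10 after the second subscribe).
+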
subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException, InterruptedException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + + consumer = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("device_tsfile_snapshot") + .consumerGroupId("pull_mode") + .autoCommit(false) + .fileSaveDir("target/pull-subscription") // hack for license check + .buildPullConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + + insert_data(System.currentTimeMillis()); + System.out.println("src :" + getCount(session_src, "select count(s_0) from " + device)); + + // Consumption data + List devices = new ArrayList<>(3); + devices.add(device); + devices.add(database + ".d_1"); + devices.add(database2 + ".d_2"); + consume_tsfile_await(consumer, devices, Arrays.asList(5, 0, 0)); + + // Unsubscribe + consumer.unsubscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 0, "Show subscriptions after unsubscription"); + + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + + // Consumption data: Progress is not retained after unsubscribing and then re-subscribing. Full + // synchronization. + consume_tsfile_await(consumer, devices, Arrays.asList(10, 0, 0)); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBConsumer2With1TopicShareProcessDataSetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBConsumer2With1TopicShareProcessDataSetIT.java new file mode 100644 index 0000000000000..f3bbb2f50b8a3 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBConsumer2With1TopicShareProcessDataSetIT.java @@ -0,0 +1,144 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.multi; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBConsumer2With1TopicShareProcessDataSetIT + extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.Consumer2With1TopicShareProcessDataSet"; + private static final String device = database + ".d_0"; + private static final String topicName = "topicConsumer2With1TopicShareProcessDataSet"; + private String pattern = device + ".**"; + private SubscriptionPullConsumer consumer2; + private static SubscriptionPullConsumer consumer; + private static List schemaList = new ArrayList<>(); + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s(topicName, pattern, null, null, false); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest2.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest2.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + consumer2.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, 
StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + consumer = create_pull_consumer("g1", "c1", false, null); + consumer2 = create_pull_consumer("g1", "c2", false, null); + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + // Subscribe + assertEquals(subs.getSubscriptions().size(), 0, "Before subscribing, show subscriptions"); + consumer.subscribe(topicName); + consumer2.subscribe(topicName); + System.out.println("After subscription:"); + subs.getSubscriptions().forEach((System.out::println)); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + + consume_data(consumer, session_dest); + check_count( + 5, "select count(s_0) from " + device, "Consumption subscription before data: s_0 "); + check_count( + 5, "select count(s_1) from " + device, "Consumption subscription before data: s_1 "); + // Subscribe and then write data + insert_data(System.currentTimeMillis()); + consume_data(consumer2, session_dest2); + check_count2(5, "select count(s_0) from " + device, "Consumption subscription data: s_0"); + check_count2(5, "select count(s_1) from " + device, "Consumption subscription data: s_1"); + + // Consumed data will not be consumed again + consume_data(consumer, session_dest); + check_count(5, "select count(s_0) from " + device, "Consumption subscription before data: s_0"); + check_count( + 5, "select count(s_1) from " + device, "Consumption subscription previous data: s_1"); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBConsumer2With1TopicShareProcessTsfileIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBConsumer2With1TopicShareProcessTsfileIT.java new file mode 100644 index 0000000000000..a0db66383b6e8 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBConsumer2With1TopicShareProcessTsfileIT.java @@ -0,0 +1,192 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.multi; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * format: tsfile + * pattern:device + * Same group pull consumer share progress + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBConsumer2With1TopicShareProcessTsfileIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.Consumer2With1TopicShareProcessTsfile"; + private static final String device = database + ".d_0"; + private static final String topicName = "topicConsumer2With1TopicShareProcessTsfile"; + private static List schemaList = new ArrayList<>(); + private String pattern = device + ".**"; + private SubscriptionPullConsumer consumer2; + private static SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createTopic_s(topicName, pattern, null, null, true); + createDB(database); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest2.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest2.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + consumer2.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, 
timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + consumer = create_pull_consumer("tsfile_group_share_process", "c1", false, null); + consumer2 = create_pull_consumer("tsfile_group_share_process", "c2", false, 10L); + // Subscribe + assertEquals( + subs.getSubscriptions(topicName).size(), 0, "Show subscriptions before subscribing"); + consumer.subscribe(topicName); + consumer2.subscribe(topicName); + subs.getSubscriptions(topicName).forEach((System.out::println)); + assertEquals( + subs.getSubscriptions(topicName).size(), 1, "show subscriptions after subscription"); + // insert 1000 records + Thread thread = + new Thread( + () -> { + long timestamp = 1706659200000L; // 2024-01-31 08:00:00+08:00 + for (int i = 0; i < 20; i++) { + try { + insert_data(timestamp); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } + timestamp += 20000; + } + }); + AtomicInteger rowCount1 = new AtomicInteger(0); + AtomicInteger rowCount2 = new AtomicInteger(0); + Thread thread1 = + new Thread( + () -> { + try { + rowCount1.addAndGet(consume_tsfile(consumer, device)); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + Thread thread2 = + new Thread( + () -> { + try { + rowCount2.addAndGet(consume_tsfile(consumer2, device)); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + + thread1.start(); + thread2.start(); + thread.start(); + thread1.join(); + thread2.join(); + thread.join(); + + System.out.println("src=" + getCount(session_src, "select count(s_0) from " + device)); + System.out.println("rowCount1=" + rowCount1.get()); + System.out.println("rowCount2=" + rowCount2.get()); + AWAIT.untilAsserted( + () -> + assertGte( + rowCount1.get() + rowCount2.get(), + getCount(session_src, "select count(s_0) from " + device), + "consumer share process rowCount1=" + + rowCount1.get() + + " rowCount2=" + + rowCount2.get() + + " src=" + + getCount(session_src, "select count(s_0) from " + device))); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBMultiGroupVsMultiConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBMultiGroupVsMultiConsumerIT.java new file mode 100644 index 0000000000000..68f01be17a1a2 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBMultiGroupVsMultiConsumerIT.java @@ -0,0 +1,214 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.multi;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.IMeasurementSchema;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBMultiGroupVsMultiConsumerIT extends AbstractSubscriptionRegressionIT {
+  private static final String database = "root.test.pullMultiGroupVsMultiConsumer";
+  private static final String device = database + ".d_0";
+  private static List<IMeasurementSchema> schemaList = new ArrayList<>();
+  private String topicNamePrefix = "TopicPullMultiGroupVsMultiConsumer_";
+  private int tsCount = 10;
+  private int consumertCount = 10;
+  private List<SubscriptionPullConsumer> consumers = new ArrayList<>(consumertCount);
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    for (int i = 0; i < tsCount; i++) {
+      createTopic_s(topicNamePrefix + i, device + ".s_" + i, null, null, false);
+      session_src.createTimeseries(
+          device + ".s_" + i, TSDataType.INT32, TSEncoding.RLE, CompressionType.LZMA2);
+      session_dest.createTimeseries(
+          device + ".s_" + i, TSDataType.INT32, TSEncoding.RLE, CompressionType.LZMA2);
+      session_dest2.createTimeseries(
+          device + ".s_" + i, TSDataType.INT32, TSEncoding.RLE, CompressionType.LZMA2);
+      schemaList.add(new MeasurementSchema("s_" + i, TSDataType.INT32));
+    }
+    System.out.println("topics:" + subs.getTopics());
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    for (SubscriptionPullConsumer c : consumers) {
+      try {
+        c.close();
+      } catch (Exception e) {
+        // best-effort close: the consumer may already be closed
+      }
+    }
+    for (int i = 0; i < tsCount; i++) {
+      subs.dropTopic(topicNamePrefix + i);
+    }
+    dropDB(database);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 5);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      for (int i = 0; i < tsCount; i++) {
+        tablet.addValue(
+            schemaList.get(i).getMeasurementId(), rowIndex, (row + 1) * 20 + i * 1000 + row);
+      }
+      timestamp += 2000;
+    }
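+    // Each call writes 5 rows covering all 10 series and flushes, so every per-series
+    // count asserted in do_test() moves in steps of 5.
+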
session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + /*** + * |c0|t0|g1| + * |c1|t0|g1| + * |c2|t1|g1| + * |c3|t1|g1| + * |c4|t2,t3|g2| + * |c5|t3,t4|g2| + * |c6|t2,t4|g2| + * |c7|t0,t3|g3| + * |c8|t6|g3| + * |c9|t0,t3|g3| + */ + @Test + public void do_test() + throws TException, + IoTDBConnectionException, + IOException, + StatementExecutionException, + InterruptedException { + int i = 0; + for (i = 0; i < 4; i++) { + consumers.add( + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_" + i) + .consumerGroupId("pull_group_id_1") + .buildPullConsumer()); + } + for (; i < 7; i++) { + consumers.add( + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_" + i) + .consumerGroupId("pull_group_id_2") + .buildPullConsumer()); + } + for (; i < consumertCount; i++) { + consumers.add( + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_" + i) + .consumerGroupId("pull_group_id_3") + .buildPullConsumer()); + } + for (int j = 0; j < consumertCount; j++) { + consumers.get(j).open(); + } + + consumers.get(0).subscribe(topicNamePrefix + 0); + consumers.get(1).subscribe(topicNamePrefix + 0); + consumers.get(2).subscribe(topicNamePrefix + 1); + consumers.get(3).subscribe(topicNamePrefix + 1); + consumers.get(4).subscribe(topicNamePrefix + 2, topicNamePrefix + 3); + consumers.get(5).subscribe(topicNamePrefix + 3, topicNamePrefix + 4); + consumers.get(6).subscribe(topicNamePrefix + 2, topicNamePrefix + 4); + consumers.get(7).subscribe(topicNamePrefix + 0, topicNamePrefix + 3); + consumers.get(8).subscribe(topicNamePrefix + 6); + consumers.get(9).subscribe(topicNamePrefix + 0, topicNamePrefix + 3); + + subs.getSubscriptions().forEach(System.out::println); + // Write data + System.out.println("Write data 1"); + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + consume_data(consumers.get(0), session_dest); + System.out.println("src:" + getCount(session_src, "select count(s_0) from " + device)); + check_count(5, "select count(s_0) from " + device, "1 post-consumption check: s_0"); + for (i = 1; i < tsCount; i++) { + check_count(0, "select count(s_" + i + ") from " + device, "1 pre-consumption check: s_" + i); + } + consume_data(consumers.get(2), session_dest); + check_count(5, "select count(s_1) from " + device, "1 post-consumption check: s_1"); + consume_data(consumers.get(4), session_dest); + check_count(5, "select count(s_2) from " + device, "1 post-consumption check: s_2"); + check_count(5, "select count(s_3) from " + device, "1 post-consumption check: s_3"); + for (i = 4; i < tsCount; i++) { + check_count(0, "select count(s_" + i + ") from " + device, "1 pre-consumption check: s_" + i); + } + + System.out.println("Write data 2"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + System.out.println("src:" + getCount(session_src, "select count(s_0) from " + device)); + consume_data(consumers.get(1), session_dest2); + consume_data(consumers.get(3), session_dest2); + consume_data(consumers.get(5), session_dest2); + consume_data(consumers.get(8), session_dest2); + + for (i = 0; i < 4; i++) { + if (i == 2) continue; + check_count2( + 5, "select count(s_" + i + ") from " + device, "2 pre-consumption check: s_" + i); + } + check_count2(10, "select count(s_4) from " + device, "2 check dest2:s_4 after consumption"); + check_count2(10, "select count(s_6) from " + device, "2 check dest2:s_6 after 
consumption"); + + consume_data(consumers.get(7), session_dest); + check_count(10, "select count(s_0) from " + device, "2 post-consumption check: s_0"); + check_count(10, "select count(s_3) from " + device, "2 post-consumption check: s_3"); + + System.out.println("Write data 3"); + insert_data(System.currentTimeMillis()); + consume_data(consumers.get(7), session_dest2); + check_count2(10, "select count(s_0) from " + device, "3 check dest2:s_0 after consumption"); + check_count2(10, "select count(s_3) from " + device, "3 consume check dest2:s_3"); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBOneConsumerMultiTopicsDatasetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBOneConsumerMultiTopicsDatasetIT.java new file mode 100644 index 0000000000000..1dad85d5af6c4 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBOneConsumerMultiTopicsDatasetIT.java @@ -0,0 +1,182 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.multi;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.IMeasurementSchema;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+/***
+ * 1 consumer subscribes to 2 topics: fixed time range
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBOneConsumerMultiTopicsDatasetIT extends AbstractSubscriptionRegressionIT {
+  private static final String database = "root.test.OneConsumerMultiTopicsDataset";
+  private static final String device = database + ".d_0";
+  private static List<IMeasurementSchema> schemaList = new ArrayList<>();
+
+  private String pattern = device + ".s_0";
+  private String pattern2 = device + ".s_1";
+  private String topicName = "topic1_OneConsumerMultiTopicsDataset";
+  private String topicName2 = "topic2_OneConsumerMultiTopicsDataset";
+  private static SubscriptionPullConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createTopic_s(
+        topicName, pattern, "2024-01-01T00:00:00+08:00", "2024-03-01T00:00:00+08:00", false);
+    createTopic_s(
+        topicName2, pattern2, "2024-01-01T00:00:00+08:00", "2024-03-13T00:00:00+08:00", false);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach(System.out::println);
+    assertTrue(subs.getTopic(topicName).isPresent(), "create show topics");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    consumer.close();
+    subs.dropTopic(topicName);
+    subs.dropTopic(topicName2);
+    dropDB(database);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += 2000;
+    }
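+    // 5 rows per call, 2 s apart; topic1 ends 2024-03-01 and topic2 ends 2024-03-13, so a
+    // write only raises the s_0 / s_1 counts asserted in do_test() when its timestamps
+    // fall inside the corresponding topic window.
+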
session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + Thread.sleep(1000); + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + session_src.executeNonQueryStatement( + "insert into " + + device + + "(time,s_0,s_1)values(1710288000000,313,6.78);"); // 2024-03-13 08:00:00+08:00 + // Subscribe + consumer = create_pull_consumer("multi_1consumer_multiTopics", "c1", false, null); + assertEquals(subs.getSubscriptions().size(), 0, "Before subscription show subscriptions"); + + consumer.subscribe(topicName, topicName2); + subs.getSubscriptions().forEach((System.out::println)); + assertEquals(subs.getSubscriptions().size(), 2, "show subscriptions after subscription"); + + Thread thread = + new Thread( + new Runnable() { + @Override + public void run() { + try { + insert_data(System.currentTimeMillis()); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + }); + thread.start(); + thread.join(); + String sql1 = "select count(s_0) from " + device; + String sql2 = "select count(s_1) from " + device; + System.out.println("src s_0:" + getCount(session_src, sql1)); + System.out.println("src s_1:" + getCount(session_src, sql2)); + // Consumption data + consume_data(consumer, session_dest); + System.out.println("dest s_0:" + getCount(session_dest, sql1)); + System.out.println("dest s_1:" + getCount(session_dest, sql2)); + AWAIT.untilAsserted( + () -> { + check_count(5, sql1, "Consumption data:" + pattern); + check_count(5, sql2, "Consumption data:" + pattern2); + }); + // Unsubscribe + consumer.unsubscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + + // Subscribe and then write data + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + session_src.executeNonQueryStatement( + "insert into " + + device + + "(time,s_0,s_1)values(1703980800000,1231,321.45);"); // 2023-12-31 08:00:00+08:00 + + // Consumption data + consume_data(consumer, session_dest); + AWAIT.untilAsserted( + () -> { + check_count(5, sql1, "consume data again:" + pattern); + check_count(10, sql2, "consume data again:" + pattern2); + }); + // Unsubscribe + consumer.unsubscribe(topicName, topicName2); + System.out.println("###### Query after unsubscription again:"); + subs.getSubscriptions().forEach((System.out::println)); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBOneConsumerMultiTopicsMixIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBOneConsumerMultiTopicsMixIT.java new file mode 100644 index 0000000000000..2155d09ca8358 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBOneConsumerMultiTopicsMixIT.java @@ -0,0 +1,331 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.multi; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionMessage; +import org.apache.iotdb.session.subscription.payload.SubscriptionMessageType; +import org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant; +import org.apache.iotdb.subscription.it.Retry; +import org.apache.iotdb.subscription.it.RetryRule; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.read.TsFileReader; +import org.apache.tsfile.read.common.Path; +import org.apache.tsfile.read.common.RowRecord; +import org.apache.tsfile.read.expression.QueryExpression; +import org.apache.tsfile.read.query.dataset.QueryDataSet; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * 1 consumer subscribes to 2 topics: Historical data + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBOneConsumerMultiTopicsMixIT extends AbstractSubscriptionRegressionIT { + + @Rule public RetryRule retryRule = new RetryRule(); + + private static final String database = "root.test.OneConsumerMultiTopicsMix"; + private static final String device = database + ".d_0"; + private String pattern = device + ".s_0"; + private String pattern2 = "root.**"; + private String topicName = "topic1_OneConsumerMultiTopicsMix"; + private String topicName2 = "topic2_OneConsumerMultiTopicsMix"; + private List schemaList = new ArrayList<>(); + private SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s(topicName, pattern, null, "now", false); + createTopic_s(topicName2, pattern2, null, "now", true); + session_src.createTimeseries( + device + ".s_0", TSDataType.FLOAT, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", 
TSDataType.TEXT, TSEncoding.DICTIONARY, CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.FLOAT));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.TEXT));
+    assertTrue(subs.getTopic(topicName).isPresent(), "create show topics");
+    assertTrue(subs.getTopic(topicName2).isPresent(), "Create show topics 2");
+  }
+
+  @Override
+  protected void setUpConfig() {
+    super.setUpConfig();
+
+    IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(sender);
+    IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(receiver1);
+    IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(receiver2);
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+    }
+    subs.dropTopic(topicName);
+    subs.dropTopic(topicName2);
+    dropDB(database);
+    schemaList.clear();
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row + 2.45f);
+      tablet.addValue("s_1", rowIndex, "rowIndex" + rowIndex);
+      timestamp += 2000;
+    }
+    session_src.insertTablet(tablet);
+    session_src.executeNonQueryStatement("flush;");
+  }
+
+  @Retry
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    // Write data before subscribing
+    insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00
+    session_src.executeNonQueryStatement(
+        "insert into "
+            + device
+            + "(time,s_0,s_1)values(1710288000000,313,'2024-03-13 08:00:00+08:00');"); // 2024-03-13 08:00:00+08:00
+    session_src.executeNonQueryStatement("flush;");
+    // Subscribe
+    consumer = create_pull_consumer("multi_1consumer_mix", "tsfile_dataset", false, null);
+    System.out.println("###### Before Subscription Query:");
+    subs.getSubscriptions().forEach((System.out::println));
+    assertEquals(subs.getSubscriptions().size(), 0, "Before subscription show subscriptions");
+    consumer.subscribe(topicName, topicName2);
+    long timestamp = System.currentTimeMillis();
+    System.out.println("###### Subscription Query:");
+    subs.getSubscriptions().forEach((System.out::println));
+    assertEquals(subs.getSubscriptions().size(), 2, "show subscriptions after subscription");
+    // Subscribe and then write data
+    Thread thread =
+        new Thread(
+            new Runnable() {
+              @Override
+              public void run() {
+                try {
+                  insert_data(System.currentTimeMillis());
+                } catch (Exception e) {
+                  throw new RuntimeException(e);
+                }
+              }
+            });
+    thread.start();
+
+    AtomicBoolean isClosed = new AtomicBoolean(false);
+    AtomicInteger rowCount = new AtomicInteger(0);
+    Thread thread2 =
+        new Thread(
+            () -> {
+              while (!isClosed.get()) {
+                List<SubscriptionMessage> messages = consumer.poll(Duration.ofMillis(10000));
+                for (final SubscriptionMessage message : messages) {
+                  final short messageType = message.getMessageType();
+                  if (SubscriptionMessageType.isValidatedMessageType(messageType)) {
+                    switch (SubscriptionMessageType.valueOf(messageType)) {
+                      case SESSION_DATA_SETS_HANDLER:
+                        for (final Iterator<Tablet> it =
+                                message.getSessionDataSetsHandler().tabletIterator();
+                            it.hasNext(); ) {
+                          final Tablet tablet = it.next();
+                          try {
+                            session_dest.insertTablet(tablet);
+                            //
+                            // System.out.println(FORMAT.format(new Date()) + " " + tablet.rowSize);
+                          } catch (StatementExecutionException e) {
+                            throw new RuntimeException(e);
+                          } catch (IoTDBConnectionException e) {
+                            throw new RuntimeException(e);
+                          }
+                        }
+                        break;
+                      case TS_FILE_HANDLER:
+                        try {
+                          TsFileReader reader = message.getTsFileHandler().openReader();
+                          QueryDataSet dataset =
+                              reader.query(
+                                  QueryExpression.create(
+                                      Collections.singletonList(new Path(device, "s_1", true)),
+                                      null));
+                          while (dataset.hasNext()) {
+                            rowCount.addAndGet(1);
+                            RowRecord next = dataset.next();
+                            System.out.println(
+                                device + ".s_1:" + next.getTimestamp() + "," + next.getFields());
+                          }
+                        } catch (IOException e) {
+                          throw new RuntimeException(e);
+                        }
+                        consumer.commitSync(messages);
+                        break;
+                    }
+                  }
+                }
+              }
+            });
+    Thread thread3 =
+        new Thread(
+            () -> {
+              while (!isClosed.get()) {
+                List<SubscriptionMessage> messages = consumer.poll(Duration.ofMillis(10000));
+                for (final SubscriptionMessage message : messages) {
+                  final short messageType = message.getMessageType();
+                  if (SubscriptionMessageType.isValidatedMessageType(messageType)) {
+                    switch (SubscriptionMessageType.valueOf(messageType)) {
+                      case SESSION_DATA_SETS_HANDLER:
+                        for (final Iterator<Tablet> it =
+                                message.getSessionDataSetsHandler().tabletIterator();
+                            it.hasNext(); ) {
+                          final Tablet tablet = it.next();
+                          try {
+                            session_dest.insertTablet(tablet);
+                            System.out.println(FORMAT.format(new Date()) + " " + tablet.rowSize);
+                          } catch (StatementExecutionException e) {
+                            throw new RuntimeException(e);
+                          } catch (IoTDBConnectionException e) {
+                            throw new RuntimeException(e);
+                          }
+                        }
+                        break;
+                      case TS_FILE_HANDLER:
+                        try {
+                          TsFileReader reader = message.getTsFileHandler().openReader();
+                          QueryDataSet dataset =
+                              reader.query(
+                                  QueryExpression.create(
+                                      Collections.singletonList(new Path(device, "s_1", true)),
+                                      null));
+                          while (dataset.hasNext()) {
+                            rowCount.addAndGet(1);
+                            RowRecord next = dataset.next();
+                            System.out.println(
+                                device + ":" + next.getTimestamp() + "," + next.getFields());
+                          }
+                        } catch (IOException e) {
+                          throw new RuntimeException(e);
+                        }
+                        consumer.commitSync(messages);
+                        break;
+                    }
+                  }
+                }
+              }
+            });
+    thread2.start();
+    thread.join();
+    thread2.join(5000);
+    String sql1 = "select count(s_0) from " + device + " where time <= " + timestamp;
+    String sql2 = "select count(s_1) from " + device + " where time <= " + timestamp;
+
+    System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql1));
+    AWAIT.untilAsserted(
+        () -> {
+          // Consumption data
+          check_count(6, sql1, "Consumption data:" + pattern);
+          check_count(0, sql2, "Consumption data:" + pattern2);
+          assertEquals(rowCount.get(), 6, "tsfile consumer");
+        });
+    // Unsubscribe
+    consumer.unsubscribe(topicName);
+    System.out.println("###### After unsubscribing query:");
+    subs.getSubscriptions().forEach((System.out::println));
+    assertEquals(subs.getSubscriptions().size(), 1, "Unsubscribe 1 and then show subscriptions");
+
+    // Unsubscribe and then write data
+    insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00
+    session_src.executeNonQueryStatement(
+        "insert into "
+            + device
+            + "(time,s_0,s_1)values(1703980800000,3.45,'2023-12-31 08:00:00+08:00');"); // 2023-12-31 08:00:00+08:00
+    insert_data(System.currentTimeMillis());
+    session_src.executeNonQueryStatement("flush;");
+    System.out.println(
+        FORMAT.format(new Date())
+            + " Unsubscribe after writing data src:"
+            + getCount(session_src, sql1));
+
+    thread3.start();
+    thread3.join(5000);
+    System.out.println(FORMAT.format(new Date()));
+    AWAIT.untilAsserted(
+        () -> {
+          assertEquals(
rowCount.get(), 12, "Re-consume data: tsfile consumer " + FORMAT.format(new Date())); + check_count(6, sql1, "consume data again:" + pattern); + check_count(0, sql2, "Reconsume data:" + pattern2); + }); + // close + consumer.close(); + isClosed.set(true); + try { + consumer.subscribe(topicName, topicName2); + } catch (Exception e) { + System.out.println("subscribe again after close, expecting an exception"); + } + assertEquals(subs.getSubscriptions().size(), 0, "show subscriptions after close"); + subs.getSubscriptions().forEach((System.out::println)); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBOneConsumerMultiTopicsTsfileIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBOneConsumerMultiTopicsTsfileIT.java new file mode 100644 index 0000000000000..d1b4d38f36eea --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/multi/IoTDBOneConsumerMultiTopicsTsfileIT.java @@ -0,0 +1,215 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.multi; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant; +import org.apache.iotdb.subscription.it.Retry; +import org.apache.iotdb.subscription.it.RetryRule; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +/*** + * 1 consumer subscribes to 2 topics: historical data + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBOneConsumerMultiTopicsTsfileIT extends AbstractSubscriptionRegressionIT { + + @Rule public RetryRule retryRule = new RetryRule(); + + private static final String database = "root.test.OneConsumerMultiTopicsTsfile"; + private static final String device = database + ".d_0"; + private static List schemaList = new ArrayList<>(); + private String pattern = database + ".**"; + private String database2 = "root.OneConsumerMultiTopicsTsfile"; + private String pattern2 = database2 + ".**"; + private String device2 = database2 + ".d_0"; + private String topicName = "topic1_OneConsumerMultiTopicsTsfile"; + private String topicName2 = "topic2_OneConsumerMultiTopicsTsfile"; + private SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s(topicName, pattern, "now", null, true); + createTopic_s(topicName2, pattern2, "now", null, true); + session_src.createTimeseries( + device + ".s_0", TSDataType.FLOAT, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.TEXT, TSEncoding.DICTIONARY, CompressionType.LZMA2); + session_src.createTimeseries( + device2 + ".s_0", TSDataType.FLOAT, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device2 + ".s_1", TSDataType.TEXT, TSEncoding.DICTIONARY, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.FLOAT)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.TEXT)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + assertTrue(subs.getTopic(topicName2).isPresent(), "Create show topics 2"); + } + + @Override + protected void setUpConfig() { + super.setUpConfig(); + + IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(sender); + IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(receiver1); + IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(receiver2); + } + + @Override + @After + public 
void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + subs.dropTopic(topicName2); + dropDB(database); + dropDB(database2); + schemaList.clear(); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row + 2.45f); + tablet.addValue("s_1", rowIndex, "rowIndex" + rowIndex); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Retry + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + Thread.sleep(1000); + // Write data before subscribing + session_src.executeNonQueryStatement( + "insert into " + + device + + "(time,s_0,s_1)values(1710288000000,313,'2024-03-13 08:00:00+08:00');"); // 2024-03-13 + // 08:00:00+08:00 + insert_data(1706659200000L, device); // 2024-01-31 08:00:00+08:00 + insert_data(1706659200000L, device2); // 2024-01-31 08:00:00+08:00 + // Subscribe + consumer = create_pull_consumer("multi_tsfile_2topic", "1_consumer", false, null); + System.out.println("###### Subscription Query Before:"); + subs.getSubscriptions().forEach((System.out::println)); + assertEquals(subs.getSubscriptions().size(), 0, "Before subscription show subscriptions"); + consumer.subscribe(topicName, topicName2); + System.out.println("###### Subscribe and query:"); + subs.getSubscriptions().forEach((System.out::println)); + assertEquals(subs.getSubscriptions().size(), 2, "subscribe then show subscriptions"); + + // Subscribe and then write data + Thread thread = + new Thread( + () -> { + long timestamp = System.currentTimeMillis(); + for (int i = 0; i < 20; i++) { + try { + insert_data(timestamp, device); + insert_data(timestamp, device2); + timestamp += 30000; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + }); + thread.start(); + thread.join(); + + System.out.println( + "src insert " + device + " :" + getCount(session_src, "select count(s_0) from " + device)); + System.out.println( + "src insert " + + device2 + + " :" + + getCount(session_src, "select count(s_0) from " + device2)); + // After first consumption + List devices = new ArrayList<>(2); + devices.add(device); + devices.add(device2); + consume_tsfile_await(consumer, devices, Arrays.asList(100, 100)); + // Unsubscribe + consumer.unsubscribe(topicName); + System.out.println("###### After cancellation query:"); + subs.getSubscriptions(topicName).forEach((System.out::println)); + assertEquals( + subs.getSubscriptions(topicName).size(), 0, "Unsubscribe 1 and then show subscriptions"); + + // Unsubscribe and then write data + insert_data(System.currentTimeMillis(), device2); + session_src.executeNonQueryStatement( + "insert into " + + device + + "(time,s_0,s_1)values(1703980800000,3.45,'2023-12-31 08:00:00+08:00');"); // 2023-12-31 08:00:00+08:00 + consume_tsfile_await( + consumer, Collections.singletonList(device2), Collections.singletonList(5)); + + // close + consumer.close(); + try { + consumer.subscribe(topicName, topicName2); + } catch (Exception e) { + System.out.println("subscribe again after close, expecting an exception"); + } + 
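+    // A consumer that has been closed is expected to reject further subscribe
+    // calls, so no subscription should have been re-created at this point.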
assertEquals(subs.getSubscriptions(topicName).size(), 0, "show subscriptions after close"); + subs.getSubscriptions(topicName).forEach((System.out::println)); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBDBPatternPullConsumeTsfileIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBDBPatternPullConsumeTsfileIT.java new file mode 100644 index 0000000000000..fbc355d30e465 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBDBPatternPullConsumeTsfileIT.java @@ -0,0 +1,173 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +/*** + * PullConsumer + * pattern: db + * Tsfile + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBDBPatternPullConsumeTsfileIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.DBPatternPullConsumeTsfile"; + private static final String database2 = "root.DBPatternPullConsumeTsfile"; + private static final String device = database + ".d_0"; + private static final String device2 = database + ".d_1"; + private static final String topicName = "topicDBPatternPullConsumeTsfile"; + private static List schemaList = new ArrayList<>(); + + private static final String pattern = database + ".**"; + private static SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + 
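+    // Topic with both time bounds null, i.e. an unbounded time range; the trailing
+    // boolean appears to request TsFile payloads rather than tablet datasets
+    // (inferred from the *Tsfile vs. *DataSet variants of these tests).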
createTopic_s(topicName, pattern, null, null, true); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + + session_src.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + + session_src.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + device2 + "(time,s_0,s_1)values(2000,232,567.891);"); + + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException, InterruptedException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + consumer = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("db_tsfile") + .consumerGroupId("pull_pattern") + .autoCommit(false) + .fileSaveDir("target/pull-subscription") // hack for license check + .buildPullConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions(topicName).forEach(System.out::println); + assertEquals( + subs.getSubscriptions(topicName).size(), 1, "show subscriptions after subscription"); + // insert_data(1706659200000L); //2024-01-31 08:00:00+08:00 + insert_data(System.currentTimeMillis()); + // Consumption data + consume_tsfile_with_file_count_await( + consumer, Collections.singletonList(device), Arrays.asList(10, 3)); + // Unsubscribe + consumer.unsubscribe(topicName); + assertEquals( + subs.getSubscriptions(topicName).size(), 0, "Show subscriptions after unsubscription"); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals( + subs.getSubscriptions(topicName).size(), 1, "show subscriptions after re-subscribing"); + 
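+    // Since unsubscribing discards consumption progress, the re-subscription below
+    // is expected to re-deliver the full history plus the new batch; assuming the
+    // helper's count arguments are (rows, tsfiles), totals grow from (10, 3) to
+    // (15, 4) rather than reflecting only the newly written data.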
insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + + // Consumption data: Progress is not retained after unsubscribing and resubscribing. Full + // synchronization. + consume_tsfile_with_file_count_await( + consumer, Collections.singletonList(device), Arrays.asList(15, 4)); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBDBPatternPullConsumerDataSetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBDBPatternPullConsumerDataSetIT.java new file mode 100644 index 0000000000000..3757f2941f096 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBDBPatternPullConsumerDataSetIT.java @@ -0,0 +1,149 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/*** + * format: dataset + * pattern:db + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBDBPatternPullConsumerDataSetIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.DBPatternPullConsumerDataSet"; + private static final String database2 = "root.DBPatternPullConsumerDataSet"; + private static final String device = database + ".d_0"; + private static final String topicName = "topicDBPatternPullConsumerDataSet"; + private static List schemaList = new ArrayList<>(); + private String pattern = database + ".**"; + private static SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + 
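+    // The pattern database + ".**" scopes the topic to root.test.DBPatternPullConsumerDataSet;
+    // database2 is created only as a negative control, so its d_2 series must not
+    // be delivered to the destination.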
createTopic_s(topicName, pattern, null, null, false);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_src.executeNonQueryStatement(
+        "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);");
+    session_dest.executeNonQueryStatement(
+        "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);");
+    session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;");
+    session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;");
+    session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;");
+    session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;");
+    session_src.executeNonQueryStatement(
+        "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);");
+    session_src.executeNonQueryStatement(
+        "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);");
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach((System.out::println));
+    assertTrue(subs.getTopic(topicName).isPresent(), "create show topics");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    consumer.close();
+    subs.dropTopic(topicName);
+    dropDB(database);
+    dropDB(database2);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += row * 2000;
+    }
+    session_src.insertTablet(tablet);
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    consumer = create_pull_consumer("db_dataset_snapshot", "pull_mode", false, null);
+    // Write data before subscribing
+    insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00
+    // Subscribe
+    consumer.subscribe(topicName);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+    insert_data(System.currentTimeMillis());
+    // Consumption data
+    consume_data(consumer, session_dest);
+    check_count(8, "select count(s_0) from " + device, "Consumption data: s_0");
+    check_count(8, "select count(s_1) from " + device, "Consumption data: s_1");
+    check_count(1, "select count(s_0) from " + database + ".d_1", "Consumption data:d_1");
+    check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2");
+    // Unsubscribe
+    consumer.unsubscribe(topicName);
+    // Subscribe and then write data
+    consumer.subscribe(topicName);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing");
+    insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00
+    // Consumption data: Progress is not retained when re-subscribing after cancellation.
Full + // synchronization. + consume_data(consumer, session_dest); + check_count(12, "select count(s_0) from " + device, "consume data again:s_0"); + check_count(12, "select count(s_1) from " + device, "Consumption Data: s_1"); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBDefaultPatternPullConsumerDataSetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBDefaultPatternPullConsumerDataSetIT.java new file mode 100644 index 0000000000000..2960701373759 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBDefaultPatternPullConsumerDataSetIT.java @@ -0,0 +1,144 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBDefaultPatternPullConsumerDataSetIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.DefaultPatternPullConsumerDataSet"; + private static final String database2 = "root.DefaultPatternPullConsumerDataSet"; + private static final String device = database + ".d_0"; + private static final String topicName = "topicDefaultPatternPullConsumerDataSet"; + private static SubscriptionPullConsumer consumer; + private static List schemaList = new ArrayList<>(); + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s(topicName, null, null, null, false); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, 
TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + consumer.close(); + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += row * 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + consumer = create_pull_consumer("pull_pattern", "default_pattern_dataset", false, null); + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis()); + // Consumption data + consume_data(consumer, session_dest); + check_count(8, "select count(s_0) from " + device, "Consumption data: s_0"); + check_count(8, "select count(s_1) from " + device, "Consumption data: s_1"); + check_count(1, "select count(s_0) from " + database + ".d_1", "Consumption data:d_1"); + check_count(1, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + // Consumption data: Progress is not retained after unsubscribing and resubscribing. Full + // synchronization. 
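+    // Each insert_data call yields only 4 distinct timestamps (rows 0 and 1 share
+    // the base time because of the row * 2000 increment), so three full batches
+    // re-delivered from scratch give the expected counts of 12 below.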
+ consume_data(consumer, session_dest); + check_count(12, "select count(s_0) from " + device, "consume data again:s_0"); + check_count(12, "select count(s_1) from " + device, "Consumption data: s_1"); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBDevicePatternPullConsumeTsfileIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBDevicePatternPullConsumeTsfileIT.java new file mode 100644 index 0000000000000..dc19e2201f66b --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBDevicePatternPullConsumeTsfileIT.java @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/*** + * PullConsumer + * pattern: device + * Tsfile + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBDevicePatternPullConsumeTsfileIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.DevicePatternPullConsumeTsfile"; + private static final String database2 = "root.DevicePatternPullConsumeTsfile"; + private static final String device = database + ".d_0"; + private static final String device2 = database + ".d_1"; + private static final String device3 = database2 + ".d_2"; + private static final String topicName = "topicDevicePatternPullConsumeTsfile"; + private static List schemaList = new ArrayList<>(); + + private static final String pattern = device + ".**"; + private static SubscriptionPullConsumer consumer; + + @Override + @Before + public void 
setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s(topicName, pattern, null, null, true); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException, InterruptedException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + consumer = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("device_pattern_tsfile") + .consumerGroupId("pull_pattern") + .autoCommit(false) + .fileSaveDir("target/pull-subscription") // hack for license check + .buildPullConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis()); + // Consumption data + List devices = new ArrayList<>(3); + devices.add(device); + devices.add(device2); + devices.add(device3); + consume_tsfile_await(consumer, devices, Arrays.asList(10, 0, 0)); + // Unsubscribe + consumer.unsubscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 0, "Show 
subscriptions after unsubscribe"); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + // Consumption data: Progress is not retained after unsubscribing and resubscribing. Full + // synchronization. + consume_tsfile_await(consumer, devices, Arrays.asList(15, 0, 0)); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBDevicePatternPullConsumerDataSetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBDevicePatternPullConsumerDataSetIT.java new file mode 100644 index 0000000000000..3dbb923baed9f --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBDevicePatternPullConsumerDataSetIT.java @@ -0,0 +1,168 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBDevicePatternPullConsumerDataSetIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.DevicePatternPullConsumerDataSet"; + private static final String database2 = "root.DevicePatternPullConsumerDataSet"; + private static final String device = database + ".d_0"; + private static final String device2 = database2 + ".d_2"; + private static final String topicName = "topicDevicePatternPullConsumerDataSet"; + private static List schemaList = new ArrayList<>(); + + private String pattern = device + ".**"; + public static SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s(topicName, pattern, null, "now", false); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement("create timeseries " + device2 + ".s_0 int32;"); + session_dest.executeNonQueryStatement("create timeseries " + device2 + ".s_0 int32;"); + session_src.executeNonQueryStatement("create timeseries " + device2 + ".s_1 float;"); + session_dest.executeNonQueryStatement("create timeseries " + device2 + ".s_1 float;"); + session_src.executeNonQueryStatement( + "insert into " + device2 + "(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + 
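+  // Test design note: the topic's end bound is "now", evaluated at topic creation,
+  // so only rows whose event time falls before that instant should be delivered;
+  // do_test backdates one batch by 30s to stay inside the window.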
@Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } finally { + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + } + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += row * 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + consumer = create_pull_consumer("pull_pattern", "device_dataset", false, null); + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis() - 30000L); + // Consumption data + String sql = "select count(s_0) from " + device; + consume_data_await( + consumer, + session_dest, + Collections.singletonList( + () -> { + System.out.println("src: " + getCount(session_src, sql)); + check_count(8, sql, "Consumption data:" + pattern); + check_count(8, "select count(s_1) from " + device, "Consumption data: s_1"); + check_count(0, "select count(s_0) from " + database + ".d_1", "Consumption data:d_1"); + check_count(0, "select count(s_0) from " + device2, "Consumption data:d_2"); + })); + insert_data(System.currentTimeMillis()); + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + System.out.println("src: " + getCount(session_src, sql)); + // Consumption data: Progress is not retained after unsubscribing and then re-subscribing. Full + // synchronization. + consume_data_await( + consumer, + session_dest, + Collections.singletonList( + () -> { + System.out.println("src: " + getCount(session_src, sql)); + check_count(12, "select count(s_0) from " + device, "consume data again:s_0"); + check_count(12, "select count(s_1) from " + device, "Consumption data: s_1"); + })); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBMiddleMatch2PatternPullConsumerDataSetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBMiddleMatch2PatternPullConsumerDataSetIT.java new file mode 100644 index 0000000000000..b62576feec03d --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBMiddleMatch2PatternPullConsumerDataSetIT.java @@ -0,0 +1,173 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/*** + * pattern: root.*.d_*.* + * format: dataset + * time-range: history + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBMiddleMatch2PatternPullConsumerDataSetIT + extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.MiddleMatch2PatternPullConsumerDataSet"; + private static final String database2 = "root.MiddleMatch2PatternPullConsumerDataSet"; + private static List devices = new ArrayList<>(3); + private static final String device = database + ".d_0"; + private static final String device2 = database2 + ".sd_1"; + private static final String device3 = database2 + ".d_2"; + private static final String topicName = "topicMiddleMatch2PatternPullConsumerDataSet"; + private static List schemaList = new ArrayList<>(); + + private String pattern = "root.*.d_*.*"; + private static SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s(topicName, pattern, null, "now", false); + devices.add(device); + devices.add(device2); + devices.add(device3); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + 
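+    // Negative-case schema: device2 ("sd_1") sits at the right depth but its node name
+    // does not match the pattern node "d_*", and d_0 above is one level too deep for the
+    // single-level wildcards in "root.*.d_*.*". Only device3 ("d_2", created next) matches.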
session_src.executeNonQueryStatement("create timeseries " + device3 + ".s_0 int64;"); + session_dest.executeNonQueryStatement("create timeseries " + device3 + ".s_0 int64;"); + session_src.executeNonQueryStatement("create timeseries " + device3 + ".s_1 double;"); + session_dest.executeNonQueryStatement("create timeseries " + device3 + ".s_1 double;"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + consumer.close(); + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + consumer = + create_pull_consumer("pull_pattern", "MiddleMatchPatternHistory_DataSet", false, null); + // Write data before subscribing + for (int i = 0; i < 3; i++) { + insert_data(1706659200000L, devices.get(i)); // 2024-01-31 08:00:00+08:00 + } + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + for (int i = 0; i < 3; i++) { + insert_data(System.currentTimeMillis() - 30000L, devices.get(i)); + } + // Consumption data + consume_data(consumer, session_dest); + String sql = "select count(s_0) from "; + for (int i = 0; i < 3; i++) { + System.out.println( + "src " + devices.get(i) + ": " + getCount(session_src, sql + devices.get(i))); + } + check_count(0, sql + device, "Consumption data: s_0 " + device); + check_count(0, "select count(s_1) from " + device, "Consumption data: s_1 " + device); + check_count(0, "select count(s_0) from " + device2, "Consumption data: s_0 " + device2); + check_count(0, "select count(s_1) from " + device2, "Consumption data: s_1 " + device2); + check_count(10, "select count(s_0) from " + device3, "Consumption data: s_0 " + device3); + check_count(10, "select count(s_1) from " + device3, "Consumption data: s_1 " + device3); + for (int i = 0; i < 3; i++) { + insert_data(System.currentTimeMillis(), devices.get(i)); + } + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + for (int i = 0; i < 3; i++) { + System.out.println( + "src " + devices.get(i) + ": " + getCount(session_src, sql + devices.get(i))); + } + // Consumption data: Progress is not retained when re-subscribing after cancellation. Full + // synchronization. 
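+    // Only root.<database2>.d_2.* matches root.*.d_*.*, so even after the full re-sync
+    // below, d_0 and sd_1 are still expected to count 0 on the destination.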
+ consume_data(consumer, session_dest); + check_count(0, "select count(s_0) from " + device, "consume data again:" + device); + check_count(0, "select count(s_0) from " + device2, "consume data again:" + device2); + check_count(10, "select count(s_1) from " + device3, "Consumption data:" + device3); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBMiddleMatchPatternPullConsumeTsfileIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBMiddleMatchPatternPullConsumeTsfileIT.java new file mode 100644 index 0000000000000..e667d8212c210 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBMiddleMatchPatternPullConsumeTsfileIT.java @@ -0,0 +1,176 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/*** + * PullConsumer + * pattern: device + * Tsfile + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBMiddleMatchPatternPullConsumeTsfileIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.MiddleMatchPatternPullConsumeTsfile"; + private static final String database2 = "root.MiddleMatchPatternPullConsumeTsfile"; + private static final String device = database + ".d_0"; + private static final String device2 = database + ".d_1"; + private static final String topicName = "topicMiddleMatchPatternPullConsumeTsfile"; + private static List schemaList = new ArrayList<>(); + 
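+  // Unlike the single-level "*", "**" spans any number of levels, so this pattern picks up
+  // every series under any device whose name starts with "d_", at any depth; d_0, d_1 and
+  // database2's d_2 are all expected to match in this test.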
private static final String pattern = "root.**.d_*.**"; + private static SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s(topicName, pattern, null, "now", true); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + // TODO: remove it later + @Override + protected void setUpConfig() { + super.setUpConfig(); + + IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(sender); + IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(receiver1); + IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(receiver2); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException, InterruptedException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + consumer = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("MiddleMatchPatternHistory_tsfile") + .consumerGroupId("pull_pattern") + .autoCommit(false) + .fileSaveDir("target/pull-subscription") // hack for license check + .buildPullConsumer(); + consumer.open(); + // Subscribe + 
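+    // The builder above is the raw pull-consumer lifecycle: build -> open() -> subscribe().
+    // With autoCommit(false), acknowledging progress is assumed to be handled inside the
+    // consume_tsfile_await helper; fileSaveDir is where received tsfiles are staged.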
consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis() - 20000); + // Consumption data + List devices = new ArrayList<>(3); + devices.add(device); + devices.add(device2); + devices.add(database2 + ".d_2"); + consume_tsfile_await(consumer, devices, Arrays.asList(10, 1, 1)); + // Unsubscribe + consumer.unsubscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 0, "Show subscriptions after cancellation"); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + // Consumption data: Progress is not retained after canceling and re-subscribing. Full + // synchronization. + consume_tsfile_await(consumer, devices, Arrays.asList(15, 1, 1)); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBMiddleMatchPatternPullConsumerDataSetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBMiddleMatchPatternPullConsumerDataSetIT.java new file mode 100644 index 0000000000000..d1914ff076480 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBMiddleMatchPatternPullConsumerDataSetIT.java @@ -0,0 +1,183 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBMiddleMatchPatternPullConsumerDataSetIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.MiddleMatchPatternPullConsumerDataSet"; + private static final String database2 = "root.MiddleMatchPatternPullConsumerDataSet"; + private static final String device = database + ".d_0"; + private static final String device2 = database + ".d_1"; + private static final String topicName = "topicMiddleMatchPatternPullConsumerDataSet"; + private static List schemaList = new ArrayList<>(); + + private String pattern = "root.**.d_*.s_0"; + public static SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s(topicName, pattern, null, "now", false); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 float;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 float;"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show 
topics"); + } + + @Override + @After + public void tearDown() throws Exception { + consumer.close(); + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + consumer = + create_pull_consumer("pull_pattern", "MiddleMatchPatternHistory_DataSet", false, null); + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis() - 30000L); + // Consumption data + consume_data(consumer, session_dest); + String sql = "select count(s_0) from " + device; + System.out.println("src " + database + ".d_0.s_0: " + getCount(session_src, sql)); + System.out.println( + "src " + + database + + ".d_0.s_1: " + + getCount(session_src, "select count(s_1) from " + device)); + System.out.println( + "src " + + database + + ".d_1.s_0: " + + getCount(session_src, "select count(s_0) from " + database + ".d_1")); + System.out.println( + "src " + + database2 + + ".d_2.s_0: " + + getCount(session_src, "select count(s_0) from " + database2 + ".d_2")); + System.out.println("dest " + database + ".d_0.s_0: " + getCount(session_dest, sql)); + System.out.println( + "dest " + + database + + ".d_0.s_1: " + + getCount(session_dest, "select count(s_1) from " + device)); + System.out.println( + "dest " + + database + + ".d_1.s_0: " + + getCount(session_dest, "select count(s_0) from " + database + ".d_1")); + System.out.println( + "dest " + + database2 + + ".s_0: " + + getCount(session_dest, "select count(s_0) from " + database2 + ".d_2")); + check_count(10, sql, "Consumption Data:s_0"); + check_count(0, "select count(s_1) from " + device, "Consumption Data: s_1"); + check_count(1, "select count(s_0) from " + database + ".d_1", "Consumption Data:d_1"); + check_count(1, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + insert_data(System.currentTimeMillis()); + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + System.out.println("src: " + getCount(session_src, sql)); + // Consumption data: Progress is not preserved if you unsubscribe and then resubscribe. Full + // synchronization. 
+ consume_data(consumer, session_dest); + check_count(15, "select count(s_0) from " + device, "Consume data again:s_0"); + check_count(0, "select count(s_1) from " + device, "Consumption data: s_1"); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBRootPatternPullConsumeTsfileIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBRootPatternPullConsumeTsfileIT.java new file mode 100644 index 0000000000000..c9409c5518d43 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBRootPatternPullConsumeTsfileIT.java @@ -0,0 +1,149 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/*** + * PullConsumer + * pattern: db + * Tsfile + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBRootPatternPullConsumeTsfileIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.RootPatternPullConsumeTsfile"; + private static final String device = database + ".d_0"; + private static final String topicName = "topicRootPatternPullConsumeTsfile"; + private static List schemaList = new ArrayList<>(); + + private static final String pattern = "root.**"; + public static SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s(topicName, pattern, null, null, true); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, 
CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException, InterruptedException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + consumer = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("root_tsfile") + .consumerGroupId("pull_pattern") + .autoCommit(false) + .fileSaveDir("target/pull-subscription") // hack for license check + .buildPullConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + // insert_data(1706659200000L); //2024-01-31 08:00:00+08:00 + insert_data(System.currentTimeMillis()); + // Consumption data + consume_tsfile_await( + consumer, Collections.singletonList(device), Collections.singletonList(10)); + // Unsubscribe + consumer.unsubscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 0, "show subscriptions after unsubscribe"); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + // Consumption data: Progress is not retained after unsubscribing and re-subscribing. Full + // synchronization. 
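+    // This topic has no time range (start and end are both null), so the re-sync covers
+    // every batch written so far: 10 rows from before the unsubscribe plus 5 new ones = 15.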
+ consume_tsfile_await( + consumer, Collections.singletonList(device), Collections.singletonList(15)); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBTSPatternPullConsumeTsfileIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBTSPatternPullConsumeTsfileIT.java new file mode 100644 index 0000000000000..0b60dfd330d50 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBTSPatternPullConsumeTsfileIT.java @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/*** + * PullConsumer + * pattern: TS + * history + * tsfile + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBTSPatternPullConsumeTsfileIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.DBPatternPullConsumeTsfile"; + private static final String database2 = "root.DBPatternPullConsumeTsfile"; + private static final String device = database + ".d_0"; + private static final String device2 = database + ".d_1"; + private static final String topicName = "topicDBPatternPullConsumeTsfile"; + private static List schemaList = new ArrayList<>(); + + private static final String pattern = device + ".s_0"; + + public static SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s(topicName, pattern, null, "now", true); + session_src.createTimeseries( 
+ device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_0 int32;"); + session_src.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;"); + session_dest.executeNonQueryStatement("create timeseries " + database2 + ".d_2.s_1 double;"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException, InterruptedException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + consumer = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("ts_tsfile") + .consumerGroupId("pull_pattern") + .autoCommit(false) + .fileSaveDir("target/pull-subscription") // hack for license check + .buildPullConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + // insert_data(1706659200000L); //2024-01-31 08:00:00+08:00 + insert_data(System.currentTimeMillis() - 30000L); + // Consumption data + List devices = new ArrayList<>(3); + devices.add(device); + devices.add(device2); + devices.add(database2 + ".d_2"); + consume_tsfile_await(consumer, devices, Arrays.asList(10, 0, 0)); + insert_data(System.currentTimeMillis()); + // Unsubscribe + consumer.unsubscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 0, "Show subscriptions after unsubscription"); + // Subscribe and 
then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + // Consumption data: Progress is not retained after unsubscribing and re-subscribing. Full + // synchronization. + consume_tsfile_await(consumer, devices, Arrays.asList(15, 0, 0)); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBTSPatternPullConsumerDataSetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBTSPatternPullConsumerDataSetIT.java new file mode 100644 index 0000000000000..96db73d9e94f0 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/pattern/IoTDBTSPatternPullConsumerDataSetIT.java @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBTSPatternPullConsumerDataSetIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.TSPatternPullConsumerDataSet"; + private static final String device = database + ".d_0"; + private static final String topicName = "topicTSPatternPullConsumerDataSet"; + private static List schemaList = new ArrayList<>(); + + private static final String pattern = device + ".s_0"; + public static SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s(topicName, pattern, null, null, 
false); + session_src.createTimeseries( + pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + consumer.close(); + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException, InterruptedException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += row * 2000; + } + session_src.insertTablet(tablet); + Thread.sleep(1000); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + consumer = create_pull_consumer("pull_pattern", "ts_dataset", false, null); + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis()); + // Consumption data + consume_data(consumer, session_dest); + check_count(8, "select count(s_0) from " + device, "Consumption data: s_0"); + check_count(0, "select count(s_1) from " + device, "Consumption Data: s_1"); + // Unsubscribe + consumer.unsubscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 0, "show subscriptions after unsubscription"); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + // Consumption data: Progress is not retained after unsubscribing and re-subscribing. Full + // synchronization. + consume_data(consumer, session_dest); + check_count(12, "select count(s_0) from " + device, "consume data again:s_0"); + check_count(0, "select count(s_1) from " + device, "Consumption Data: s_1"); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/time/IoTDBAllPullConsumerDataSetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/time/IoTDBAllPullConsumerDataSetIT.java new file mode 100644 index 0000000000000..05e3bff2ec654 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/time/IoTDBAllPullConsumerDataSetIT.java @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.time; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBAllPullConsumerDataSetIT extends AbstractSubscriptionRegressionIT { + private String database = "root.AllPullConsumerDataSet"; + private String device = database + ".d_0"; + private String pattern = device + ".s_0"; + // private String topicName = "`1-group.1-consumer.ts`"; + private String topicName = "topic_AllPullConsumerDataSet"; + private List schemaList = new ArrayList<>(); + private SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s(topicName, pattern, null, null, false); + session_src.createTimeseries( + pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + consumer.close(); + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", 
rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += row * 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + consumer = create_pull_consumer("pull_time", "ts_time_default_dataset", false, null); + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + // Subscribe + consumer.subscribe(topicName); + Thread.sleep(10000); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis()); + // Consumption data + consume_data(consumer, session_dest); + check_count(8, "select count(s_0) from " + device, "Consumption data:" + pattern); + check_count(0, "select count(s_1) from " + device, "Consumption data: s_1"); + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + // Consumption data: Progress is not retained after unsubscribing and re-subscribing. Full + // synchronization. + consume_data(consumer, session_dest); + check_count(12, "select count(s_0) from " + device, "consume data again:" + pattern); + check_count(0, "select count(s_1) from " + device, "Consumption data: s_1"); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/time/IoTDBHistoryPullConsumerDataSetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/time/IoTDBHistoryPullConsumerDataSetIT.java new file mode 100644 index 0000000000000..3a2df55f549e3 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/time/IoTDBHistoryPullConsumerDataSetIT.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.time; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBHistoryPullConsumerDataSetIT extends AbstractSubscriptionRegressionIT { + private String database = "root.HistoryPullConsumerDataSet"; + private String device = database + ".d_0"; + private String pattern = device + ".s_0"; + // private String topicName = "`1-group.1-consumer.ts`"; + private String topicName = "topic_HistoryPullConsumerDataSet"; + private List schemaList = new ArrayList<>(); + private SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s(topicName, pattern, null, "now", false); + session_src.createTimeseries( + pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest2.createTimeseries( + pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest2.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + consumer.close(); + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += row * 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + consumer = create_pull_consumer("pull_time", 
"ts_history_dataset", false, null); + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach((System.out::println)); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis()); + // Consumption data + consume_data(consumer, session_dest); + + // System.out.println("Check consumption data"); + check_count(4, "select count(s_0) from " + device, "Consumption data:" + pattern); + check_count(0, "select count(s_1) from " + device, "Consumption data: s_1"); + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + // System.out.println("After resubscribing:"); + // subs.getSubscriptions().forEach((System.out::println)); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + // Consumption data: Progress is not retained after canceling and re-subscribing. Full + // synchronization. + consume_data(consumer, session_dest2); + check_count2(8, "select count(s_0) from " + device, "consume data again:" + pattern); + check_count2(0, "select count(s_1) from " + device, "Consumption Data: s_1"); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/time/IoTDBRealTimePullConsumerDataSetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/time/IoTDBRealTimePullConsumerDataSetIT.java new file mode 100644 index 0000000000000..7da4243ca558b --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/time/IoTDBRealTimePullConsumerDataSetIT.java @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.time; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBRealTimePullConsumerDataSetIT extends AbstractSubscriptionRegressionIT { + private String database = "root.RealTimePullConsumerDataSet"; + private String device = database + ".d_0"; + private String pattern = device + ".s_0"; + private String topicName = "topic_RealTimePullConsumerDataSet"; + private List schemaList = new ArrayList<>(); + private SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + + createDB(database); + createTopic_s(topicName, pattern, "now", null, false); + session_src.createTimeseries( + pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest2.createTimeseries( + pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest2.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + consumer.close(); + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += row * 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + consumer = create_pull_consumer("pull_time", "ts_realtime_dataset", false, null); + // Write data before 
subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach((System.out::println)); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + + // Consumption data + consume_data(consumer, session_dest); + check_count(0, "select count(s_0) from " + device, "Consumption data:" + pattern); + insert_data(System.currentTimeMillis()); + consume_data(consumer, session_dest); + check_count(4, "select count(s_0) from " + device, "Consumption data:" + pattern); + + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + insert_data(System.currentTimeMillis()); // now + consumer.subscribe(topicName); + System.out.println("After re-subscribing:"); + subs.getSubscriptions().forEach((System.out::println)); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + // Consumption data + consume_data(consumer, session_dest2); + check_count(4, "select count(s_0) from " + device, "Consume data again:" + pattern); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/time/IoTDBTimeRangeAccuratePullConsumerDataSetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/time/IoTDBTimeRangeAccuratePullConsumerDataSetIT.java new file mode 100644 index 0000000000000..2d213f512f1dc --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/time/IoTDBTimeRangeAccuratePullConsumerDataSetIT.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.time; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBTimeRangeAccuratePullConsumerDataSetIT extends AbstractSubscriptionRegressionIT { + private String database = "root.TimeRangeAccuratePullConsumerDataSet"; + private String device = database + ".d_0"; + private String pattern = device + ".s_0"; + private String topicName = "topic_TimeRangeAccuratePullConsumerDataSet"; + private List schemaList = new ArrayList<>(); + private SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s( + topicName, pattern, "2024-01-01T00:00:00+08:00", "2024-03-31T23:59:59+08:00", false); + session_src.createTimeseries( + pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + consumer.close(); + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += row * 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + consumer = create_pull_consumer("pull_time", "ts_accurate_dataset", false, null); + // Write data before subscribing + insert_data(1704038396000L); // 2023-12-31 23:59:56+08:00 + // Subscribe + consumer.subscribe(topicName); + 
subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + + // Before consumption subscription data + consume_data(consumer, session_dest); + check_count(2, "select count(s_0) from " + device, "Start time boundary data:" + pattern); + + insert_data(System.currentTimeMillis()); // now + consume_data(consumer, session_dest); + check_count( + 2, "select count(s_0) from " + device, "Write some real-time data after:" + pattern); + + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + consume_data(consumer, session_dest); + check_count(6, "select count(s_0) from " + device, "Data within the time range:" + pattern); + + insert_data(1711814398000L); // 2024-03-30 23:59:58+08:00 + consume_data(consumer, session_dest); + check_count(10, "select count(s_0) from " + device, "End time boundary data:" + pattern); + + insert_data(1711900798000L); // 2024-03-31 23:59:58+08:00 + consume_data(consumer, session_dest); + check_count(11, "select count(s_0) from " + device, "End time limit data 2:" + pattern); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/time/IoTDBTimeRangePullConsumerDataSetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/time/IoTDBTimeRangePullConsumerDataSetIT.java new file mode 100644 index 0000000000000..45f9b680108b1 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pullconsumer/time/IoTDBTimeRangePullConsumerDataSetIT.java @@ -0,0 +1,139 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pullconsumer.time; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/*** + * Start time, end time are both closed intervals. If not specified, the time will be 00:00:00. + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBTimeRangePullConsumerDataSetIT extends AbstractSubscriptionRegressionIT { + private String database = "root.TimeRangePullConsumerDataSet"; + private String device = database + ".d_0"; + private String pattern = device + ".s_0"; + private String topicName = "topic_TimeRangePullConsumerDataSet"; + private List schemaList = new ArrayList<>(); + private SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s( + topicName, pattern, "2024-01-01T00:00:00+08:00", "2024-03-31T00:00:00+08:00", false); + session_src.createTimeseries( + pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + consumer.close(); + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += row * 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + consumer = create_pull_consumer("pull_time", "ts_time_range_dataset", false, null); + // Write data before subscribing + insert_data(1704038396000L); // 2023-12-31 
23:59:56+08:00 + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + + // Before consumption subscription data + consume_data(consumer, session_dest); + check_count(2, "select count(s_0) from " + device, "Start time boundary data:" + pattern); + + insert_data(System.currentTimeMillis()); // now + consume_data(consumer, session_dest); + check_count( + 2, "select count(s_0) from " + device, "Write some real-time data later:" + pattern); + + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + consume_data(consumer, session_dest); + check_count(6, "select count(s_0) from " + device, "Data within the time range:" + pattern); + + insert_data(1711814398000L); // 2024-03-30 23:59:58+08:00 + consume_data(consumer, session_dest); + // Because the end time is 2024-03-31 00:00:00, closed interval + check_count(8, "select count(s_0) from " + device, "End time limit data:" + pattern); + + insert_data(1711900798000L); // 2024-03-31 23:59:58+08:00 + consume_data(consumer, session_dest); + check_count(8, "select count(s_0) from " + device, "End time boundary data:" + pattern); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/format/IoTDBTestPushConsumeDataSetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/format/IoTDBTestPushConsumeDataSetIT.java new file mode 100644 index 0000000000000..42b5cb1a6d695 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/format/IoTDBTestPushConsumeDataSetIT.java @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.format; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer: BEFORE_CONSUME + * DataSet + * pattern: root.** + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBTestPushConsumeDataSetIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.TestPushConsumeDataSet"; + private static final String topicName = "topic_TestPushConsumeDataSet"; + private static List schemaList = new ArrayList<>(); + private static final String pattern = "root.**"; + private static SubscriptionPushConsumer consumer; + private static final String device = database + ".d_push_dataset"; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s(topicName, pattern, null, null, false); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException, InterruptedException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += row * 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, 
+ TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + + final AtomicInteger rowCount = new AtomicInteger(0); + consumer = + new SubscriptionPushConsumer.Builder() + .nodeUrls(Collections.singletonList(SRC_HOST + ":" + SRC_PORT)) + .consumerId("root_dataset_consumer") + .consumerGroupId("push_format") + .ackStrategy(AckStrategy.BEFORE_CONSUME) + .fileSaveDir("target/iotdb-subscription") + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis()); + String sql0 = "select count(s_0) from " + device; + String sql1 = "select count(s_1) from " + device; + long expectCount = getCount(session_src, sql0); + System.out.println("###### src " + expectCount); + long finalExpectCount = expectCount; + AWAIT.untilAsserted( + () -> { + assertEquals(getCount(session_dest, sql0), finalExpectCount, "First result:s_0"); + assertEquals(getCount(session_dest, sql1), finalExpectCount, "First result:s_1"); + }); + + // Unsubscribe + consumer.unsubscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 0, "After cancellation, show subscriptions"); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + expectCount = getCount(session_src, sql0); + System.out.println("###### src2 " + expectCount); + // Consumption data: Progress is not retained when re-subscribing after cancellation. Full + // synchronization. + long finalExpectCount1 = expectCount; + AWAIT.untilAsserted( + () -> { + assertEquals(getCount(session_dest, sql0), finalExpectCount1, "Second result: s_0"); + assertEquals(getCount(session_dest, sql1), finalExpectCount1, "Second result: s_1"); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/format/IoTDBTestPushConsumeNoTargetDirTsfileIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/format/IoTDBTestPushConsumeNoTargetDirTsfileIT.java new file mode 100644 index 0000000000000..1c12cf46bfb74 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/format/IoTDBTestPushConsumeNoTargetDirTsfileIT.java @@ -0,0 +1,180 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.format; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.read.TsFileReader; +import org.apache.tsfile.read.common.Path; +import org.apache.tsfile.read.expression.QueryExpression; +import org.apache.tsfile.read.query.dataset.QueryDataSet; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer:BEFORE_CONSUME + * TsFileHandler + * pattern: db + * target: no + * consumer_id: no + * group_id: no + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBTestPushConsumeNoTargetDirTsfileIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.TestPushConsumeNoTargetDirTsfile"; + private static final String device = database + ".d_0"; + private static final String topicName = "topic_TestPushConsumeNoTargetDirTsfile"; + private static List schemaList = new ArrayList<>(); + private static final String pattern = database + ".**"; + private static SubscriptionPushConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s(topicName, pattern, null, null, true); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + 
super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException, InterruptedException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (1 + row) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + System.out.println("### TestPushConsumeNoTargetDirTsfile ###"); + + final AtomicInteger onReceiveCount = new AtomicInteger(0); + final AtomicInteger rowCount = new AtomicInteger(0); + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .ackStrategy(AckStrategy.BEFORE_CONSUME) + .consumeListener( + message -> { + onReceiveCount.incrementAndGet(); + try { + TsFileReader reader = message.getTsFileHandler().openReader(); + Path path = new Path(device, "s_0", true); + QueryDataSet dataset = + reader.query(QueryExpression.create(Collections.singletonList(path), null)); + while (dataset.hasNext()) { + rowCount.addAndGet(1); + dataset.next(); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis()); + + AWAIT.untilAsserted( + () -> { + assertEquals(onReceiveCount.get(), 2, "should receive 2 tsfiles"); + assertEquals(rowCount.get(), 10, "should process 10 rows of data"); + }); + + // Unsubscribe + consumer.unsubscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 0, "show subscriptions after unsubscription"); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + AWAIT.untilAsserted( + () -> { + // Consumption data: Progress is not retained after canceling and re-subscribing. Full + // synchronization. + assertEquals(onReceiveCount.get(), 5, "should receive 5 tsfiles, including 2 duplicates"); + assertEquals(rowCount.get(), 25, "should process 25 rows of data, including 10 duplicates"); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/format/IoTDBTestPushConsumeTsfileIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/format/IoTDBTestPushConsumeTsfileIT.java new file mode 100644 index 0000000000000..fd3e0e6ef257c --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/format/IoTDBTestPushConsumeTsfileIT.java @@ -0,0 +1,180 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.format; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.read.TsFileReader; +import org.apache.tsfile.read.common.Path; +import org.apache.tsfile.read.common.RowRecord; +import org.apache.tsfile.read.expression.QueryExpression; +import org.apache.tsfile.read.query.dataset.QueryDataSet; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer:AFTER_CONSUME + * TsFileHandler + * pattern: db + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBTestPushConsumeTsfileIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.TestPushConsumeTsfile"; + private static final String device = database + ".d_0"; + private static final String topicName = "topic_TestPushConsumeTsfile"; + private static List schemaList = new ArrayList<>(); + private static final String pattern = database + ".**"; + private static SubscriptionPushConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s(topicName, pattern, null, null, true); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { 
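+ // Best-effort close during teardown: ignore failures if the consumer was never opened or is already closed.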
+ } + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException, InterruptedException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += row * 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + + final AtomicInteger onReceiveCount = new AtomicInteger(0); + final AtomicInteger rowCount = new AtomicInteger(0); + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("DB_TsFile_specify_target_dir_consumer") + .consumerGroupId("push_format") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + onReceiveCount.incrementAndGet(); + try { + TsFileReader reader = message.getTsFileHandler().openReader(); + Path path = new Path(device, "s_0", true); + QueryDataSet dataset = + reader.query(QueryExpression.create(Collections.singletonList(path), null)); + while (dataset.hasNext()) { + rowCount.addAndGet(1); + RowRecord next = dataset.next(); + System.out.println(next.getTimestamp() + "," + next.getFields()); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis()); + String sql = "select count(s_0) from " + device; + System.out.println("src: " + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + assertEquals(rowCount.get(), 8, "should process 8 rows data"); + }); + // Unsubscribe + consumer.unsubscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 0, "After cancellation, show subscriptions"); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + System.out.println("src: " + getCount(session_src, sql)); + AWAIT.untilAsserted( + () -> { + // Consumption data: Progress is not retained after unsubscribing and re-subscribing. Full + // synchronization. 
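+ // rowCount already held 8 rows before re-subscribing; the full re-sync re-delivers all three tsfiles (12 rows), so 20 in total.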
+ assertEquals(rowCount.get(), 20, "should process 20 rows of data, including 8 duplicates"); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBLooseAllTsDatasetPushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBLooseAllTsDatasetPushConsumerIT.java new file mode 100644 index 0000000000000..b46c9764ec705 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBLooseAllTsDatasetPushConsumerIT.java @@ -0,0 +1,227 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.loose_range; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer + * DataSet + * pattern: ts + * loose-range: all + * mode: live + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBLooseAllTsDatasetPushConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.LooseAllTsDatasetPushConsumer"; + private static final String database2 = "root.LooseAllTsDatasetPushConsumer"; + private static final String device = database + ".d_0"; + private static final String device2 = database + ".d_1"; + private static final String topicName =
"topic_LooseAllTsDatasetPushConsumer"; + private static List schemaList = new ArrayList<>(); + private String pattern = device + ".**"; + private static SubscriptionPushConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s( + topicName, + pattern, + "2024-01-01T00:00:00+08:00", + "2024-02-13T08:00:02+08:00", + false, + TopicConstant.MODE_LIVE_VALUE, + TopicConstant.LOOSE_RANGE_ALL_VALUE); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + String sql = + "select count(s_0) from " + + device + + " where time >= 2024-01-01T00:00:00+08:00 and time <= 2024-02-13T08:00:02+08:00"; + + // Write data before subscribing + insert_data(1704038399000L, device); // 2023-12-31 23:59:59+08:00 + insert_data(1704038399000L, device2); // 2023-12-31 23:59:59+08:00 + insert_data(1706659200000L, device); // 2024-01-31 08:00:00+08:00 + insert_data(1706659200000L, device2); // 2024-01-31 08:00:00+08:00 + session_src.executeNonQueryStatement("flush;"); + System.out.println("LooseAllTsDatasetPushConsumer src1: " + getCount(session_src, sql)); + + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("device_accurate_dataset_push_snapshot") 
+ .consumerGroupId("loose_range_all") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .fileSaveDir("target") + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + + insert_data(1706745600000L, device); // 2024-02-01 08:00:00+08:00 + insert_data(1706745600000L, device2); // 2024-02-01 08:00:00+08:00 + session_src.executeNonQueryStatement("flush;"); + System.out.println("LooseAllTsDatasetPushConsumer src2: " + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + check_count_non_strict( + 14, "select count(s_0) from " + device, "Consumption data: s_0 " + device); + check_count_non_strict( + 14, "select count(s_1) from " + device, "Consumption data: s_1 " + device); + check_count(0, "select count(s_0) from " + device2, "Consumption data: s_0 " + device2); + check_count(0, "select count(s_1) from " + device2, "Consumption data: s_1 " + device2); + check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + }); + + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + + insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00 + insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00 + session_src.executeNonQueryStatement("flush;"); + System.out.println("LooseAllTsDatasetPushConsumer src3: " + getCount(session_src, sql)); + + // Consumption data: Progress is not retained when re-subscribing after cancellation. Full + // synchronization. + AWAIT.untilAsserted( + () -> { + check_count_non_strict( + 16, "select count(s_0) from " + device, "consume data again: s_0 " + device); + check_count_non_strict( + 16, "select count(s_1) from " + device, "Consumption data: s_1 " + device); + check_count(0, "select count(s_0) from " + device2, "Consumption data: s_0 " + device2); + check_count(0, "select count(s_1) from " + device2, "Consumption data: s_1 " + device2); + check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBLooseAllTsDatasetPushConsumerSnapshotIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBLooseAllTsDatasetPushConsumerSnapshotIT.java new file mode 100644 index 0000000000000..d0f8e0a3ccabd --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBLooseAllTsDatasetPushConsumerSnapshotIT.java @@ -0,0 +1,226 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.loose_range; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer + * DataSet + * pattern: ts + * loose-range: all + * mode: snapshot + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBLooseAllTsDatasetPushConsumerSnapshotIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.LooseAllTsDatasetPushConsumerSnapshot"; + private static final String database2 = "root.LooseAllTsDatasetPushConsumerSnapshot"; + private static final String device = database + ".d_0"; + private static final String device2 = database + ".d_1"; + private static final String topicName = "topic_LooseAllTsDatasetPushConsumerSnapshot"; + private static List schemaList = new ArrayList<>(); + private String pattern = device + ".**"; + private static SubscriptionPushConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s( + topicName, + pattern, + "2024-01-01T00:00:00+08:00", + "2024-02-13T08:00:02+08:00", + false, + TopicConstant.MODE_SNAPSHOT_VALUE, + TopicConstant.LOOSE_RANGE_ALL_VALUE); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + 
session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + String sql = + "select count(s_0) from " + + device + + " where time >= 2024-01-01T00:00:00+08:00 and time <= 2024-02-13T08:00:02+08:00"; + + // Write data before subscribing + insert_data(1704038399000L, device); // 2023-12-31 23:59:59+08:00 + insert_data(1704038399000L, device2); // 2023-12-31 23:59:59+08:00 + insert_data(1706659200000L, device); // 2024-01-31 08:00:00+08:00 + insert_data(1706659200000L, device2); // 2024-01-31 08:00:00+08:00 + session_src.executeNonQueryStatement("flush;"); + System.out.println("src: " + getCount(session_src, sql)); + + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("device_accurate_dataset_push_snapshot") + .consumerGroupId("loose_range_all") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .fileSaveDir("target") + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + + insert_data(1706745600000L, device); // 2024-02-01 08:00:00+08:00 + insert_data(1706745600000L, device2); // 2024-02-01 08:00:00+08:00 + session_src.executeNonQueryStatement("flush;"); + 
System.out.println("src: " + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + check_count_non_strict( + 9, "select count(s_0) from " + device, "Consumption data: s_0 " + device); + check_count_non_strict( + 9, "select count(s_1) from " + device, "Consumption data: s_1 " + device); + check_count(0, "select count(s_0) from " + device2, "Consumption data: s_0 " + device2); + check_count(0, "select count(s_1) from " + device2, "Consumption data: s_1 " + device2); + check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + }); + + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + + insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00 + insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00 + session_src.executeNonQueryStatement("flush;"); + System.out.println("src: " + getCount(session_src, sql)); + + // Consumption data: Progress is not retained after cancellation and re-subscription. Full + // synchronization. + AWAIT.untilAsserted( + () -> { + check_count_non_strict( + 11, "select count(s_0) from " + device, "consume data again: s_0 " + device); + check_count_non_strict( + 11, "select count(s_1) from " + device, "Consumption data: s_1 " + device); + check_count(0, "select count(s_0) from " + device2, "Consumption data: s_0 " + device2); + check_count(0, "select count(s_1) from " + device2, "Consumption data: s_1 " + device2); + check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBLooseAllTsfilePushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBLooseAllTsfilePushConsumerIT.java new file mode 100644 index 0000000000000..a3c40f2e05580 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBLooseAllTsfilePushConsumerIT.java @@ -0,0 +1,256 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.loose_range; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.read.TsFileReader; +import org.apache.tsfile.read.common.Path; +import org.apache.tsfile.read.common.RowRecord; +import org.apache.tsfile.read.expression.QueryExpression; +import org.apache.tsfile.read.query.dataset.QueryDataSet; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * push consumer + * mode: live + * pattern: db + * loose-range: all + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBLooseAllTsfilePushConsumerIT extends AbstractSubscriptionRegressionIT { + private String database = "root.LooseAllTsfilePushConsumer"; + private String device = database + ".d_0"; + private String device2 = database + ".d_1"; + private String pattern = database + ".**"; + private String topicName = "topic_LooseAllTsfilePushConsumer"; + private List schemaList = new ArrayList<>(); + private SubscriptionPushConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s( + topicName, + pattern, + "2024-01-01T00:00:00+08:00", + "2024-03-31T00:00:00+08:00", + true, + TopicConstant.MODE_LIVE_VALUE, + TopicConstant.LOOSE_RANGE_ALL_VALUE); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.createTimeseries( + device2 + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device2 + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device2 + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device2 + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, 
CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (1 + row) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + String sql = + "select count(s_0) from " + + device + + " where time >= 2024-01-01T00:00:00+08:00 and time <= 2024-03-31T00:00:00+08:00"; + + List rowCounts = new ArrayList<>(2); + rowCounts.add(new AtomicInteger(0)); + rowCounts.add(new AtomicInteger(0)); + final AtomicInteger onReceive = new AtomicInteger(0); + + // Write data before subscribing + insert_data(1704038396000L, device); // 2023-12-31 23:59:56+08:00 + insert_data(1704038396000L, device2); // 2023-12-31 23:59:56+08:00 + session_src.executeNonQueryStatement("flush;"); + + List paths = new ArrayList<>(2); + paths.add(device); + paths.add(device2); + + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("time_range_ts_tsfile_push") + .consumerGroupId("loose_range_all") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + try { + onReceive.addAndGet(1); + TsFileReader reader = message.getTsFileHandler().openReader(); + for (int i = 0; i < 2; i++) { + QueryDataSet dataset = + reader.query( + QueryExpression.create( + Collections.singletonList(new Path(paths.get(i), "s_0", true)), + null)); + while (dataset.hasNext()) { + rowCounts.get(i).addAndGet(1); + RowRecord next = dataset.next(); + System.out.println(next.getTimestamp() + "," + next.getFields()); + } + } + System.out.println( + FORMAT.format(new Date()) + + " " + + rowCounts.get(0).get() + + "," + + rowCounts.get(1).get()); + } catch (IOException e) { + throw new RuntimeException(e); + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + assertGte(rowCounts.get(0).get(), 3); + assertGte(rowCounts.get(1).get(), 3); + }); + + insert_data(System.currentTimeMillis(), device); // now, not in range + insert_data(System.currentTimeMillis(), device2); // now, not in range + session_src.executeNonQueryStatement("flush;"); + System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + assertGte(rowCounts.get(0).get(), 3); + assertGte(rowCounts.get(1).get(), 3); + }); + + 
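// Loose-range "all" relaxes both the time filter and the path filter on tsfile payloads, so
+    // out-of-range rows may legitimately reach this consumer; the assertions in this test are
+    // therefore lower bounds (assertGte) rather than exact row counts.
+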
insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00 + insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00 + session_src.executeNonQueryStatement("flush;"); + System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + assertGte(rowCounts.get(0).get(), 8); + assertGte(rowCounts.get(1).get(), 8); + }); + + insert_data(1711814398000L, device); // 2024-03-30 23:59:58+08:00 + insert_data(1711814398000L, device2); // 2024-03-30 23:59:58+08:00 + session_src.executeNonQueryStatement("flush;"); + System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + assertGte(rowCounts.get(0).get(), 10); + assertGte(rowCounts.get(1).get(), 10); + }); + + insert_data(1711900798000L, device); // 2024-03-31 23:59:58+08:00, not in range + insert_data(1711900798000L, device2); // 2024-03-31 23:59:58+08:00 + session_src.executeNonQueryStatement("flush;"); + System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + assertGte(rowCounts.get(0).get(), 10, "Inserted data is out of range"); + assertGte(rowCounts.get(1).get(), 10); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBPathLooseDeviceTsfilePushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBPathLooseDeviceTsfilePushConsumerIT.java new file mode 100644 index 0000000000000..d1a654709355d --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBPathLooseDeviceTsfilePushConsumerIT.java @@ -0,0 +1,253 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.loose_range; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.read.TsFileReader; +import org.apache.tsfile.read.common.Path; +import org.apache.tsfile.read.common.RowRecord; +import org.apache.tsfile.read.expression.QueryExpression; +import org.apache.tsfile.read.query.dataset.QueryDataSet; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * loose range: path + * pattern: device + * push consumer + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBPathLooseDeviceTsfilePushConsumerIT extends AbstractSubscriptionRegressionIT { + private String database = "root.PathLooseDeviceTsfilePushConsumer"; + private String device = database + ".d_0"; + private String device2 = database + ".d_1"; + private String pattern = device + ".**"; + private String topicName = "topic_PathLooseDeviceTsfilePushConsumer"; + private List schemaList = new ArrayList<>(); + private SubscriptionPushConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s( + topicName, + pattern, + "2024-01-01T00:00:00+08:00", + "2024-03-31T00:00:00+08:00", + true, + TopicConstant.MODE_LIVE_VALUE, + TopicConstant.LOOSE_RANGE_PATH_VALUE); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.createTimeseries( + device2 + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device2 + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device2 + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device2 + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, 
CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach((System.out::println));
+    assertTrue(subs.getTopic(topicName).isPresent(), "topic is listed by show topics");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+      // ignore: the consumer may already be closed or was never opened
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp, String device)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, (1 + row) * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += 2000;
+    }
+    session_src.insertTablet(tablet);
+    session_src.executeNonQueryStatement("flush;");
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    final AtomicInteger onReceive = new AtomicInteger(0);
+    List<AtomicInteger> rowCounts = new ArrayList<>(2);
+    rowCounts.add(new AtomicInteger(0));
+    rowCounts.add(new AtomicInteger(0));
+
+    List<Path> paths = new ArrayList<>(2);
+    paths.add(new Path(device, "s_0", true));
+    paths.add(new Path(device2, "s_0", true));
+
+    String sql =
+        "select count(s_0) from "
+            + device
+            + " where time >= 2024-01-01T00:00:00+08:00 and time <= 2024-03-31T00:00:00+08:00";
+
+    // Write data before subscribing
+    insert_data(1704038396000L, device); // 2023-12-31 23:59:56+08:00
+    insert_data(1704038396000L, device2); // 2023-12-31 23:59:56+08:00
+    session_src.executeNonQueryStatement("flush;");
+    consumer =
+        new SubscriptionPushConsumer.Builder()
+            .host(SRC_HOST)
+            .port(SRC_PORT)
+            .consumerId("time_range_accurate_device_tsfile_push")
+            .consumerGroupId("loose_range_path")
+            .ackStrategy(AckStrategy.AFTER_CONSUME)
+            .fileSaveDir("target/push-subscription")
+            .consumeListener(
+                message -> {
+                  try {
+                    onReceive.addAndGet(1);
+                    TsFileReader reader = message.getTsFileHandler().openReader();
+                    for (int i = 0; i < 2; i++) {
+                      QueryDataSet dataset =
+                          reader.query(
+                              QueryExpression.create(
+                                  Collections.singletonList(paths.get(i)), null));
+                      while (dataset.hasNext()) {
+                        rowCounts.get(i).addAndGet(1);
+                        RowRecord next = dataset.next();
+                        System.out.println(next.getTimestamp() + "," + next.getFields());
+                      }
+                    }
+
+                  } catch (IOException e) {
+                    throw new RuntimeException(e);
+                  }
+                  return ConsumeResult.SUCCESS;
+                })
+            .buildPushConsumer();
+    consumer.open();
+    // Subscribe
+    consumer.subscribe(topicName);
+    subs.getSubscriptions().forEach(System.out::println);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+    System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql));
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(onReceive.get(), 1);
+          assertGte(rowCounts.get(0).get(), 3, "Write data before subscription " + device);
+          assertGte(rowCounts.get(1).get(), 3, "Write data before subscription " + device2);
+        });
+
+    insert_data(System.currentTimeMillis(), device); // now, not in range
+    insert_data(System.currentTimeMillis(), device2); // now, not in range
+    session_src.executeNonQueryStatement("flush;");
+    System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql));
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(onReceive.get(), 1);
+          assertGte(rowCounts.get(0).get(), 3, "Write out-of-range data " + device);
+          assertGte(rowCounts.get(1).get(), 3, "Write out-of-range data " + device2);
+        });
+
+    insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00
+    insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00
+    session_src.executeNonQueryStatement("flush;");
+    System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql));
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(onReceive.get(), 2);
+          assertGte(rowCounts.get(0).get(), 8, "Write data " + device);
+          assertGte(rowCounts.get(1).get(), 8, "Write data " + device2);
+        });
+
+    insert_data(1711814398000L, device); // 2024-03-30 23:59:58+08:00
+    insert_data(1711814398000L, device2); // 2024-03-30 23:59:58+08:00
+    session_src.executeNonQueryStatement("flush;");
+    System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql));
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(onReceive.get(), 3);
+          assertGte(rowCounts.get(0).get(), 10, "Write data: end boundary at " + device);
+          assertGte(rowCounts.get(1).get(), 10, "Write data: end boundary at " + device2);
+        });
+
+    insert_data(1711900798000L, device); // 2024-03-31 23:59:58+08:00, not in range
+    insert_data(1711900798000L, device2); // 2024-03-31 23:59:58+08:00, not in range
+    System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql));
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(onReceive.get(), 3);
+          assertGte(rowCounts.get(0).get(), 10, "Write data: > end " + device);
+          assertGte(rowCounts.get(1).get(), 10, "Write data: > end " + device2);
+        });
+  }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBPathLooseTsDatasetPushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBPathLooseTsDatasetPushConsumerIT.java
new file mode 100644
index 0000000000000..2deda678b41b7
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBPathLooseTsDatasetPushConsumerIT.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.loose_range; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer + * mode: DataSet + * pattern: ts + * loose-range: path + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBPathLooseTsDatasetPushConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.PathLooseTsDatasetPushConsumer"; + private static final String database2 = "root.PathLooseTsDatasetPushConsumer"; + private static final String device = database + ".d_0"; + private static final String device2 = database + ".d_1"; + private static final String topicName = "topic_PathLooseTsDatasetPushConsumer"; + private static List schemaList = new ArrayList<>(); + + private static final String pattern = database + ".d_0.s_0"; + private static SubscriptionPushConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s( + topicName, + pattern, + "2024-01-01T00:00:00+08:00", + "2024-02-13T08:00:02+08:00", + false, + TopicConstant.MODE_LIVE_VALUE, + TopicConstant.LOOSE_RANGE_PATH_VALUE); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_src.executeNonQueryStatement( + 
"insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + String sql = + "select count(s_0) from " + + device + + " where time >= 2024-01-01T00:00:00+08:00 and time <= 2024-02-13T08:00:02+08:00"; + // Write data before subscribing + insert_data(1704038399000L, device); // 2023-12-31 23:59:59+08:00 + insert_data(1704038399000L, device2); // 2023-12-31 23:59:59+08:00 + + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("ts_accurate_dataset_consumer") + .consumerGroupId("loose_range_path") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .fileSaveDir("target") + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + check_count_non_strict( + 4, + "select count(s_0) from " + device, + "Subscribe before writing data: s_0 " + device); + check_count( + 0, + "select count(s_1) from " + device, + "Subscribe before writing data: s_1 " + device); + check_count( + 0, + "select count(s_0) from " + device2, + "Subscribe before writing data: s_0 " + device2); + check_count( + 0, + "select count(s_1) from " + device2, + "Subscribe before writing data: s_1 " + device2); + check_count(0, "select count(s_0) from " + database + ".d_1", "Consumption data:d_1"); + check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + }); + + insert_data(1706659200000L, device); // 2024-01-31 08:00:00+08:00 + insert_data(1706659200000L, device2); // 2024-01-31 08:00:00+08:00 + System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + check_count_non_strict( + 9, "select count(s_0) 
from " + device, "Consumption data: s_0" + device); + check_count(0, "select count(s_1) from " + device, "Consumption data: s_1" + device); + check_count(0, "select count(s_0) from " + device2, "Consumption data: s_0" + device2); + check_count(0, "select count(s_1) from " + device2, "Consumption data: s_1" + device2); + check_count(0, "select count(s_0) from " + database + ".d_1", "Consumption Data: d_1"); + check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + }); + + insert_data(System.currentTimeMillis(), device); // not in range + insert_data(System.currentTimeMillis(), device2); + System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + check_count_non_strict( + 9, "select count(s_0) from " + device, "Out-of-range data: s_0" + device); + check_count(0, "select count(s_1) from " + device, "Out-of-range data: s_1" + device); + check_count(0, "select count(s_0) from " + device2, "Out-of-range data: s_0" + device2); + check_count(0, "select count(s_1) from " + device2, "Out-of-range data: s_1" + device2); + check_count(0, "select count(s_0) from " + database + ".d_1", "Consumption data:d_1"); + check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + }); + + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + + insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00 + insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00 + System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql)); + + // Consumption data: Progress is not preserved if you unsubscribe and then resubscribe. Full + // synchronization. + AWAIT.untilAsserted( + () -> { + check_count_non_strict( + 11, "select count(s_0) from " + device, "consume data again:s_0" + device); + check_count(0, "select count(s_1) from " + device, "Consumption Data: s_1" + device); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBPathLooseTsfilePushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBPathLooseTsfilePushConsumerIT.java new file mode 100644 index 0000000000000..e9624e9370257 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBPathLooseTsfilePushConsumerIT.java @@ -0,0 +1,223 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.loose_range; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.read.TsFileReader; +import org.apache.tsfile.read.common.Path; +import org.apache.tsfile.read.common.RowRecord; +import org.apache.tsfile.read.expression.QueryExpression; +import org.apache.tsfile.read.query.dataset.QueryDataSet; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * loose range: path + * push consumer + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBPathLooseTsfilePushConsumerIT extends AbstractSubscriptionRegressionIT { + private String database = "root.PathLooseTsfilePushConsumer"; + private String device = database + ".d_0"; + private String device2 = database + ".d_1"; + private String pattern = database + ".**"; + private String topicName = "topic_PathLooseTsfilePushConsumer"; + private List schemaList = new ArrayList<>(); + private SubscriptionPushConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s( + topicName, + pattern, + "2024-01-01T00:00:00+08:00", + "2024-03-31T00:00:00+08:00", + true, + TopicConstant.MODE_LIVE_VALUE, + TopicConstant.LOOSE_RANGE_PATH_VALUE); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.createTimeseries( + device2 + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device2 + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device2 + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device2 + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + 
schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (1 + row) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + final AtomicInteger rowCount = new AtomicInteger(0); + final AtomicInteger onReceive = new AtomicInteger(0); + + // Write data before subscribing + insert_data(1704038396000L, device); // 2023-12-31 23:59:56+08:00 + insert_data(1704038396000L, device2); // 2023-12-31 23:59:56+08:00 + session_src.executeNonQueryStatement("flush;"); + + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("time_range_accurate_db_tsfile_push") + .consumerGroupId("loose_range_path") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + try { + onReceive.addAndGet(1); + TsFileReader reader = message.getTsFileHandler().openReader(); + List paths = new ArrayList<>(2); + for (int i = 0; i < 2; i++) { + paths.add(new Path(device, "s_" + i, true)); + } + QueryDataSet dataset = reader.query(QueryExpression.create(paths, null)); + while (dataset.hasNext()) { + rowCount.addAndGet(1); + RowRecord next = dataset.next(); + System.out.println(next.getTimestamp() + "," + next.getFields()); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions().forEach(System.out::println); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + + AWAIT.untilAsserted( + () -> { + assertGte(rowCount.get(), 3); + }); + + insert_data(System.currentTimeMillis(), device); // now, not in range + insert_data(System.currentTimeMillis(), device2); // now, not in range + session_src.executeNonQueryStatement("flush;"); + + AWAIT.untilAsserted( + () -> { + assertGte(rowCount.get(), 3); + }); + + insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00 + insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00 + session_src.executeNonQueryStatement("flush;"); + + AWAIT.untilAsserted( + () -> { + assertGte(rowCount.get(), 8); + }); + + insert_data(1711814398000L, device); // 2024-03-30 23:59:58+08:00 + insert_data(1711814398000L, device2); // 2024-03-30 23:59:58+08:00 + session_src.executeNonQueryStatement("flush;"); + + AWAIT.untilAsserted( + () -> { + assertGte(rowCount.get(), 10); + }); + + insert_data(1711900798000L, device); // 2024-03-31 23:59:58+08:00 + insert_data(1711900798000L, device2); // 2024-03-31 23:59:58+08:00 + + AWAIT.untilAsserted( + () -> 
{ + assertGte(rowCount.get(), 10); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBPathTsLooseDatasetPushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBPathTsLooseDatasetPushConsumerIT.java new file mode 100644 index 0000000000000..ee305f828f413 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBPathTsLooseDatasetPushConsumerIT.java @@ -0,0 +1,208 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.loose_range; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer + * DataSet + * pattern: ts + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBPathTsLooseDatasetPushConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.PathTsLooseDatasetPushConsumer"; + private static final String database2 = "root.PathTsLooseDatasetPushConsumer"; + private static final String device = database + ".d_0"; + private static final String device2 = database + ".d_1"; + private static final String topicName = "topic_PathTsLooseDatasetPushConsumer"; + private static List schemaList = new ArrayList<>(); + + private 
static final String pattern = database + ".d_0.s_0"; + public static SubscriptionPushConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s( + topicName, + pattern, + "2024-01-01T00:00:00+08:00", + "2024-02-13T08:00:02+08:00", + false, + TopicConstant.MODE_LIVE_VALUE, + TopicConstant.LOOSE_RANGE_PATH_VALUE); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1704038399000L, device); // 2023-12-31 23:59:59+08:00 + insert_data(1704038399000L, device2); // 2023-12-31 23:59:59+08:00 + insert_data(1706659200000L, device); // 2024-01-31 08:00:00+08:00 + insert_data(1706659200000L, device2); // 2024-01-31 08:00:00+08:00 + + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("ts_accurate_dataset_consumer") + .consumerGroupId("loose_range_path") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .fileSaveDir("target") + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } 
catch (IoTDBConnectionException e) {
+                      throw new RuntimeException(e);
+                    }
+                  }
+                  return ConsumeResult.SUCCESS;
+                })
+            .buildPushConsumer();
+    consumer.open();
+    // Subscribe
+    consumer.subscribe(topicName);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+
+    AWAIT.untilAsserted(
+        () -> {
+          check_count_non_strict(
+              9, "select count(s_0) from " + device, "Consumption data: s_0 " + device);
+          check_count_non_strict(
+              0, "select count(s_1) from " + device, "Consumption data: s_1 " + device);
+          check_count(0, "select count(s_0) from " + device2, "Consumption data: s_0 " + device2);
+          check_count(0, "select count(s_1) from " + device2, "Consumption data: s_1 " + device2);
+          check_count(0, "select count(s_0) from " + database + ".d_1", "Consumption data: d_1");
+          check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data: d_2");
+        });
+
+    // Unsubscribe
+    consumer.unsubscribe(topicName);
+    // Subscribe and then write data
+    consumer.subscribe(topicName);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing");
+
+    insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00
+    insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00
+
+    // Consumption data: progress is not retained after unsubscribing and re-subscribing, so the
+    // topic is synchronized in full again.
+    AWAIT.untilAsserted(
+        () -> {
+          check_count_non_strict(
+              11, "select count(s_0) from " + device, "Consume data again: s_0 " + device);
+          check_count(0, "select count(s_1) from " + device, "Consumption data: s_1 " + device);
+        });
+  }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBTimeLooseTsDatasetPushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBTimeLooseTsDatasetPushConsumerIT.java
new file mode 100644
index 0000000000000..eb846ff0b7e70
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBTimeLooseTsDatasetPushConsumerIT.java
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.loose_range; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer + * DataSet + * pattern: ts + * loose-range: time + * live + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBTimeLooseTsDatasetPushConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.TimeLooseTsDatasetPushConsumer"; + private static final String database2 = "root.TimeLooseTsDatasetPushConsumer"; + private static final String device = database + ".d_0"; + private static final String device2 = database + ".d_1"; + private static final String topicName = "topic_TimeLooseTsDatasetPushConsumer"; + private static List schemaList = new ArrayList<>(); + + private static String pattern = device + ".s_0"; + private static SubscriptionPushConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s( + topicName, + pattern, + "2024-01-01T00:00:00+08:00", + "2024-02-13T08:00:02+08:00", + false, + TopicConstant.MODE_LIVE_VALUE, + TopicConstant.LOOSE_RANGE_TIME_VALUE); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_src.executeNonQueryStatement( + "insert 
into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + String sql = + "select count(s_0) from " + + device + + " where time >= 2024-01-01T00:00:00+08:00 and time <= 2024-02-13T08:00:02+08:00"; + + // Write data before subscribing + insert_data(1704038399000L, device); // 2023-12-31 23:59:59+08:00 + insert_data(1704038399000L, device2); // 2023-12-31 23:59:59+08:00 + session_src.executeNonQueryStatement("flush;"); + + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("push_dataset_ts_dataset_consumer") + .consumerGroupId("loose_range_time") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .fileSaveDir("target") + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + + insert_data(1706659200000L, device); // 2024-01-31 08:00:00+08:00 + insert_data(1706659200000L, device2); // 2024-01-31 08:00:00+08:00 + System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + check_count_non_strict( + 9, "select count(s_0) from " + device, "Consumption data: s_0" + device); + check_count(0, "select count(s_1) from " + device, "Consumption data: s_1" + device); + check_count(0, "select count(s_0) from " + device2, "Consumption data: s_0" + device2); + check_count(0, "select count(s_1) from " + device, "Consumption data: s_1" + device2); + check_count(0, "select count(s_0) from " + database + ".d_1", "Consumption Data: d_1"); + check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + }); + + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + + insert_data(1707782400000L, device); 
// 2024-02-13 08:00:00+08:00 + insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00 + session_src.executeNonQueryStatement("flush;"); + System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql)); + + // Consumption data: Progress is not retained after unsubscribing and re-subscribing. Full + // synchronization. + AWAIT.untilAsserted( + () -> { + check_count_non_strict( + 11, "select count(s_0) from " + device, "Consumption data: s_0" + device); + check_count(0, "select count(s_1) from " + device, "Consumption Data: s_1" + device); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBTimeLooseTsTsfilePushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBTimeLooseTsTsfilePushConsumerIT.java new file mode 100644 index 0000000000000..1b5ab3e7d016a --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBTimeLooseTsTsfilePushConsumerIT.java @@ -0,0 +1,276 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.loose_range;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.rpc.subscription.config.TopicConstant;
+import org.apache.iotdb.session.subscription.consumer.AckStrategy;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.read.TsFileReader;
+import org.apache.tsfile.read.common.Path;
+import org.apache.tsfile.read.common.RowRecord;
+import org.apache.tsfile.read.expression.QueryExpression;
+import org.apache.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+/***
+ * mode: live
+ * loose-range: time
+ * format: tsfile
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBTimeLooseTsTsfilePushConsumerIT extends AbstractSubscriptionRegressionIT {
+  private String database = "root.TimeLooseTsTsfilePushConsumer";
+  private String device = database + ".d_0";
+  private String device2 = database + ".d_1";
+  private String pattern = device + ".s_0";
+  private String topicName = "topic_TimeLooseTsTsfilePushConsumer";
+  private List<MeasurementSchema> schemaList = new ArrayList<>();
+  private SubscriptionPushConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createTopic_s(
+        topicName,
+        pattern,
+        "2024-01-01T00:00:00+08:00",
+        "2024-03-31T00:00:00+08:00",
+        true,
+        TopicConstant.MODE_LIVE_VALUE,
+        TopicConstant.LOOSE_RANGE_TIME_VALUE);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_src.createTimeseries(
+        device2 + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device2 + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device2 + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device2 + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+
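// The topic pattern above covers only device.s_0; device.s_1 and the d_1 series exist so the
+    // assertions in do_test can check that loose-range=time relaxes only the time filter while
+    // non-matching paths still receive nothing.
+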
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach((System.out::println));
+    assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp, String device)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, (1 + row) * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += 2000;
+    }
+    session_src.insertTablet(tablet);
+    session_src.executeNonQueryStatement("flush;");
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    final AtomicInteger onReceive = new AtomicInteger(0);
+    List<AtomicInteger> rowCounts = new ArrayList<>(4);
+    for (int i = 0; i < 4; i++) {
+      rowCounts.add(new AtomicInteger(0));
+    }
+
+    List<Path> paths = new ArrayList<>(4);
+    paths.add(new Path(device, "s_0", true));
+    paths.add(new Path(device, "s_1", true));
+    paths.add(new Path(device2, "s_0", true));
+    paths.add(new Path(device2, "s_1", true));
+
+    String sql =
+        "select count(s_0) from "
+            + device
+            + " where time >= 2024-01-01T00:00:00+08:00 and time <= 2024-03-31T00:00:00+08:00";
+
+    // Write data before subscribing
+    insert_data(1704038396000L, device); // 2023-12-31 23:59:56+08:00
+    insert_data(1704038396000L, device2); // 2023-12-31 23:59:56+08:00
+    session_src.executeNonQueryStatement("flush;");
+    System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql));
+
+    consumer =
+        new SubscriptionPushConsumer.Builder()
+            .host(SRC_HOST)
+            .port(SRC_PORT)
+            .consumerId("time_range_accurate_ts_tsfile_push")
+            .consumerGroupId("loose_range_time")
+            .ackStrategy(AckStrategy.AFTER_CONSUME)
+            .fileSaveDir("target/push-subscription")
+            .consumeListener(
+                message -> {
+                  try {
+                    onReceive.addAndGet(1);
+                    TsFileReader reader = message.getTsFileHandler().openReader();
+                    for (int i = 0; i < 4; i++) {
+                      QueryDataSet dataset =
+                          reader.query(
+                              QueryExpression.create(
+                                  Collections.singletonList(paths.get(i)), null));
+                      while (dataset.hasNext()) {
+                        rowCounts.get(i).addAndGet(1);
+                        RowRecord next = dataset.next();
+                        System.out.println(next.getTimestamp() + "," + next.getFields());
+                      }
+                      System.out.println(
+                          FORMAT.format(new Date()) + " " + i + " " + rowCounts.get(i).get());
+                    }
+                  } catch (IOException e) {
+                    throw new RuntimeException(e);
+                  }
+                  return ConsumeResult.SUCCESS;
+                })
+            .buildPushConsumer();
+    consumer.open();
+    // Subscribe
+    consumer.subscribe(topicName);
+    subs.getSubscriptions().forEach(System.out::println);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(
+              rowCounts.get(0).get(),
+              3,
+              device + ".s_0, subscribe before writing data start boundary");
+          assertEquals(
+              rowCounts.get(1).get(),
+              0,
+              device + ".s_1, subscribe before writing data start boundary");
+          assertEquals(
+              rowCounts.get(2).get(),
+              0,
+              device2 + ".s_0, subscribe before writing data start boundary");
+          assertEquals(
+              rowCounts.get(3).get(),
+              0,
+              device2 + ".s_1, subscribe before writing data start boundary");
+        });
+
+    insert_data(System.currentTimeMillis(), device); // now, not in range
+    insert_data(System.currentTimeMillis(), device2); // now, not in range
+    session_src.executeNonQueryStatement("flush;");
+    System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql));
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(rowCounts.get(0).get(), 3, device + ".s_0, Write out-of-range data");
+          assertEquals(rowCounts.get(1).get(), 0, device + ".s_1, Write out-of-range data");
+          assertEquals(rowCounts.get(2).get(), 0, device2 + ".s_0, Write out-of-range data");
+          assertEquals(rowCounts.get(3).get(), 0, device2 + ".s_1, Write out-of-range data");
+        });
+
+    insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00
+    insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00
+    session_src.executeNonQueryStatement("flush;");
+    System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql));
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(rowCounts.get(0).get(), 8, device + ".s_0, write normal data");
+          assertEquals(rowCounts.get(1).get(), 0, device + ".s_1, write normal data");
+          assertEquals(rowCounts.get(2).get(), 0, device2 + ".s_0, write normal data");
+          assertEquals(rowCounts.get(3).get(), 0, device2 + ".s_1, write normal data");
+        });
+
+    insert_data(1711814398000L, device); // 2024-03-30 23:59:58+08:00
+    insert_data(1711814398000L, device2); // 2024-03-30 23:59:58+08:00
+    session_src.executeNonQueryStatement("flush;");
+    System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql));
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(rowCounts.get(0).get(), 10, device + ".s_0, write end boundary data");
+          assertEquals(rowCounts.get(1).get(), 0, device + ".s_1, write end boundary data");
+          assertEquals(rowCounts.get(2).get(), 0, device2 + ".s_0, write end boundary data");
+          assertEquals(rowCounts.get(3).get(), 0, device2 + ".s_1, write end boundary data");
+        });
+
+    insert_data(1711900798000L, device); // 2024-03-31 23:59:58+08:00
+    insert_data(1711900798000L, device2); // 2024-03-31 23:59:58+08:00
+    session_src.executeNonQueryStatement("flush;");
+    System.out.println(FORMAT.format(new Date()) + " src: " + getCount(session_src, sql));
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(rowCounts.get(0).get(), 10, device + ".s_0, Write data outside end range");
+          assertEquals(rowCounts.get(1).get(), 0, device + ".s_1, Write data outside end range");
+          assertEquals(rowCounts.get(2).get(), 0, device2 + ".s_0, Write data outside end range");
+          assertEquals(rowCounts.get(3).get(), 0, device2 + ".s_1, Write data outside end range");
+        });
+  }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBTimeLooseTsfilePushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBTimeLooseTsfilePushConsumerIT.java
new file mode 100644
index 0000000000000..50f37c4183e27
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBTimeLooseTsfilePushConsumerIT.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.loose_range;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.rpc.subscription.config.TopicConstant;
+import org.apache.iotdb.session.subscription.consumer.AckStrategy;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.read.TsFileReader;
+import org.apache.tsfile.read.common.Path;
+import org.apache.tsfile.read.common.RowRecord;
+import org.apache.tsfile.read.expression.QueryExpression;
+import org.apache.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBTimeLooseTsfilePushConsumerIT extends AbstractSubscriptionRegressionIT {
+  private String database = "root.TimeLooseTsfilePushConsumer";
+  private String device = database + ".d_0";
+  private String device2 = database + ".d_1";
+  private String pattern = device + ".**";
+  private String topicName = "topic_TimeLooseTsfilePushConsumer";
+  private List<MeasurementSchema> schemaList = new ArrayList<>();
+  private SubscriptionPushConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createTopic_s(
+        topicName,
+        pattern,
+        "2024-01-01T00:00:00+08:00",
+        "2024-03-31T00:00:00+08:00",
+        true,
+        TopicConstant.MODE_LIVE_VALUE,
+        TopicConstant.LOOSE_RANGE_TIME_VALUE);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_src.createTimeseries(
+        device2 + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device2 + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device2 + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device2 + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach((System.out::println));
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp, String device)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, (1 + row) * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += 2000;
+    }
+    session_src.insertTablet(tablet);
+    session_src.executeNonQueryStatement("flush;");
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    String sql =
+        "select count(s_0) from "
+            + device
+            + " where time >= 2024-01-01T00:00:00+08:00 and time <= 2024-03-31T00:00:00+08:00";
+
+    final AtomicInteger rowCount = new AtomicInteger(0);
+    final AtomicInteger onReceive = new AtomicInteger(0);
+
+    // Write data before subscribing
+    insert_data(1704038396000L, device); // 2023-12-31 23:59:56+08:00
+    insert_data(1704038396000L, device2); // 2023-12-31 23:59:56+08:00
+    session_src.executeNonQueryStatement("flush;");
+    System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql));
+
+    consumer =
+        new SubscriptionPushConsumer.Builder()
+            .host(SRC_HOST)
+            .port(SRC_PORT)
+            .consumerId("time_range_accurate_db_tsfile_push")
+            .consumerGroupId("loose_range_time")
+            .ackStrategy(AckStrategy.AFTER_CONSUME)
+            .fileSaveDir("target/push-subscription")
+            .consumeListener(
+                message -> {
+                  try {
+                    onReceive.addAndGet(1);
+                    TsFileReader reader = message.getTsFileHandler().openReader();
+                    List<Path> paths = new ArrayList<>(2);
+                    for (int i = 0; i < 2; i++) {
+                      paths.add(new Path(device, "s_" + i, true));
+                    }
+                    QueryDataSet dataset = reader.query(QueryExpression.create(paths, null));
+                    while (dataset.hasNext()) {
+                      rowCount.addAndGet(1);
+                      RowRecord next = dataset.next();
+                      System.out.println(next.getTimestamp() + "," + next.getFields());
+                    }
+                    System.out.println("onReceive=" + onReceive.get());
+                  } catch (IOException e) {
+                    throw new RuntimeException(e);
+                  }
+                  return ConsumeResult.SUCCESS;
+                })
+            .buildPushConsumer();
+    consumer.open();
+    // Subscribe
+    consumer.subscribe(topicName);
+    subs.getSubscriptions().forEach(System.out::println);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(onReceive.get(), 1);
+          assertGte(rowCount.get(), 3);
+        });
+
+    insert_data(System.currentTimeMillis(), device); // now, not in range
+    insert_data(System.currentTimeMillis(), device2); // now, not in range
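+    // Hedged note: these rows carry the current wall-clock time, which lies outside
+    // the topic window ending 2024-03-31T00:00:00+08:00, so no new data should be
+    // delivered; the following AWAIT therefore re-checks the same lower bounds.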
session_src.executeNonQueryStatement("flush;"); + System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + assertGte(onReceive.get(), 1); + assertGte(rowCount.get(), 3); + }); + + insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00 + insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00 + session_src.executeNonQueryStatement("flush;"); + System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + assertGte(onReceive.get(), 2); + assertGte(rowCount.get(), 8); + }); + + insert_data(1711814398000L, device); // 2024-03-30 23:59:58+08:00 + insert_data(1711814398000L, device2); // 2024-03-30 23:59:58+08:00 + session_src.executeNonQueryStatement("flush;"); + System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + assertGte(onReceive.get(), 3); + assertGte(rowCount.get(), 10); + }); + + insert_data(1711900798000L, device); // 2024-03-31 23:59:58+08:00 + insert_data(1711900798000L, device2); // 2024-03-31 23:59:58+08:00 + session_src.executeNonQueryStatement("flush;"); + System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + assertGte(onReceive.get(), 3); + assertGte(rowCount.get(), 10); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBTimeTsLooseDatasetPushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBTimeTsLooseDatasetPushConsumerIT.java new file mode 100644 index 0000000000000..dac2c5fd160d3 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/loose_range/IoTDBTimeTsLooseDatasetPushConsumerIT.java @@ -0,0 +1,218 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.loose_range;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.rpc.subscription.config.TopicConstant;
+import org.apache.iotdb.session.subscription.consumer.AckStrategy;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+/***
+ * PushConsumer
+ * DataSet
+ * pattern: ts
+ * time loose
+ * live
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBTimeTsLooseDatasetPushConsumerIT extends AbstractSubscriptionRegressionIT {
+  private static final String database = "root.test.TimeTsLooseDatasetPushConsumer";
+  private static final String database2 = "root.TimeTsLooseDatasetPushConsumer";
+  private static final String device = database + ".d_0";
+  private static final String device2 = database + ".d_1";
+  private static final String topicName = "topic_TimeTsLooseDatasetPushConsumer";
+  private static List<MeasurementSchema> schemaList = new ArrayList<>();
+
+  private static final String pattern = device + ".s_0";
+  public static SubscriptionPushConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createDB(database2);
+    createTopic_s(
+        topicName,
+        pattern,
+        "2024-01-01T00:00:00+08:00",
+        "2024-02-13T08:00:02+08:00",
+        false,
+        TopicConstant.MODE_LIVE_VALUE,
+        TopicConstant.LOOSE_RANGE_TIME_VALUE);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_src.executeNonQueryStatement(
+        "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);");
+    session_dest.executeNonQueryStatement(
+        "create aligned timeseries " + device2 + "(s_0 int64,s_1 double);");
+    session_src.executeNonQueryStatement(
+        "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);");
+    session_dest.executeNonQueryStatement(
+        "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);");
+    session_src.executeNonQueryStatement(
+        "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);");
+    session_src.executeNonQueryStatement(
+        "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);");
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach((System.out::println));
+    assertTrue(subs.getTopic(topicName).isPresent(), "create show topics");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+    } finally {
+      subs.dropTopic(topicName);
+      dropDB(database);
+      dropDB(database2);
+    }
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp, String device)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 5);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, (row + 1) * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += 2000;
+    }
+    session_src.insertTablet(tablet);
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    // Write data before subscribing
+    insert_data(1704038399000L, device); // 2023-12-31 23:59:59+08:00
+    insert_data(1704038399000L, device2); // 2023-12-31 23:59:59+08:00
+    insert_data(1706659200000L, device); // 2024-01-31 08:00:00+08:00
+    insert_data(1706659200000L, device2); // 2024-01-31 08:00:00+08:00
+    session_src.executeNonQueryStatement("flush;");
+
+    consumer =
+        new SubscriptionPushConsumer.Builder()
+            .host(SRC_HOST)
+            .port(SRC_PORT)
+            .consumerId("push_dataset_ts_dataset_consumer")
+            .consumerGroupId("loose_range_time")
+            .ackStrategy(AckStrategy.AFTER_CONSUME)
+            .fileSaveDir("target")
+            .consumeListener(
+                message -> {
+                  for (final SubscriptionSessionDataSet dataSet :
+                      message.getSessionDataSetsHandler()) {
+                    try {
+                      session_dest.insertTablet(dataSet.getTablet());
+                    } catch (StatementExecutionException e) {
+                      throw new RuntimeException(e);
+                    } catch (IoTDBConnectionException e) {
+                      throw new RuntimeException(e);
+                    }
+                  }
+                  return ConsumeResult.SUCCESS;
+                })
+            .buildPushConsumer();
+    consumer.open();
+    // Subscribe
+    consumer.subscribe(topicName);
+    assertEquals(
+        subs.getSubscriptions(topicName).size(), 1, "show subscriptions after subscription");
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(
+              getCount(session_dest, "select count(s_0) from " + device),
+              9,
+              "Consumption data: s_0 " + device);
+          check_count(0, "select count(s_1) from " + device, "Consumption data: s_1 " + device);
+          check_count(0, "select count(s_0) from " + device2, "Consumption data: s_0 " + device2);
+          check_count(0, "select count(s_1) from " + device2, "Consumption data: s_1 " + device2);
+          check_count(0, "select count(s_0) from " + database + ".d_1", "Consumption data:d_1");
+          check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2");
+        });
+
+    // Unsubscribe
+    consumer.unsubscribe(topicName);
+    // Subscribe and then write data
+    consumer.subscribe(topicName);
+    assertEquals(
+        subs.getSubscriptions(topicName).size(), 1, "show subscriptions after re-subscribing");
+
+    insert_data(1707782400000L, device); // 2024-02-13 08:00:00+08:00
+    insert_data(1707782400000L, device2); // 2024-02-13 08:00:00+08:00
+    session_src.executeNonQueryStatement("flush;");
+
+    // Consumption data: Progress is not retained
after canceling and re-subscribing. Full + // synchronization. + AWAIT.untilAsserted( + () -> { + assertGte( + getCount(session_dest, "select count(s_0) from " + device), + 11, + "re-subscribing s_0 count=" + + getCount(session_dest, "select count(s_0) from " + device)); + check_count(0, "select count(s_1) from " + device, "Consumption Data: s_1" + device); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/mode/IoTDBSnapshotTSPatternDatasetPushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/mode/IoTDBSnapshotTSPatternDatasetPushConsumerIT.java new file mode 100644 index 0000000000000..f19f0594537d8 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/mode/IoTDBSnapshotTSPatternDatasetPushConsumerIT.java @@ -0,0 +1,215 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.mode; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet; +import org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant; +import org.apache.iotdb.subscription.it.Retry; +import org.apache.iotdb.subscription.it.RetryRule; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer + * DataSet + * pattern: ts + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class 
IoTDBSnapshotTSPatternDatasetPushConsumerIT extends AbstractSubscriptionRegressionIT {
+
+  @Rule public RetryRule retryRule = new RetryRule();
+
+  private static final String database = "root.test.SnapshotTSPatternDatasetPushConsumer";
+  private static final String database2 = "root.SnapshotTSPatternDatasetPushConsumer";
+  private static final String device = database + ".d_0";
+  private static final String device2 = database + ".d_1";
+  private static final String topicName = "topic_SnapshotTSPatternDatasetPushConsumer";
+  private static List<MeasurementSchema> schemaList = new ArrayList<>();
+  private static final String pattern = device + ".s_0";
+  private static SubscriptionPushConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createDB(database2);
+    createTopic_s(topicName, pattern, null, null, false, TopicConstant.MODE_SNAPSHOT_VALUE, null);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_src.executeNonQueryStatement(
+        "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);");
+    session_dest.executeNonQueryStatement(
+        "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);");
+    session_src.executeNonQueryStatement(
+        "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);");
+    session_dest.executeNonQueryStatement(
+        "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);");
+    session_src.executeNonQueryStatement(
+        "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);");
+    session_src.executeNonQueryStatement(
+        "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);");
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach((System.out::println));
+    assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics");
+  }
+
+  // TODO: remove it later
+  @Override
+  protected void setUpConfig() {
+    super.setUpConfig();
+
+    IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(sender);
+    IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(receiver1);
+    IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(receiver2);
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    dropDB(database2);
+    schemaList.clear();
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      // Note: row 0 advances the timestamp by 0 ms, so the first two rows share a
+      // timestamp and the later one overwrites the earlier: each call produces 4
+      // distinct rows, which the check_count(4, ...) assertion below relies on.
+      timestamp += row * 2000;
+    }
+    session_src.insertTablet(tablet);
+    session_src.executeNonQueryStatement("flush;");
+  }
+
+  @Test
+  @Retry
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("ts_dataset_snapshot") + .consumerGroupId("push_mode") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions(topicName).forEach(System.out::println); + assertEquals( + subs.getSubscriptions(topicName).size(), 1, "show subscriptions after subscription"); + + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + insert_data(System.currentTimeMillis()); + + AWAIT.untilAsserted( + () -> { + check_count(4, "select count(s_0) from " + device, "Consumption data: s_0 " + device); + check_count(0, "select count(s_1) from " + device, "Consumption data: s_1 " + device); + check_count(0, "select count(s_0) from " + database + ".d_1", "Consumption Data:d_1"); + check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + }); + + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals( + subs.getSubscriptions(topicName).size(), 1, "show subscriptions after re-subscribing"); + + // Consumption data: Progress is not retained when re-subscribing after cancellation. Full + // synchronization. + IoTDBSubscriptionITConstant.AWAIT_WITH_FLUSH( + session_src, + () -> { + check_count(12, "select count(s_0) from " + device, "consume data again:s_0 " + device); + check_count(0, "select count(s_1) from " + device, "Consumption data: s_1 " + device); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/mode/IoTDBSnapshotTSPatternTsfilePushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/mode/IoTDBSnapshotTSPatternTsfilePushConsumerIT.java new file mode 100644 index 0000000000000..5561b5c265050 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/mode/IoTDBSnapshotTSPatternTsfilePushConsumerIT.java @@ -0,0 +1,253 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.mode;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.rpc.subscription.config.TopicConstant;
+import org.apache.iotdb.session.subscription.consumer.AckStrategy;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+import org.apache.iotdb.subscription.it.Retry;
+import org.apache.iotdb.subscription.it.RetryRule;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.read.TsFileReader;
+import org.apache.tsfile.read.common.Path;
+import org.apache.tsfile.read.common.RowRecord;
+import org.apache.tsfile.read.expression.QueryExpression;
+import org.apache.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+/***
+ * PushConsumer
+ * TsFile
+ * pattern: ts
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBSnapshotTSPatternTsfilePushConsumerIT extends AbstractSubscriptionRegressionIT {
+
+  @Rule public RetryRule retryRule = new RetryRule();
+
+  private static final String database = "root.test.SnapshotTSPatternTsfilePushConsumer";
+  private static final String database2 = "root.SnapshotTSPatternTsfilePushConsumer";
+  private static final String device = database + ".d_0";
+  private static final String device2 = database + ".d_1";
+  private static final String topicName = "topic_SnapshotTSPatternTsfilePushConsumer";
+  private static List<MeasurementSchema> schemaList = new ArrayList<>();
+  private static final String pattern = device + ".s_0";
+  private static SubscriptionPushConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createDB(database2);
+    createTopic_s(
+        topicName,
+        pattern,
+        "2024-01-01T00:00:00+08:00",
+        "2024-03-31T00:00:00+08:00",
+        true,
+        TopicConstant.MODE_SNAPSHOT_VALUE,
+        null);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_src.executeNonQueryStatement(
+        "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);");
+    session_dest.executeNonQueryStatement(
+        "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);");
+    session_src.executeNonQueryStatement(
+        "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);");
+    session_dest.executeNonQueryStatement(
+        "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);");
+    session_src.executeNonQueryStatement(
+        "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);");
+    session_src.executeNonQueryStatement(
+        "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);");
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach((System.out::println));
+    assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    dropDB(database2);
+    schemaList.clear();
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += 2000;
+    }
+    session_src.insertTablet(tablet);
+    session_src.executeNonQueryStatement("flush;");
+  }
+
+  @Test
+  @Retry
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    String sql =
+        "select count(s_0) from "
+            + device
+            + " where time >= 2024-01-01T00:00:00+08:00 and time <= 2024-03-31T00:00:00+08:00";
+    // Write data before subscribing
+    insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00
+    final AtomicInteger onReceiveCount = new AtomicInteger(0);
+    List<AtomicInteger> rowCounts = new ArrayList<>(4);
+    for (int i = 0; i < 4; i++) {
+      rowCounts.add(new AtomicInteger(0));
+    }
+
+    Path path_d0s0 = new Path(device, "s_0", true);
+    Path path_d0s1 = new Path(device, "s_1", true);
+    Path path_d1s0 = new Path(database + ".d_1", "s_0", true);
+    Path path_other_d2 = new Path(database2 + ".d_2", "s_0", true);
+    List<Path> paths = new ArrayList<>(4);
+    paths.add(path_d0s0);
+    paths.add(path_d0s1);
+    paths.add(path_d1s0);
+    paths.add(path_other_d2);
+
+    consumer =
+        new SubscriptionPushConsumer.Builder()
+            .host(SRC_HOST)
+            .port(SRC_PORT)
+            .consumerId("ts_tsfile_snapshot")
+            .consumerGroupId("push_mode")
+            .ackStrategy(AckStrategy.BEFORE_CONSUME)
+            .fileSaveDir("target/push-subscription")
+            .consumeListener(
+                message -> {
+                  onReceiveCount.incrementAndGet();
+                  System.out.println("onReceiveCount=" + onReceiveCount.get());
+                  try {
+                    TsFileReader reader = message.getTsFileHandler().openReader();
+                    for (int i = 0; i < 4; i++) {
+                      QueryDataSet dataset =
+                          reader.query(
+                              QueryExpression.create(
+                                  Collections.singletonList(paths.get(i)), null));
+                      while (dataset.hasNext()) {
+                        rowCounts.get(i).addAndGet(1);
+                        RowRecord next = dataset.next();
+                        System.out.println(next.getTimestamp() + "," + next.getFields());
+                      }
+                      System.out.println(
+                          FORMAT.format(new Date())
+                              + " rowCounts_"
+                              + i
+                              + ":"
+                              + rowCounts.get(i).get());
+                    }
+                  } catch (IOException e) {
+                    throw new RuntimeException(e);
+                  }
+                  return ConsumeResult.SUCCESS;
+                })
.buildPushConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + subs.getSubscriptions(topicName).forEach(System.out::println); + assertEquals( + subs.getSubscriptions(topicName).size(), 1, "show subscriptions after subscription"); + + insert_data(1707609600000L); // 2024-02-11 08:00:00+08:00 + insert_data(System.currentTimeMillis()); + System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + assertEquals(onReceiveCount.get(), 1, "receive files"); + assertEquals(rowCounts.get(0).get(), 5, device + ".s_0"); + assertEquals(rowCounts.get(1).get(), 0, device + ".s_1"); + assertEquals(rowCounts.get(2).get(), 0, database + ".d_1.s_0"); + assertEquals(rowCounts.get(3).get(), 0, database2 + ".d_2.s_0"); + }); + + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals( + subs.getSubscriptions(topicName).size(), 1, "show subscriptions after re-subscribing"); + + AWAIT.untilAsserted( + () -> { + assertGte(onReceiveCount.get(), 2, "receive files over 2"); + assertEquals(rowCounts.get(0).get(), 15, device + ".s_0"); + assertEquals(rowCounts.get(1).get(), 0, device + ".s_1"); + assertEquals(rowCounts.get(2).get(), 0, database + ".d_1.s_0"); + assertEquals(rowCounts.get(3).get(), 0, database2 + ".d_2.s_0"); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBConsumer2With1TopicShareProcessDataSetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBConsumer2With1TopicShareProcessDataSetIT.java new file mode 100644 index 0000000000000..baf20c68af91a --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBConsumer2With1TopicShareProcessDataSetIT.java @@ -0,0 +1,215 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.multi;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.subscription.consumer.AckStrategy;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+/***
+ * PushConsumer
+ * pattern: db
+ * Dataset
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBConsumer2With1TopicShareProcessDataSetIT
+    extends AbstractSubscriptionRegressionIT {
+  private static final String database = "root.test.Consumer2With1TopicShareProcessDataSet";
+  private static final String device = database + ".d_0";
+  private static final String topicName = "topic_Consumer2With1TopicShareProcessDataSet";
+  private static List<MeasurementSchema> schemaList = new ArrayList<>();
+
+  private static final String pattern = database + ".**";
+  private static SubscriptionPushConsumer consumer;
+  private static SubscriptionPushConsumer consumer2;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createTopic_s(topicName, pattern, null, null, false);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest2.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest2.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach((System.out::println));
+    assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+      consumer2.close();
+    } catch (Exception e) {
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException,
StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + // insert_data(1706659200000L); + Thread thread = + new Thread( + () -> { + long timestamp = 1706659200000L; // 2024-01-31 08:00:00+08:00 + for (int i = 0; i < 20; i++) { + try { + insert_data(timestamp); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } + timestamp += 20000; + } + }); + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("db_dataset_consumer_1") + .consumerGroupId("push_multi_test") + .ackStrategy(AckStrategy.BEFORE_CONSUME) + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + consumer.subscribe(topicName); + consumer2 = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("db_dataset_consumer_2") + .consumerGroupId("push_multi_test") + .ackStrategy(AckStrategy.BEFORE_CONSUME) + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest2.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer2.open(); + consumer2.subscribe(topicName); + thread.start(); + + thread.join(); + System.out.println("After subscription:"); + subs.getSubscriptions().forEach((System.out::println)); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + + String sql = "select count(s_0) from " + device; + // The first 5 entries may have duplicate data + System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + assertTrue(getCount(session_dest, sql) >= 0, "first consumer"); + assertTrue(getCount(session_dest2, sql) >= 0, "second Consumer"); + assertEquals( + getCount(session_dest, sql) + getCount(session_dest2, sql), + getCount(session_src, sql), + "share process"); + }); + System.out.println(FORMAT.format(new Date()) + " dest:" + getCount(session_dest, sql)); + System.out.println(FORMAT.format(new Date()) + " dest2:" + getCount(session_dest2, sql)); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBConsumer2With1TopicShareProcessTsfileIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBConsumer2With1TopicShareProcessTsfileIT.java new file mode 100644 
index 0000000000000..7430bf7a2f619
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBConsumer2With1TopicShareProcessTsfileIT.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.multi;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.subscription.consumer.AckStrategy;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.read.TsFileReader;
+import org.apache.tsfile.read.common.Path;
+import org.apache.tsfile.read.common.RowRecord;
+import org.apache.tsfile.read.expression.QueryExpression;
+import org.apache.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+/***
+ * PushConsumer
+ * pattern: db
+ * tsfile
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBConsumer2With1TopicShareProcessTsfileIT extends AbstractSubscriptionRegressionIT {
+  private static final String database = "root.test.Consumer2With1TopicShareProcessTsfile";
+  private static final String device = database + ".d_0";
+  private static final String topicName = "topic_Consumer2With1TopicShareProcessTsfile";
+  private static List<MeasurementSchema> schemaList = new ArrayList<>();
+
+  private static final String pattern = database + ".**";
+  private static SubscriptionPushConsumer consumer;
+  private static SubscriptionPushConsumer consumer2;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createTopic_s(topicName, pattern, null, null, true);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach((System.out::println));
+    assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+      consumer2.close();
+    } catch (Exception e) {
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += 2000;
+    }
+    session_src.insertTablet(tablet);
+    session_src.executeNonQueryStatement("flush;");
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    Thread thread =
+        new Thread(
+            () -> {
+              long timestamp = 1706659200000L; // 2024-01-31 08:00:00+08:00
+              for (int i = 0; i < 20; i++) {
+                try {
+                  insert_data(timestamp);
+                } catch (IoTDBConnectionException e) {
+                  throw new RuntimeException(e);
+                } catch (StatementExecutionException e) {
+                  throw new RuntimeException(e);
+                }
+                timestamp += 20000;
+              }
+            });
+    AtomicInteger rowCount1 = new AtomicInteger(0);
+    AtomicInteger rowCount2 = new AtomicInteger(0);
+    consumer =
+        new SubscriptionPushConsumer.Builder()
+            .host(SRC_HOST)
+            .port(SRC_PORT)
+            .consumerId("db_tsfile_consumer_1")
+            .consumerGroupId("push_multi_Consumer2With1TopicShareProcessTsfile")
+            .ackStrategy(AckStrategy.AFTER_CONSUME)
+            .fileSaveDir("target/push-subscription")
+            .consumeListener(
+                message -> {
+                  try {
+                    TsFileReader reader = message.getTsFileHandler().openReader();
+                    List<Path> paths = new ArrayList<>(2);
+                    for (int i = 0; i < 2; i++) {
+                      paths.add(new Path(device, "s_" + i, true));
+                    }
+                    QueryDataSet dataset = reader.query(QueryExpression.create(paths, null));
+                    while (dataset.hasNext()) {
+                      rowCount1.addAndGet(1);
+                      RowRecord next = dataset.next();
+                      // System.out.println(next.getTimestamp()+","+next.getFields());
+                    }
+                  } catch (IOException e) {
+                    throw new RuntimeException(e);
+                  }
+                  return ConsumeResult.SUCCESS;
+                })
+            .buildPushConsumer();
+    consumer.open();
+    consumer.subscribe(topicName);
+    consumer2 =
+        new SubscriptionPushConsumer.Builder()
+            .host(SRC_HOST)
+            .port(SRC_PORT)
+            .consumerId("db_tsfile_consumer_2")
+            .consumerGroupId("push_multi_Consumer2With1TopicShareProcessTsfile")
+            .ackStrategy(AckStrategy.AFTER_CONSUME)
+            .fileSaveDir("target/push-subscription")
+            .consumeListener(
+                message -> {
+                  try {
+                    TsFileReader reader = message.getTsFileHandler().openReader();
+                    List<Path> paths = new ArrayList<>(2);
+                    for (int i = 0; i < 2; i++) {
+                      paths.add(new Path(device, "s_" + i, true));
+                    }
+                    QueryDataSet dataset = reader.query(QueryExpression.create(paths, null));
+                    while (dataset.hasNext()) {
+                      rowCount2.addAndGet(1);
+                      RowRecord next = dataset.next();
+                      // System.out.println(next.getTimestamp()+","+next.getFields());
+                    }
+                  } catch (IOException e) {
+                    throw new RuntimeException(e);
+                  }
return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer2.open(); + consumer2.subscribe(topicName); + thread.start(); + + thread.join(); + System.out.println("After subscription:"); + subs.getSubscriptions().forEach((System.out::println)); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + + // The first 5 entries may have duplicate data + String sql = "select count(s_0) from " + device; + System.out.println("src " + getCount(session_src, sql)); + System.out.println("rowCount1.get()=" + rowCount1.get()); + System.out.println("rowCount2.get()=" + rowCount2.get()); + + AWAIT.untilAsserted( + () -> + assertGte( + rowCount1.get() + rowCount2.get(), getCount(session_src, sql), "share process")); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBMultiGroupVsMultiConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBMultiGroupVsMultiConsumerIT.java new file mode 100644 index 0000000000000..af39d745933a8 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBMultiGroupVsMultiConsumerIT.java @@ -0,0 +1,613 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.multi;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+import org.apache.iotdb.session.subscription.payload.SubscriptionMessageType;
+import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet;
+import org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.read.TsFileReader;
+import org.apache.tsfile.read.common.Path;
+import org.apache.tsfile.read.common.RowRecord;
+import org.apache.tsfile.read.expression.QueryExpression;
+import org.apache.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+/***
+ * pattern: device, different db
+ * |c0|t0|g1| tsfile databasePrefix+"0.**", "2024-01-01T00:00:00+08:00", "2024-03-31T23:59:59+08:00"
+ * |c1|t0|g1| tsfile
+ * |c2|t1|g1| dataset(dest) databasePrefix+"1.**"
+ * |c3|t1|g1| dataset(dest2)
+ * |c4|t2,t3|g2| dataset(dest) databasePrefix+"2.**", "now", null; databasePrefix+"3.**", null, "now"
+ * |c5|t3,t4|g2| dataset(dest2) databasePrefix+"4.**", null, "2024-03-31T23:59:59+08:00"
+ * |c6|t2,t4|g2| dataset(dest)
+ * |c7|t0,t3|g3| dataset(dest)/tsfile
+ * |c8|t6|g3| tsfile databasePrefix+"6.**", "now", null,
+ * |c9|t0,t3|g3| dataset(dest2)/tsfile
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBMultiGroupVsMultiConsumerIT extends AbstractSubscriptionRegressionIT {
+
+  private String topicNamePrefix = "topic_pushMultiGroupVsMultiConsumer_";
+  private String databasePrefix = "root.test.pushMultiGroupVsMultiConsumer_";
+  private int tsCount = 10;
+  private int consumerCount = 10;
+  private static List<MeasurementSchema> schemaList = new ArrayList<>();
+
+  private List<SubscriptionPushConsumer> consumers = new ArrayList<>(consumerCount);
+  private AtomicInteger rowCount00 = new AtomicInteger(0);
+  private AtomicInteger rowCount10 = new AtomicInteger(0);
+  private AtomicInteger rowCount70 = new AtomicInteger(0);
+  private AtomicInteger rowCount90 = new AtomicInteger(0);
+  private AtomicInteger rowCount6 = new AtomicInteger(0);
+  private String sql1 = "select count(s_0) from " + databasePrefix + "1.d_0";
+  private String sql2 = "select count(s_0) from " + databasePrefix + "2.d_0";
+  private String sql3 = "select count(s_0) from " + databasePrefix + "3.d_0";
+  private String sql4 = "select count(s_0) from " + databasePrefix + "4.d_0";
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    schemaList.add(new MeasurementSchema("s_0",
TSDataType.INT32)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + for (int i = 0; i < tsCount; i++) { + createDB(databasePrefix + i); + } + createTopic_s( + topicNamePrefix + 0, + databasePrefix + "0.**", + "2024-01-01T00:00:00+08:00", + "2024-03-31T23:59:59+08:00", + true); + createTopic_s(topicNamePrefix + 1, databasePrefix + "1.**", null, null, false); + + createTopic_s(topicNamePrefix + 2, databasePrefix + "2.**", "now", null, false); + createTopic_s(topicNamePrefix + 3, databasePrefix + "3.**", null, "now", false); + createTopic_s( + topicNamePrefix + 4, databasePrefix + "4.**", null, "2024-03-31T23:59:59+08:00", false); + + createTopic_s(topicNamePrefix + 6, databasePrefix + "6.**", "now", null, true); + + createTopic_s( + topicNamePrefix + 5, databasePrefix + "5.**", "2024-01-01T00:00:00+08:00", null, false); + createTopic_s( + topicNamePrefix + 7, databasePrefix + "7.**", null, "2024-03-31T23:59:59+08:00", true); + createTopic_s(topicNamePrefix + 8, databasePrefix + "8.**", null, "now", true); + createTopic_s( + topicNamePrefix + 9, databasePrefix + "9.**", "2024-01-01T00:00:00+08:00", null, true); + + subs.getTopics().forEach(System.out::println); + } + + @Override + protected void setUpConfig() { + super.setUpConfig(); + + IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(sender); + IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(receiver1); + IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(receiver2); + } + + @Override + @After + public void tearDown() throws Exception { + System.out.println(databasePrefix + "1.d_0:[src]" + getCount(session_src, sql1)); + System.out.println(databasePrefix + "1.d_0:[dest]" + getCount(session_dest, sql1)); + System.out.println(databasePrefix + "1.d_0:[dest2]" + getCount(session_dest2, sql1)); + System.out.println(databasePrefix + "2.d_0:[src]" + getCount(session_src, sql2)); + System.out.println(databasePrefix + "2.d_0:[dest]" + getCount(session_dest, sql2)); + System.out.println(databasePrefix + "2.d_0:[dest2]" + getCount(session_dest2, sql2)); + System.out.println(databasePrefix + "3.d_0:[src]" + getCount(session_src, sql3)); + System.out.println(databasePrefix + "3.d_0:[dest]" + getCount(session_dest, sql3)); + System.out.println(databasePrefix + "3.d_0:[dest2]" + getCount(session_dest2, sql3)); + System.out.println(databasePrefix + "4.d_0:[src]" + getCount(session_src, sql4)); + System.out.println(databasePrefix + "4.d_0:[dest]" + getCount(session_dest, sql4)); + System.out.println(databasePrefix + "4.d_0:[dest2]" + getCount(session_dest2, sql4)); + System.out.println("rowCount00.get()=" + rowCount00.get()); + System.out.println("rowCount10.get()=" + rowCount10.get()); + System.out.println("rowCount70.get()=" + rowCount70.get()); + System.out.println("rowCount90.get()=" + rowCount90.get()); + System.out.println("rowCount6.get()=" + rowCount6.get()); + for (SubscriptionPushConsumer c : consumers) { + try { + c.close(); + } catch (Exception e) { + } + } + for (int i = 0; i < tsCount; i++) { + subs.dropTopic(topicNamePrefix + i); + } + dropDB(databasePrefix); + super.tearDown(); + } + + private void insert_data(long timestamp, String device, int rows) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, rows); + int rowIndex = 0; + for (int row = 0; row < rows; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue(schemaList.get(0).getMeasurementId(), rowIndex, 
(row + 1) * 1400 + row); + tablet.addValue(schemaList.get(1).getMeasurementId(), rowIndex, (row + 1) * 100 + 0.5); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + /*** + * |c0|t0|g1| tsfile databasePrefix+"0.**", "2024-01-01T00:00:00+08:00", "2024-03-31T23:59:59+08:00" + * |c1|t0|g1| tsfile + * |c2|t1|g1| dataset(dest) databasePrefix+"1.**" + * |c3|t1|g1| dataset(dest2) + * |c4|t2,t3|g2| dataset(dest) databasePrefix+"2.**", "now", null; databasePrefix+"3.**", null, "now" + * |c5|t3,t4|g2| dataset(dest2) databasePrefix+"4.**", null, "2024-03-31T23:59:59+08:00" + * |c6|t2,t4|g2| dataset(dest) + * |c7|t0,t3|g3| dataset(dest)/tsfile + * |c8|t6|g3| tsfile databasePrefix+"6.**", "now", null, + * |c9|t0,t3|g3| dataset(dest2)/tsfile + */ + @Test + public void do_test() + throws TException, + IoTDBConnectionException, + IOException, + StatementExecutionException, + InterruptedException { + consumers.add( + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_0") + .consumerGroupId("push_group_id_1") + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + try { + TsFileReader reader = message.getTsFileHandler().openReader(); + QueryDataSet dataset = + reader.query( + QueryExpression.create( + Collections.singletonList( + new Path(databasePrefix + "0.d_0", "s_0", true)), + null)); + while (dataset.hasNext()) { + rowCount00.addAndGet(1); + RowRecord next = dataset.next(); + // System.out.println("c0,g1:[rowCount00]" + + // next.getTimestamp() + "," + next.getFields()); + } + // + // System.out.println("c0,g1,rowCount00="+rowCount00.get()); + } catch (IOException e) { + throw new RuntimeException(e); + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer()); + consumers.add( + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_1") + .consumerGroupId("push_group_id_1") + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + try { + TsFileReader reader = message.getTsFileHandler().openReader(); + QueryDataSet dataset = + reader.query( + QueryExpression.create( + Collections.singletonList( + new Path(databasePrefix + "0.d_0", "s_0", true)), + null)); + while (dataset.hasNext()) { + rowCount10.addAndGet(1); + RowRecord next = dataset.next(); + // + // System.out.println(databasePrefix+0+".d_0:[rowCount10]" + + // next.getTimestamp() + "," + next.getFields()); + } + // + // System.out.println("c1,g1,rowCount10="+rowCount00.get()); + } catch (IOException e) { + throw new RuntimeException(e); + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer()); + consumers.add( + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_2") + .consumerGroupId("push_group_id_1") + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer()); + consumers.add( + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_3") + .consumerGroupId("push_group_id_1") + .fileSaveDir("target/push-subscription") + .consumeListener( + message 
-> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest2.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer()); + consumers.add( + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_4") + .consumerGroupId("push_group_id_2") + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer()); + consumers.add( + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_5") + .consumerGroupId("push_group_id_2") + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest2.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer()); + consumers.add( + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_6") + .consumerGroupId("push_group_id_2") + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer()); + consumers.add( + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_7") + .consumerGroupId("push_group_id_3") + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + final short messageType = message.getMessageType(); + if (SubscriptionMessageType.isValidatedMessageType(messageType)) { + switch (SubscriptionMessageType.valueOf(messageType)) { + case SESSION_DATA_SETS_HANDLER: + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + break; + case TS_FILE_HANDLER: + try { + TsFileReader reader = message.getTsFileHandler().openReader(); + QueryDataSet dataset = + reader.query( + QueryExpression.create( + Collections.singletonList( + new Path(databasePrefix + "0.d_0", "s_0", true)), + null)); + while (dataset.hasNext()) { + rowCount70.addAndGet(1); + RowRecord next = dataset.next(); + // + // System.out.println(databasePrefix+"0.d_0:[rowCount70]" + + // next.getTimestamp() + "," + next.getFields()); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + break; + } + } + return 
ConsumeResult.SUCCESS; + }) + .buildPushConsumer()); + consumers.add( + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_8") + .consumerGroupId("push_group_id_3") + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + try { + TsFileReader reader = message.getTsFileHandler().openReader(); + QueryDataSet dataset = + reader.query( + QueryExpression.create( + Collections.singletonList( + new Path(databasePrefix + "6.d_0", "s_0", true)), + null)); + while (dataset.hasNext()) { + rowCount6.addAndGet(1); + RowRecord next = dataset.next(); + // System.out.println(databasePrefix+6+".d_0:" + + // next.getTimestamp() + "," + next.getFields()); + } + // + // System.out.println("c8,g3,rowCount00="+rowCount6.get()); + } catch (IOException e) { + throw new RuntimeException(e); + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer()); + consumers.add( + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("consumer_id_9") + .consumerGroupId("push_group_id_3") + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + final short messageType = message.getMessageType(); + if (SubscriptionMessageType.isValidatedMessageType(messageType)) { + switch (SubscriptionMessageType.valueOf(messageType)) { + case SESSION_DATA_SETS_HANDLER: + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest2.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + break; + case TS_FILE_HANDLER: + try { + TsFileReader reader = message.getTsFileHandler().openReader(); + QueryDataSet dataset = + reader.query( + QueryExpression.create( + Collections.singletonList( + new Path(databasePrefix + "0.d_0", "s_0", true)), + null)); + while (dataset.hasNext()) { + rowCount90.addAndGet(1); + RowRecord next = dataset.next(); + // + // System.out.println(databasePrefix+"0.d_0:[rowCount90]" + + // next.getTimestamp() + "," + next.getFields()); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + break; + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer()); + + for (int j = 0; j < consumers.size(); j++) { + consumers.get(j).open(); + } + + consumers.get(0).subscribe(topicNamePrefix + 0); + consumers.get(1).subscribe(topicNamePrefix + 0); + consumers.get(2).subscribe(topicNamePrefix + 1); + consumers.get(3).subscribe(topicNamePrefix + 1); + consumers.get(4).subscribe(topicNamePrefix + 2, topicNamePrefix + 3); + consumers.get(5).subscribe(topicNamePrefix + 3, topicNamePrefix + 4); + consumers.get(6).subscribe(topicNamePrefix + 2, topicNamePrefix + 4); + consumers.get(7).subscribe(topicNamePrefix + 0, topicNamePrefix + 3); + consumers.get(8).subscribe(topicNamePrefix + 6); + consumers.get(9).subscribe(topicNamePrefix + 0, topicNamePrefix + 3); + subs.getSubscriptions().forEach((System.out::println)); + + // Write data + Thread thread = + new Thread( + () -> { + long timestamp = 1706659200000L; // 2024-01-31 08:00:00+08:00 + for (int i = 0; i < 20; i++) { + for (int k = 0; k < 10; k++) { + String device = databasePrefix + k + ".d_0"; + try { + insert_data(timestamp, device, 20); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } + } + timestamp += 40000; + } + for (int i = 0; i < 10; 
i++) {
+                  String device = databasePrefix + i + ".d_0";
+                  try {
+                    insert_data(System.currentTimeMillis(), device, 5);
+                  } catch (IoTDBConnectionException e) {
+                    throw new RuntimeException(e);
+                  } catch (StatementExecutionException e) {
+                    throw new RuntimeException(e);
+                  }
+                }
+                String device = databasePrefix + 2 + ".d_0";
+                for (int i = 0; i < 20; i++) {
+                  try {
+                    insert_data(System.currentTimeMillis(), device, 5);
+                  } catch (IoTDBConnectionException e) {
+                    throw new RuntimeException(e);
+                  } catch (StatementExecutionException e) {
+                    throw new RuntimeException(e);
+                  }
+                }
+              });
+
+    thread.start();
+    thread.join();
+
+    System.out.println(databasePrefix + "1.d_0:[src]" + getCount(session_src, sql1));
+    System.out.println(databasePrefix + "2.d_0:[src]" + getCount(session_src, sql2));
+    System.out.println(databasePrefix + "3.d_0:[src]" + getCount(session_src, sql3));
+    System.out.println(databasePrefix + "4.d_0:[src]" + getCount(session_src, sql4));
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(rowCount00.get() + rowCount10.get(), 400, "c0,c1,topic0,tsfile");
+          assertEquals(
+              getCount(session_dest, sql1) + getCount(session_dest2, sql1),
+              getCount(session_src, sql1),
+              "c2,c3,topic1,group1");
+          assertEquals(
+              getCount(session_dest, sql2) + getCount(session_dest2, sql2),
+              getCount(session_src, sql2) - 400,
+              "c4,c6,topic2,group2");
+          final long topic3Total = getCount(session_dest, sql3) + getCount(session_dest2, sql3);
+          assertTrue(400 <= topic3Total && topic3Total <= 800, "c4,c5|c7,c9|topic3");
+          assertEquals(
+              getCount(session_dest, sql4) + getCount(session_dest2, sql4),
+              400,
+              "c5,c6,topic4,group2");
+          assertGte(rowCount70.get() + rowCount90.get(), 400, "c7,c9,topic0,tsfile");
+          assertEquals(rowCount6.get(), 5, "c8,topic6,tsfile");
+          // assertTrue(rowCount00.get()>0);
+          // assertTrue(rowCount10.get()>0);
+          // assertTrue(rowCount70.get()>0);
+          // assertTrue(rowCount90.get()>0);
+        });
+  }
+}
+/***
+ * Expected result:
+ * root.test.pushMultiGroupVsMultiConsumer_1.d_0:[src]405
+ * root.test.pushMultiGroupVsMultiConsumer_1.d_0:[dest]305
+ * root.test.pushMultiGroupVsMultiConsumer_1.d_0:[dest2]100
+ * root.test.pushMultiGroupVsMultiConsumer_2.d_0:[src]505
+ * root.test.pushMultiGroupVsMultiConsumer_2.d_0:[dest]105
+ * root.test.pushMultiGroupVsMultiConsumer_2.d_0:[dest2]0
+ * root.test.pushMultiGroupVsMultiConsumer_3.d_0:[src]405
+ * root.test.pushMultiGroupVsMultiConsumer_3.d_0:[dest]220
+ * root.test.pushMultiGroupVsMultiConsumer_3.d_0:[dest2]300
+ * root.test.pushMultiGroupVsMultiConsumer_4.d_0:[src]405
+ * root.test.pushMultiGroupVsMultiConsumer_4.d_0:[dest]300
+ * root.test.pushMultiGroupVsMultiConsumer_4.d_0:[dest2]100
+ * rowCount00.get()=200
+ * rowCount10.get()=200
+ * rowCount70.get()=240
+ * rowCount90.get()=160
+ * rowCount6.get()=5
+ **/
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBOneConsumerMultiTopicsDatasetIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBOneConsumerMultiTopicsDatasetIT.java
new file mode 100644
index 0000000000000..9f1b20f30ce97
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBOneConsumerMultiTopicsDatasetIT.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.multi;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.subscription.consumer.AckStrategy;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+/***
+ * 1 consumer subscribes to 2 topics: fixed time range
+ * dataset
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBOneConsumerMultiTopicsDatasetIT extends AbstractSubscriptionRegressionIT {
+  private static final String database = "root.test.OneConsumerMultiTopicsDataset";
+  private static final String database2 = "root.OneConsumerMultiTopicsDataset";
+  private static final String device = database + ".d_0";
+  private static final String topicName = "topic_OneConsumerMultiTopicsDataset_1";
+  private static List<MeasurementSchema> schemaList = new ArrayList<>();
+
+  private String pattern = database + ".**";
+  private String pattern2 = database2 + ".**";
+  private static final String device2 = database2 + ".d_1";
+  private String topicName2 = "topic_OneConsumerMultiTopicsDataset_2";
+  private static SubscriptionPushConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createDB(database2);
+    createTopic_s(
+        topicName, pattern, "2024-01-01T00:00:00+08:00", "2024-03-01T00:00:00+08:00", false);
+    createTopic_s(topicName2, pattern2, null, null, false);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_src.createTimeseries(
+        device2 + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
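+    // device2 lives under database2 and is matched only by topicName2's unbounded pattern.
+    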
session_src.createTimeseries( + device2 + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device2 + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device2 + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + subs.dropTopic(topicName2); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp, String device) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L, device); // 2024-01-31 08:00:00+08:00 + insert_data(1707782400000L, device2); // 2024-02-13 + session_src.executeNonQueryStatement( + "insert into " + + device + + "(time,s_0,s_1)values(1710288000000,313,6.78);"); // 2024-03-13 08:00:00+08:00 + // Subscribe + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("root_dataset_consumer") + .consumerGroupId("push_multi") + .ackStrategy(AckStrategy.BEFORE_CONSUME) + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + assertEquals(subs.getSubscriptions().size(), 0, "Before subscription show subscriptions"); + + consumer.subscribe(topicName, topicName2); + subs.getSubscriptions().forEach((System.out::println)); + assertEquals(subs.getSubscriptions().size(), 2, "show subscriptions after subscription"); + + Thread thread = + new Thread( + () -> { + try { + insert_data(System.currentTimeMillis(), device); + insert_data(System.currentTimeMillis(), device2); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + thread.start(); + thread.join(); + String sql = + "select count(s_0) from " + + device + + " where time >= 2024-01-01T00:00:00+08:00 and time <= 2024-03-01T00:00:00+08:00"; + String sql2 = "select count(s_0) from " + device2; + System.out.println(FORMAT.format(new Date()) + " src device:" + getCount(session_src, sql)); + 
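// Expected: topicName is time-bounded (2024-01-01 to 2024-03-01), so only the five 2024-01-31 rows sync for device; topicName2 is unbounded, so device2 receives all ten rows.
+ 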
System.out.println(FORMAT.format(new Date()) + " src device:" + getCount(session_src, sql2)); + + AWAIT.untilAsserted( + () -> { + // Consumption data + check_count(5, "select count(s_0) from " + device, "Consumption data:" + pattern); + check_count(10, "select count(s_0) from " + device2, "Consumption data:" + pattern2); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBOneConsumerMultiTopicsMixIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBOneConsumerMultiTopicsMixIT.java new file mode 100644 index 0000000000000..d6637e0c35153 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBOneConsumerMultiTopicsMixIT.java @@ -0,0 +1,246 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.multi; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionMessageType; +import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet; +import org.apache.iotdb.subscription.it.Retry; +import org.apache.iotdb.subscription.it.RetryRule; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.read.TsFileReader; +import org.apache.tsfile.read.common.Path; +import org.apache.tsfile.read.common.RowRecord; +import org.apache.tsfile.read.expression.QueryExpression; +import org.apache.tsfile.read.query.dataset.QueryDataSet; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static 
org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+/***
+ * One consumer subscribes to two topics: historical data.
+ * Flush timing is critical here: if rows inside and outside a topic's time filter land in the same TsFile, the whole file is extracted.
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBOneConsumerMultiTopicsMixIT extends AbstractSubscriptionRegressionIT {
+
+  @Rule public RetryRule retryRule = new RetryRule();
+
+  private static final String database = "root.test.OneConsumerMultiTopicsMix";
+  private static final String database2 = "root.OneConsumerMultiTopicsMix";
+  private static final String device = database + ".d_0";
+  private static final String topicName = "topic_OneConsumerMultiTopicsMix_1";
+  private String pattern = database + ".**";
+  private String pattern2 = database2 + ".**";
+  private String device2 = database2 + ".d_0";
+  private String topicName2 = "topic_OneConsumerMultiTopicsMix_2";
+  private List<MeasurementSchema> schemaList = new ArrayList<>();
+  private SubscriptionPushConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createDB(database2);
+    createTopic_s(topicName, pattern, null, null, false);
+    createTopic_s(topicName2, pattern2, null, null, true);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.FLOAT, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.TEXT, TSEncoding.DICTIONARY, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.FLOAT, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.TEXT, TSEncoding.DICTIONARY, CompressionType.LZMA2);
+    session_src.createTimeseries(
+        device2 + ".s_0", TSDataType.FLOAT, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device2 + ".s_1", TSDataType.TEXT, TSEncoding.DICTIONARY, CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.FLOAT));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.TEXT));
+    assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+      // ignored: best-effort cleanup of the consumer during teardown
+    }
+    subs.dropTopic(topicName);
+    subs.dropTopic(topicName2);
+    dropDB(database);
+    dropDB(database2);
+    schemaList.clear();
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp, String device)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row + 2.45f);
+      tablet.addValue("s_1", rowIndex, "rowIndex" + rowIndex);
+      timestamp += 2000;
+    }
+    session_src.insertTablet(tablet);
+    session_src.executeNonQueryStatement("flush");
+  }
+
+  @Retry
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    // Write data before subscribing
+    session_src.executeNonQueryStatement(
+        "insert into "
+            + device
+            + "(time,s_0,s_1)values(1710288000000,313,'2024-03-13 08:00:00+08:00');"); // 2024-03-13 08:00:00+08:00
+    session_src.executeNonQueryStatement(
+        "insert into "
+            + device
+            +
"(time,s_0,s_1)values(1703980800000,133.45,'2023-12-31 08:00:00+08:00');"); // 2023-12-31 08:00:00+08:00 + insert_data(1706659200000L, device); // 2024-01-31 08:00:00+08:00 + insert_data(1706659200000L, device2); // 2024-01-31 08:00:00+08:00 + AtomicInteger rowCount1 = new AtomicInteger(0); + AtomicInteger rowCount2 = new AtomicInteger(0); + // Subscribe + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("db_mix_consumer_2_topic") + .consumerGroupId("OneConsumerMultiTopicsMix") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + final short messageType = message.getMessageType(); + if (SubscriptionMessageType.isValidatedMessageType(messageType)) { + switch (SubscriptionMessageType.valueOf(messageType)) { + case SESSION_DATA_SETS_HANDLER: + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + break; + case TS_FILE_HANDLER: + try { + TsFileReader reader = message.getTsFileHandler().openReader(); + QueryDataSet dataset = + reader.query( + QueryExpression.create( + Collections.singletonList(new Path(device, "s_0", true)), + null)); + while (dataset.hasNext()) { + rowCount1.addAndGet(1); + RowRecord next = dataset.next(); + System.out.println( + device + ":" + next.getTimestamp() + "," + next.getFields()); + } + dataset = + reader.query( + QueryExpression.create( + Collections.singletonList(new Path(device2, "s_0", true)), + null)); + while (dataset.hasNext()) { + rowCount2.addAndGet(1); + RowRecord next = dataset.next(); + System.out.println( + device2 + ":" + next.getTimestamp() + "," + next.getFields()); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + break; + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + consumer.subscribe(topicName, topicName2); + + System.out.println("###### Subscription Query:"); + subs.getSubscriptions().forEach((System.out::println)); + assertEquals(subs.getSubscriptions().size(), 2, "subscribe and show subscriptions"); + // Subscribe and then write data + Thread thread = + new Thread( + () -> { + try { + insert_data(System.currentTimeMillis(), device); + insert_data(System.currentTimeMillis(), device2); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + thread.start(); + thread.join(); + + AWAIT.untilAsserted( + () -> { + assertEquals(rowCount1.get(), 0, "pattern1"); + check_count(12, "select count(s_0) from " + device, "dataset pattern1"); + assertEquals(rowCount2.get(), 10, "pattern2"); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBOneConsumerMultiTopicsTsfileIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBOneConsumerMultiTopicsTsfileIT.java new file mode 100644 index 0000000000000..35f1409f9fff3 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/multi/IoTDBOneConsumerMultiTopicsTsfileIT.java @@ -0,0 +1,228 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.multi;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.subscription.consumer.AckStrategy;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+import org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant;
+import org.apache.iotdb.subscription.it.Retry;
+import org.apache.iotdb.subscription.it.RetryRule;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.read.TsFileReader;
+import org.apache.tsfile.read.common.Path;
+import org.apache.tsfile.read.common.RowRecord;
+import org.apache.tsfile.read.expression.QueryExpression;
+import org.apache.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+/***
+ * One consumer subscribes to two topics: historical data.
+ * Flush timing is critical here: if rows inside and outside a topic's time filter land in the same TsFile, the whole file is extracted.
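+ * Only rows written before topic creation (end bound "now") are delivered: two SQL rows plus five tablet rows for device (7), and five tablet rows for device2 (5).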
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBOneConsumerMultiTopicsTsfileIT extends AbstractSubscriptionRegressionIT {
+
+  @Rule public RetryRule retryRule = new RetryRule();
+
+  private static final String database = "root.test.OneConsumerMultiTopicsTsfile";
+  private static final String database2 = "root.OneConsumerMultiTopicsTsfile";
+  private static final String device = database + ".d_0";
+  private static final String topicName = "topic_OneConsumerMultiTopicsTsfile_1";
+  private String pattern = database + ".**";
+  private String pattern2 = database2 + ".**";
+  private String device2 = database2 + ".d_0";
+  private String topicName2 = "topic_OneConsumerMultiTopicsTsfile_2";
+  private List<MeasurementSchema> schemaList = new ArrayList<>();
+  private SubscriptionPushConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createDB(database2);
+    createTopic_s(topicName, pattern, null, "now", true);
+    createTopic_s(topicName2, pattern2, null, "now", true);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.FLOAT, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.TEXT, TSEncoding.DICTIONARY, CompressionType.LZMA2);
+    session_src.createTimeseries(
+        device2 + ".s_0", TSDataType.FLOAT, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device2 + ".s_1", TSDataType.TEXT, TSEncoding.DICTIONARY, CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.FLOAT));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.TEXT));
+    assertTrue(subs.getTopic(topicName).isPresent(), "create show topics");
+  }
+
+  @Override
+  protected void setUpConfig() {
+    super.setUpConfig();
+
+    IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(sender);
+    IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(receiver1);
+    IoTDBSubscriptionITConstant.FORCE_SCALABLE_SINGLE_NODE_MODE.accept(receiver2);
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+      // ignored: best-effort cleanup of the consumer during teardown
+    }
+    subs.dropTopic(topicName);
+    subs.dropTopic(topicName2);
+    dropDB(database);
+    dropDB(database2);
+    schemaList.clear();
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp, String device)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row + 2.45f);
+      tablet.addValue("s_1", rowIndex, "rowIndex" + rowIndex);
+      timestamp += 2000;
+    }
+    session_src.insertTablet(tablet);
+    session_src.executeNonQueryStatement("flush");
+  }
+
+  @Test
+  @Retry
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    // Write data before subscribing
+    session_src.executeNonQueryStatement(
+        "insert into "
+            + device
+            + "(time,s_0,s_1)values(1710288000000,313,'2024-03-13 08:00:00+08:00');"); // 2024-03-13 08:00:00+08:00
+    session_src.executeNonQueryStatement(
+        "insert into "
+            + device
+            + "(time,s_0,s_1)values(1703980800000,133.45,'2023-12-31 08:00:00+08:00');"); // 2023-12-31 08:00:00+08:00
+    insert_data(1706659200000L, device); // 2024-01-31 08:00:00+08:00
+    insert_data(1706659200000L, device2); //
2024-01-31 08:00:00+08:00 + AtomicInteger rowCount1 = new AtomicInteger(0); + AtomicInteger rowCount2 = new AtomicInteger(0); + // Subscribe + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("db_tsfile_consumer_2_topic") + .consumerGroupId("OneConsumerMultiTopicsTsfile") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + try { + TsFileReader reader = message.getTsFileHandler().openReader(); + QueryDataSet dataset = + reader.query( + QueryExpression.create( + Collections.singletonList(new Path(device, "s_0", true)), null)); + while (dataset.hasNext()) { + rowCount1.addAndGet(1); + RowRecord next = dataset.next(); + System.out.println( + device + ":" + next.getTimestamp() + "," + next.getFields()); + } + dataset = + reader.query( + QueryExpression.create( + Collections.singletonList(new Path(device2, "s_0", true)), null)); + while (dataset.hasNext()) { + rowCount2.addAndGet(1); + RowRecord next = dataset.next(); + System.out.println( + device2 + ":" + next.getTimestamp() + "," + next.getFields()); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + consumer.subscribe(topicName, topicName2); + + System.out.println("###### Subscription query:"); + subs.getSubscriptions().forEach((System.out::println)); + assertEquals(subs.getSubscriptions().size(), 2, "subscribe then show subscriptions"); + // Subscribe and then write data + Thread thread = + new Thread( + () -> { + try { + insert_data(System.currentTimeMillis(), device); + insert_data(System.currentTimeMillis(), device2); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + thread.start(); + thread.join(); + + AWAIT.untilAsserted( + () -> { + assertEquals(rowCount1.get(), 7, "pattern1"); + assertEquals(rowCount2.get(), 5, "pattern2"); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBDBPatternDatasetPushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBDBPatternDatasetPushConsumerIT.java new file mode 100644 index 0000000000000..1c841854c6f0d --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBDBPatternDatasetPushConsumerIT.java @@ -0,0 +1,190 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.pattern;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.subscription.consumer.AckStrategy;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+/***
+ * PushConsumer
+ * DataSet
+ * pattern: db
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBDBPatternDatasetPushConsumerIT extends AbstractSubscriptionRegressionIT {
+  private static final String database = "root.test.DBPatternDatasetPushConsumer";
+  private static final String database2 = "root.DBPatternDatasetPushConsumer";
+  private static final String device = database + ".d_0";
+  private static final String device2 = database + ".d_1";
+  private static final String topicName = "topic_DBPatternDatasetPushConsumer";
+  private static List<MeasurementSchema> schemaList = new ArrayList<>();
+
+  private static String pattern = database + ".**";
+  private static SubscriptionPushConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createDB(database2);
+    createTopic_s(topicName, pattern, null, null, false);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_src.executeNonQueryStatement(
+        "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);");
+    session_dest.executeNonQueryStatement(
+        "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);");
+    session_src.executeNonQueryStatement(
+        "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);");
+    session_dest.executeNonQueryStatement(
+        "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);");
+    session_src.executeNonQueryStatement(
+        "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);");
+    session_src.executeNonQueryStatement(
+        "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);");
+    schemaList.add(new MeasurementSchema("s_0",
TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += row * 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("DB_dataset_consumer") + .consumerGroupId("push_pattern") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .fileSaveDir("target") + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis()); + + AWAIT.untilAsserted( + () -> { + check_count(8, "select count(s_0) from " + device, "Consumption Data: s_0"); + check_count(8, "select count(s_1) from " + device, "Consumption data: s_1"); + check_count(1, "select count(s_0) from " + database + ".d_1", "Consumption data:d_1"); + check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + }); + + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + // Consumption data: Progress is not retained after canceling and re-subscribing. Full + // synchronization. 
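+    // Since IoTDB inserts are idempotent per timestamp, re-delivered rows overwrite existing ones and the count grows only by the newly written rows (8 -> 12).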
+ AWAIT.untilAsserted( + () -> { + check_count(12, "select count(s_0) from " + device, "consume data again:s_0"); + check_count(12, "select count(s_1) from " + device, "Consumption Data:s_1"); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBDBPatternTsfilePushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBDBPatternTsfilePushConsumerIT.java new file mode 100644 index 0000000000000..3f72c1aef92c6 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBDBPatternTsfilePushConsumerIT.java @@ -0,0 +1,220 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.read.TsFileReader; +import org.apache.tsfile.read.common.Path; +import org.apache.tsfile.read.expression.QueryExpression; +import org.apache.tsfile.read.query.dataset.QueryDataSet; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer + * TsFile + * pattern: db + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBDBPatternTsfilePushConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.DBPatternTsfilePushConsumer"; + private static final String database2 = "root.DBPatternTsfilePushConsumer"; 
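+  // database2 sits outside the subscribed pattern (database + ".**") and verifies that pattern filtering excludes it.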
+  private static final String device = database + ".d_0";
+  private static final String device2 = database + ".d_1";
+  private static final String topicName = "topic_DBPatternTsfilePushConsumer";
+  private static List<MeasurementSchema> schemaList = new ArrayList<>();
+
+  private static String pattern = database + ".**";
+  private static SubscriptionPushConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createDB(database2);
+    createTopic_s(topicName, pattern, null, null, true);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_src.executeNonQueryStatement(
+        "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);");
+    session_dest.executeNonQueryStatement(
+        "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);");
+    session_src.executeNonQueryStatement(
+        "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);");
+    session_dest.executeNonQueryStatement(
+        "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);");
+    session_src.executeNonQueryStatement(
+        "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);");
+    session_src.executeNonQueryStatement(
+        "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);");
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach((System.out::println));
+    assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    dropDB(database2);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += row * 2000;
+    }
+    session_src.insertTablet(tablet);
+    session_src.executeNonQueryStatement("flush;");
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    // Write data before subscribing
+    insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00
+    final AtomicInteger onReceiveCount = new AtomicInteger(0);
+    final AtomicInteger d0s0_rowCount = new AtomicInteger(0);
+    final AtomicInteger d0s1_rowCount = new AtomicInteger(0);
+    final AtomicInteger d1s0_rowCount = new AtomicInteger(0);
+    final AtomicInteger other_d2_rowCount = new AtomicInteger(0);
+    List<AtomicInteger> rowCounts = new ArrayList<>(4);
+    rowCounts.add(d0s0_rowCount);
+    rowCounts.add(d0s1_rowCount);
+    rowCounts.add(d1s0_rowCount);
+    rowCounts.add(other_d2_rowCount);
+    consumer =
+        new SubscriptionPushConsumer.Builder()
+            .host(SRC_HOST)
+            .port(SRC_PORT)
+            .consumerId("DB_TsFile_consumer")
+            .consumerGroupId("push_pattern")
+            .ackStrategy(AckStrategy.AFTER_CONSUME)
+            .fileSaveDir("target/push-subscription")
+            .consumeListener(
+                message -> {
+                  onReceiveCount.incrementAndGet();
+                  try {
+                    TsFileReader reader = message.getTsFileHandler().openReader();
+                    Path path_d0s0 = new Path(device, "s_0", true);
+                    Path path_d0s1 = new Path(device, "s_1", true);
+                    Path path_d1s0 = new Path(database + ".d_1", "s_0", true);
+                    Path path_other_d2 = new Path(database2 + ".d_2", "s_0", true);
+                    List<Path> paths = new ArrayList<>(4);
+                    paths.add(path_d0s0);
+                    paths.add(path_d0s1);
+                    paths.add(path_d1s0);
+                    paths.add(path_other_d2);
+                    for (int i = 0; i < 4; i++) {
+                      QueryDataSet dataset =
+                          reader.query(
+                              QueryExpression.create(
+                                  Collections.singletonList(paths.get(i)), null));
+                      while (dataset.hasNext()) {
+                        rowCounts.get(i).addAndGet(1);
+                        dataset.next();
+                      }
+                    }
+                  } catch (IOException e) {
+                    throw new RuntimeException(e);
+                  }
+                  return ConsumeResult.SUCCESS;
+                })
+            .buildPushConsumer();
+    consumer.open();
+
+    // Subscribe
+    consumer.subscribe(topicName);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+    insert_data(System.currentTimeMillis());
+    AWAIT.untilAsserted(
+        () -> {
+          assertTrue(onReceiveCount.get() >= 2, "receive files over 2");
+          assertEquals(rowCounts.get(0).get(), 8, device + ".s_0");
+          assertEquals(rowCounts.get(1).get(), 8, device + ".s_1");
+          assertEquals(rowCounts.get(2).get(), 1, database + ".d_1.s_0");
+          assertEquals(rowCounts.get(3).get(), 0, database2 + ".d_2.s_0");
+        });
+    // Unsubscribe
+    consumer.unsubscribe(topicName);
+    // Subscribe and then write data
+    consumer.subscribe(topicName);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing");
+    insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertTrue(onReceiveCount.get() >= 5, "receive files over 5");
+          assertEquals(rowCounts.get(0).get(), 20, device + ".s_0");
+          assertEquals(rowCounts.get(1).get(), 20, device + ".s_1");
+          assertEquals(rowCounts.get(2).get(), 2, database + ".d_1.s_0");
+        });
+  }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBDefaultPatternTsfilePushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBDefaultPatternTsfilePushConsumerIT.java
new file mode 100644
index 0000000000000..ef78934dfa9c6
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBDefaultPatternTsfilePushConsumerIT.java
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.read.TsFileReader; +import org.apache.tsfile.read.common.Path; +import org.apache.tsfile.read.expression.QueryExpression; +import org.apache.tsfile.read.query.dataset.QueryDataSet; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer + * TsFile + * pattern: root.** + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBDefaultPatternTsfilePushConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.DefaultPatternTsfilePushConsumer"; + private static final String database2 = "root.DefaultPatternTsfilePushConsumer"; + private static final String device = database + ".d_0"; + private static final String device2 = database + ".d_1"; + private static final String topicName = "topic_DefaultPatternTsfilePushConsumer"; + private static List schemaList = new ArrayList<>(); + + private SubscriptionPushConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s(topicName, null, null, null, true); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + 
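+    // Seed one history row for each auxiliary device. The topic uses the default pattern
+    // (root.**), so both rows are in scope and the assertions below expect d_1 and d_2
+    // counts of 1.
+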
session_src.executeNonQueryStatement(
+        "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);");
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach((System.out::println));
+    assertTrue(subs.getTopic(topicName).isPresent(), "create show topics");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    dropDB(database2);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += row * 2000;
+    }
+    session_src.insertTablet(tablet);
+    session_src.executeNonQueryStatement("flush;");
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    // Write data before subscribing
+    insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00
+    final AtomicInteger onReceiveCount = new AtomicInteger(0);
+    final AtomicInteger d0s0_rowCount = new AtomicInteger(0);
+    final AtomicInteger d0s1_rowCount = new AtomicInteger(0);
+    final AtomicInteger d1s0_rowCount = new AtomicInteger(0);
+    final AtomicInteger other_d2_rowCount = new AtomicInteger(0);
+    List<AtomicInteger> rowCounts = new ArrayList<>(4);
+    rowCounts.add(d0s0_rowCount);
+    rowCounts.add(d0s1_rowCount);
+    rowCounts.add(d1s0_rowCount);
+    rowCounts.add(other_d2_rowCount);
+    consumer =
+        new SubscriptionPushConsumer.Builder()
+            .host(SRC_HOST)
+            .port(SRC_PORT)
+            .consumerId("default_pattern_TsFile_consumer")
+            .consumerGroupId("push_pattern")
+            .ackStrategy(AckStrategy.AFTER_CONSUME)
+            .fileSaveDir("target/push-subscription")
+            .consumeListener(
+                message -> {
+                  onReceiveCount.incrementAndGet();
+                  try {
+                    TsFileReader reader = message.getTsFileHandler().openReader();
+                    Path path_d0s0 = new Path(device, "s_0", true);
+                    Path path_d0s1 = new Path(device, "s_1", true);
+                    Path path_d1s0 = new Path(database + ".d_1", "s_0", true);
+                    Path path_other_d2 = new Path(database2 + ".d_2", "s_0", true);
+                    List<Path> paths = new ArrayList<>(4);
+                    paths.add(path_d0s0);
+                    paths.add(path_d0s1);
+                    paths.add(path_d1s0);
+                    paths.add(path_other_d2);
+                    for (int i = 0; i < 4; i++) {
+                      QueryDataSet dataset =
+                          reader.query(
+                              QueryExpression.create(
+                                  Collections.singletonList(paths.get(i)), null));
+                      while (dataset.hasNext()) {
+                        rowCounts.get(i).addAndGet(1);
+                        dataset.next();
+                      }
+                    }
+                  } catch (IOException e) {
+                    throw new RuntimeException(e);
+                  }
+                  return ConsumeResult.SUCCESS;
+                })
+            .buildPushConsumer();
+    consumer.open();
+
+    // Subscribe
+    consumer.subscribe(topicName);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+    insert_data(System.currentTimeMillis());
+    AWAIT.untilAsserted(
+        () -> {
+          assertEquals(rowCounts.get(0).get(), 8, device + ".s_0");
+          assertEquals(rowCounts.get(1).get(), 8, device + ".s_1");
+          assertEquals(rowCounts.get(2).get(), 1, database + ".d_1.s_0");
+          assertEquals(rowCounts.get(3).get(), 1, database2 + ".d_2.s_0");
+        });
+    // Unsubscribe
+    consumer.unsubscribe(topicName);
+    // Subscribe and then write data
+    consumer.subscribe(topicName);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing");
+    insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00
+
+    // After re-subscribing, progress is not retained and everything is consumed again.
+    AWAIT.untilAsserted(
+        () -> {
+          assertEquals(rowCounts.get(0).get(), 20, device + ".s_0");
+          assertEquals(rowCounts.get(1).get(), 20, device + ".s_1");
+          assertEquals(rowCounts.get(2).get(), 2, database + ".d_1.s_0");
+          assertEquals(rowCounts.get(3).get(), 2, database2 + ".d_2.s_0");
+        });
+  }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBDevicePatternDatasetPushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBDevicePatternDatasetPushConsumerIT.java
new file mode 100644
index 0000000000000..16b18b13cff7d
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBDevicePatternDatasetPushConsumerIT.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer + * DataSet + * pattern: device + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBDevicePatternDatasetPushConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.DevicePatternDatasetPushConsumer"; + private static final String database2 = "root.DevicePatternDatasetPushConsumer"; + private static final String device = database + ".d_0"; + private static final String device2 = database + ".d_1"; + private static final String topicName = "topic_DevicePatternDatasetPushConsumer"; + private static List schemaList = new ArrayList<>(); + + private static String pattern = database + ".d_0.**"; + private static SubscriptionPushConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s(topicName, pattern, null, null, false); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new 
MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += row * 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("device_dataset_consumer") + .consumerGroupId("push_pattern") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis()); + + AWAIT.untilAsserted( + () -> { + check_count(8, "select count(s_0) from " + device, "Consumption data:" + pattern); + check_count(8, "select count(s_1) from " + device, "Consumption data: s_1"); + check_count(0, "select count(s_0) from " + database + ".d_1", "Consumption Data: d_1"); + check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + }); + + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + // Consumption data: Progress is not retained when re-subscribing after cancellation. Full + // synchronization. 
+ AWAIT.untilAsserted( + () -> { + check_count(12, "select count(s_0) from " + device, "Consume data again:" + pattern); + check_count(12, "select count(s_1) from " + device, "Consumption data: s_1"); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBDevicePatternTsfilePushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBDevicePatternTsfilePushConsumerIT.java new file mode 100644 index 0000000000000..3ba71c40ce298 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBDevicePatternTsfilePushConsumerIT.java @@ -0,0 +1,232 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.read.TsFileReader; +import org.apache.tsfile.read.common.Path; +import org.apache.tsfile.read.common.RowRecord; +import org.apache.tsfile.read.expression.QueryExpression; +import org.apache.tsfile.read.query.dataset.QueryDataSet; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer + * TsFile + * pattern: device + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBDevicePatternTsfilePushConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = 
"root.test.DevicePatternTsfilePushConsumer"; + private static final String database2 = "root.DevicePatternTsfilePushConsumer"; + private static final String device = database + ".d_0"; + private static final String topicName = "topic_DevicePatternTsfilePushConsumer"; + private static List schemaList = new ArrayList<>(); + + private static String pattern = device + ".**"; + private SubscriptionPushConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s(topicName, pattern, null, null, true); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, (1 + row) * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + String sql = "select count(s_0) from " + device; + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + final AtomicInteger onReceiveCount = new AtomicInteger(0); + final AtomicInteger d0s0_rowCount = new AtomicInteger(0); + final AtomicInteger d0s1_rowCount = new AtomicInteger(0); + final AtomicInteger d1s0_rowCount = new AtomicInteger(0); + final AtomicInteger other_d2_rowCount = new AtomicInteger(0); + List rowCounts = new ArrayList<>(4); + rowCounts.add(d0s0_rowCount); + rowCounts.add(d0s1_rowCount); + rowCounts.add(d1s0_rowCount); + rowCounts.add(other_d2_rowCount); + consumer = + new 
SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("device_TsFile_consumer") + .consumerGroupId("push_pattern") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + onReceiveCount.incrementAndGet(); + try { + TsFileReader reader = message.getTsFileHandler().openReader(); + Path path_d0s0 = new Path(device, "s_0", true); + Path path_d0s1 = new Path(device, "s_1", true); + Path path_d1s0 = new Path(database + ".d_1", "s_0", true); + Path path_other_d2 = new Path(database2 + ".d_2", "s_0", true); + List paths = new ArrayList<>(4); + paths.add(path_d0s0); + paths.add(path_d0s1); + paths.add(path_d1s0); + paths.add(path_other_d2); + for (int i = 0; i < 4; i++) { + QueryDataSet dataset = + reader.query( + QueryExpression.create( + Collections.singletonList(paths.get(i)), null)); + while (dataset.hasNext()) { + rowCounts.get(i).addAndGet(1); + RowRecord next = dataset.next(); + System.out.println( + FORMAT.format(new Date()) + + ", " + + i + + ", " + + next.getTimestamp() + + "," + + next.getFields()); + } + } + System.out.println("onReceiveCount=" + onReceiveCount.get()); + } catch (IOException e) { + throw new RuntimeException(e); + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis()); + System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + assertEquals(rowCounts.get(0).get(), 10, device + ".s_0"); + assertEquals(rowCounts.get(1).get(), 10, device + ".s_1"); + assertEquals(rowCounts.get(2).get(), 0, database + ".d_1.s_0"); + assertEquals(rowCounts.get(3).get(), 0, database2 + ".d_2.s_0"); + }); + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql)); + + AWAIT.untilAsserted( + () -> { + assertEquals(rowCounts.get(0).get(), 25, device + ".s_0"); + assertEquals(rowCounts.get(1).get(), 25, device + ".s_1"); + assertEquals(rowCounts.get(2).get(), 0, database + ".d_1.s_0"); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBRootPatternDatasetPushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBRootPatternDatasetPushConsumerIT.java new file mode 100644 index 0000000000000..befca5b65f44b --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBRootPatternDatasetPushConsumerIT.java @@ -0,0 +1,190 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer + * DataSet + * pattern: db + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBRootPatternDatasetPushConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.RootPatternDatasetPushConsumer"; + private static final String database2 = "root.RootPatternDatasetPushConsumer"; + private static final String device = database + ".d_0"; + private static final String device2 = database + ".d_1"; + private static final String topicName = "topic_RootPatternDatasetPushConsumer"; + private static List schemaList = new ArrayList<>(); + + private static final String pattern = "root.**"; + private static SubscriptionPushConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s(topicName, pattern, null, null, false); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 
int32,s_1 float);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("root_dataset_consumer") + .consumerGroupId("push_pattern") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .fileSaveDir("target/push-subscription") + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis()); + + AWAIT.untilAsserted( + () -> { + check_count(10, "select count(s_0) from " + device, "Consumption data:" + pattern); + check_count(10, "select count(s_1) from " + device, "Consumption Data: s_1"); + check_count(1, "select count(s_0) from " + database + ".d_1", "Consumption data:d_1"); + check_count(1, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + }); + + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + // Consumption data: Progress is not preserved when re-subscribing after cancellation. Full + // synchronization. 
+ AWAIT.untilAsserted( + () -> { + check_count(15, "select count(s_0) from " + device, "Consume data again:" + pattern); + check_count(15, "select count(s_1) from " + device, "Consumption Data: s_1"); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBTSPatternDatasetPushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBTSPatternDatasetPushConsumerIT.java new file mode 100644 index 0000000000000..7e681d137220e --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBTSPatternDatasetPushConsumerIT.java @@ -0,0 +1,189 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer + * DataSet + * pattern: ts + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBTSPatternDatasetPushConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.TSPatternDatasetPushConsumer"; + private static final String database2 = "root.TSPatternDatasetPushConsumer"; + private static final String device = database + ".d_0"; + private static final String topicName = "topic_TSPatternDatasetPushConsumer"; + private static List schemaList = new ArrayList<>(); 
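+
+  // The topic targets a single time series (d_0.s_0); d_0.s_1, d_1 and d_2 fall outside the
+  // pattern, so their destination counts are expected to stay at 0.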
+ + private static String pattern = database + ".d_0.s_0"; + private static SubscriptionPushConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s(topicName, pattern, null, null, false); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + session_src.executeNonQueryStatement( + "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);"); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + dropDB(database2); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException { + Tablet tablet = new Tablet(device, schemaList, 10); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += row * 2000; + } + session_src.insertTablet(tablet); + session_src.executeNonQueryStatement("flush;"); + } + + @Test + public void do_test() + throws InterruptedException, + TException, + IoTDBConnectionException, + IOException, + StatementExecutionException { + // Write data before subscribing + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + + consumer = + new SubscriptionPushConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .consumerId("ts_dataset_consumer") + .consumerGroupId("push_pattern") + .ackStrategy(AckStrategy.AFTER_CONSUME) + .fileSaveDir("target") + .consumeListener( + message -> { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + try { + session_dest.insertTablet(dataSet.getTablet()); + } catch (StatementExecutionException e) { + throw new RuntimeException(e); + } catch (IoTDBConnectionException e) { + throw new RuntimeException(e); + } + } + return ConsumeResult.SUCCESS; + }) + .buildPushConsumer(); + consumer.open(); + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + insert_data(System.currentTimeMillis()); + + 
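+    // insert_data advances the timestamp by row * 2000, so rows 0 and 1 of each tablet share a
+    // timestamp; each call lands only 4 distinct points, giving 8 here and 12 after the third
+    // insert below.
+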
AWAIT.untilAsserted( + () -> { + check_count(8, "select count(s_0) from " + device, "Consumption data:" + pattern); + check_count(0, "select count(s_1) from " + device, "Consumption data: s_1"); + check_count(0, "select count(s_0) from " + database + ".d_1", "Consumption Data: d_1"); + check_count(0, "select count(s_0) from " + database2 + ".d_2", "Consumption data:d_2"); + }); + + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + // Consumption data: Progress is not retained after unsubscribing and re-subscribing. Full + // synchronization. + AWAIT.untilAsserted( + () -> { + check_count(12, "select count(s_0) from " + device, "Consume data again:" + pattern); + check_count(0, "select count(s_1) from " + device, "Consumption Data: s_1"); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBTSPatternTsfilePushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBTSPatternTsfilePushConsumerIT.java new file mode 100644 index 0000000000000..705310a1baa65 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/pattern/IoTDBTSPatternTsfilePushConsumerIT.java @@ -0,0 +1,226 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.pattern; + +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.subscription.consumer.AckStrategy; +import org.apache.iotdb.session.subscription.consumer.ConsumeResult; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer; +import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT; + +import org.apache.thrift.TException; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.enums.CompressionType; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.read.TsFileReader; +import org.apache.tsfile.read.common.Path; +import org.apache.tsfile.read.expression.QueryExpression; +import org.apache.tsfile.read.query.dataset.QueryDataSet; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT; + +/*** + * PushConsumer + * TsFile + * pattern: ts + */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionConsumer.class}) +public class IoTDBTSPatternTsfilePushConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.TSPatternTsfilePushConsumer"; + private static final String database2 = "root.TSPatternTsfilePushConsumer"; + private static final String device = database + ".d_0"; + private static final String topicName = "topic_TSPatternTsfilePushConsumer"; + private static List schemaList = new ArrayList<>(); + + private static final String pattern = device + ".s_0"; + private static SubscriptionPushConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createDB(database2); + createTopic_s(topicName, pattern, null, null, true); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database + ".d_1(s_0 int64,s_1 double);"); + session_src.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_dest.executeNonQueryStatement( + "create aligned timeseries " + database2 + ".d_2(s_0 int32,s_1 float);"); + session_src.executeNonQueryStatement( + "insert into " + database2 + ".d_2(time,s_0,s_1)values(1000,132,4567.89);"); + 
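+    // The seed rows written here and just below sit outside the subscribed pattern
+    // (device + ".s_0"), so the listener is expected to count 0 rows for them.
+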
session_src.executeNonQueryStatement(
+        "insert into " + database + ".d_1(time,s_0,s_1)values(2000,232,567.891);");
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach((System.out::println));
+    assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    dropDB(database2);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += 2000;
+    }
+    session_src.insertTablet(tablet);
+    session_src.executeNonQueryStatement("flush;");
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    // Write data before subscribing
+    insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00
+    final AtomicInteger onReceiveCount = new AtomicInteger(0);
+    final AtomicInteger d0s0_rowCount = new AtomicInteger(0);
+    final AtomicInteger d0s1_rowCount = new AtomicInteger(0);
+    final AtomicInteger d1s0_rowCount = new AtomicInteger(0);
+    final AtomicInteger other_d2_rowCount = new AtomicInteger(0);
+    List<AtomicInteger> rowCounts = new ArrayList<>(4);
+    rowCounts.add(d0s0_rowCount);
+    rowCounts.add(d0s1_rowCount);
+    rowCounts.add(d1s0_rowCount);
+    rowCounts.add(other_d2_rowCount);
+    consumer =
+        new SubscriptionPushConsumer.Builder()
+            .host(SRC_HOST)
+            .port(SRC_PORT)
+            .consumerId("ts_TsFile_consumer")
+            .consumerGroupId("push_pattern")
+            .ackStrategy(AckStrategy.AFTER_CONSUME)
+            .fileSaveDir("target/push-subscription")
+            .consumeListener(
+                message -> {
+                  onReceiveCount.incrementAndGet();
+                  System.out.println("onReceiveCount=" + onReceiveCount.get());
+                  try {
+                    TsFileReader reader = message.getTsFileHandler().openReader();
+                    Path path_d0s0 = new Path(device, "s_0", true);
+                    Path path_d0s1 = new Path(device, "s_1", true);
+                    Path path_d1s0 = new Path(database + ".d_1", "s_0", true);
+                    Path path_other_d2 = new Path(database2 + ".d_2", "s_0", true);
+                    List<Path> paths = new ArrayList<>(4);
+                    paths.add(path_d0s0);
+                    paths.add(path_d0s1);
+                    paths.add(path_d1s0);
+                    paths.add(path_other_d2);
+                    for (int i = 0; i < 4; i++) {
+                      QueryDataSet dataset =
+                          reader.query(
+                              QueryExpression.create(
+                                  Collections.singletonList(paths.get(i)), null));
+                      while (dataset.hasNext()) {
+                        rowCounts.get(i).addAndGet(1);
+                        dataset.next();
+                      }
+                      System.out.println("rowCounts_" + i + ":" + rowCounts.get(i).get());
+                    }
+                  } catch (IOException e) {
+                    throw new RuntimeException(e);
+                  }
+                  return ConsumeResult.SUCCESS;
+                })
+            .buildPushConsumer();
+    consumer.open();
+
+    // Subscribe
+    consumer.subscribe(topicName);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+    insert_data(System.currentTimeMillis());
+    String sql = "select count(s_0) from " + device;
+    System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql));
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(onReceiveCount.get(), 1, "receive files over 1");
assertEquals(rowCounts.get(0).get(), 10, device + ".s_0"); + assertEquals(rowCounts.get(1).get(), 0, device + ".s_1"); + assertEquals(rowCounts.get(2).get(), 0, database + ".d_1.s_0"); + assertEquals(rowCounts.get(3).get(), 0, database2 + ".d_2.s_0"); + }); + // Unsubscribe + consumer.unsubscribe(topicName); + // Subscribe and then write data + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing"); + insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00 + + System.out.println(FORMAT.format(new Date()) + " src:" + getCount(session_src, sql)); + AWAIT.untilAsserted( + () -> { + assertGte(onReceiveCount.get(), 2, "receive files over 2"); + assertEquals(rowCounts.get(0).get(), 25, device + ".s_0"); + assertEquals(rowCounts.get(1).get(), 0, device + ".s_1"); + assertEquals(rowCounts.get(2).get(), 0, database + ".d_1.s_0"); + }); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBHistoryRootDatasetPushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBHistoryRootDatasetPushConsumerIT.java new file mode 100644 index 0000000000000..15ff9a2e67ba5 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBHistoryRootDatasetPushConsumerIT.java @@ -0,0 +1,174 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBHistoryRootDatasetPushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBHistoryRootDatasetPushConsumerIT.java
new file mode 100644
index 0000000000000..15ff9a2e67ba5
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBHistoryRootDatasetPushConsumerIT.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.time;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.subscription.consumer.AckStrategy;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+/***
+ * AckStrategy: AFTER_CONSUME
+ * pattern: root (history data, end time: now)
+ * payload: dataset
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBHistoryRootDatasetPushConsumerIT extends AbstractSubscriptionRegressionIT {
+  private String database = "root.HistoryRootDatasetPushConsumer";
+  private String device = database + ".d_0";
+  private String pattern = "root.**";
+  private String topicName = "topic_HistoryRootDatasetPushConsumer";
+  private List<MeasurementSchema> schemaList = new ArrayList<>();
+  private SubscriptionPushConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createTopic_s(topicName, pattern, null, "now", false);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach(System.out::println);
+    assertTrue(subs.getTopic(topicName).isPresent(), "topic should be listed after creation");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+      // ignore close failures during teardown
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException, InterruptedException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += 2000;
+    }
+    session_src.insertTablet(tablet);
+    session_src.executeNonQueryStatement("flush;");
+    Thread.sleep(5000);
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    // Write data before subscribing
+    insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00
+    consumer =
+        new SubscriptionPushConsumer.Builder()
+            .host(SRC_HOST)
+            .port(SRC_PORT)
+            .consumerId("root_history_dataset")
+            .consumerGroupId("push_time")
+            .ackStrategy(AckStrategy.AFTER_CONSUME)
+            .fileSaveDir("target")
+            .consumeListener(
+                message -> {
+                  for (final SubscriptionSessionDataSet dataSet :
+                      message.getSessionDataSetsHandler()) {
+                    try {
+                      Tablet tablet = dataSet.getTablet();
+                      session_dest.insertTablet(tablet);
+                    } catch (StatementExecutionException e) {
+                      throw new RuntimeException(e);
+                    } catch (IoTDBConnectionException e) {
+                      throw new RuntimeException(e);
+                    }
+                  }
+                  return ConsumeResult.SUCCESS;
+                })
+            .buildPushConsumer();
+    consumer.open();
+
+    // Subscribe
+    consumer.subscribe(topicName);
+    subs.getSubscriptions().forEach(System.out::println);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+    insert_data(System.currentTimeMillis());
+
+    AWAIT.untilAsserted(
+        () -> {
+          check_count(5, "select count(s_0) from " + device, "Consumption data:" + pattern);
+          check_count(5, "select count(s_1) from " + device, "Consumption data: s_1");
+        });
+    // Unsubscribe
+    consumer.unsubscribe(topicName);
+    // Subscribe and then write data
+    consumer.subscribe(topicName);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing");
+    insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00
+
+    AWAIT.untilAsserted(
+        () -> {
+          // Progress is not retained after unsubscribing and re-subscribing:
+          // history data is fully re-synchronized.
+          check_count(10, "select count(s_0) from " + device, "Consume data again:" + pattern);
+          check_count(10, "select count(s_1) from " + device, "Consumption data: s_1");
+        });
+  }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBHistoryRootTsFilePushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBHistoryRootTsFilePushConsumerIT.java
new file mode 100644
index 0000000000000..66ed34827dc72
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBHistoryRootTsFilePushConsumerIT.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.time;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.subscription.consumer.AckStrategy;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.read.TsFileReader;
+import org.apache.tsfile.read.common.Path;
+import org.apache.tsfile.read.common.RowRecord;
+import org.apache.tsfile.read.expression.QueryExpression;
+import org.apache.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+/***
+ * payload: tsfile
+ * end_time: now
+ * pattern: root
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBHistoryRootTsFilePushConsumerIT extends AbstractSubscriptionRegressionIT {
+  private String database = "root.HistoryRootTsFilePushConsumer";
+  private String device = database + ".d_0";
+  private String pattern = "root.**";
+  private String topicName = "topic_HistoryRootTsFilePushConsumer";
+  private List<MeasurementSchema> schemaList = new ArrayList<>();
+  private SubscriptionPushConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createTopic_s(topicName, pattern, null, "2024-02-13T07:59:59+08:00", true);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach(System.out::println);
+    assertTrue(subs.getTopic(topicName).isPresent(), "topic should be listed after creation");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+      // ignore close failures during teardown
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += row * 2000;
+    }
+    session_src.insertTablet(tablet);
+    session_src.executeNonQueryStatement("flush;");
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    final AtomicInteger rowCount = new AtomicInteger(0);
+    final AtomicInteger onReceive = new AtomicInteger(0);
+    // Write data before subscribing
+    insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00
+    consumer =
+        new SubscriptionPushConsumer.Builder()
+            .host(SRC_HOST)
+            .port(SRC_PORT)
+            .consumerGroupId("push_time")
+            .consumerId("root_history_tsfile")
+            .ackStrategy(AckStrategy.AFTER_CONSUME)
+            .fileSaveDir("target/push-subscription")
+            .consumeListener(
+                message -> {
+                  try {
+                    onReceive.addAndGet(1);
+                    TsFileReader reader = message.getTsFileHandler().openReader();
+                    List<Path> paths = new ArrayList<>(2);
+                    for (int i = 0; i < 2; i++) {
+                      paths.add(new Path(device, "s_" + i, true));
+                    }
+                    QueryDataSet dataset = reader.query(QueryExpression.create(paths, null));
+                    while (dataset.hasNext()) {
+                      rowCount.addAndGet(1);
+                      RowRecord next = dataset.next();
+                      System.out.println(next.getTimestamp() + "," + next.getFields());
+                    }
+                  } catch (IOException e) {
+                    throw new RuntimeException(e);
+                  }
+                  return ConsumeResult.SUCCESS;
+                })
+            .buildPushConsumer();
+    consumer.open();
+
+    // Subscribe
+    consumer.subscribe(topicName);
+    subs.getSubscriptions().forEach(System.out::println);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+    insert_data(System.currentTimeMillis());
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertEquals(rowCount.get(), 4, "4 records");
+        });
+
+    System.out.println("insert 2024-02-13 08:00:00+08:00");
+    insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00, beyond the topic's end time
+    AWAIT.untilAsserted(
+        () -> {
+          assertEquals(rowCount.get(), 4, "still 4 records");
+        });
+  }
+}
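
[Editor's note] The "4 records" assertions above look surprising for a 5-row tablet. The reason is the increment `timestamp += row * 2000`: on row 0 the increment is zero, so rows 0 and 1 share a timestamp, and IoTDB keeps a single value per timestamp. A quick standalone check of the generated timestamps:

  import java.util.Set;
  import java.util.TreeSet;

  public final class TimestampCollision {
    public static void main(String[] args) {
      long timestamp = 1706659200000L; // 2024-01-31 08:00:00+08:00, as in the test
      Set<Long> distinct = new TreeSet<>();
      for (int row = 0; row < 5; row++) {
        distinct.add(timestamp);
        timestamp += row * 2000; // row 0 adds nothing, so rows 0 and 1 collide
      }
      System.out.println(distinct.size()); // prints 4
    }
  }

The same increment pattern (and therefore the same 4-rows-per-batch arithmetic) recurs in most of the tests that follow.
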
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBRealTimeDBDatasetPushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBRealTimeDBDatasetPushConsumerIT.java
new file mode 100644
index 0000000000000..ac098771ca282
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBRealTimeDBDatasetPushConsumerIT.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.time;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.subscription.consumer.AckStrategy;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBRealTimeDBDatasetPushConsumerIT extends AbstractSubscriptionRegressionIT {
+  private String database = "root.RealTimeDBDatasetPushConsumer";
+  private String device = database + ".d_0";
+  private String pattern = database + ".**";
+  private String topicName = "topic_RealTimeDBDatasetPushConsumer";
+  private List<MeasurementSchema> schemaList = new ArrayList<>();
+  private SubscriptionPushConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createTopic_s(topicName, pattern, "now", null, false);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach(System.out::println);
+    assertTrue(subs.getTopic(topicName).isPresent(), "topic should be listed after creation");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+      // ignore close failures during teardown
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += row * 2000;
+    }
+    session_src.insertTablet(tablet);
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    // Write data before subscribing
+    insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00
+    consumer =
+        new SubscriptionPushConsumer.Builder()
+            .host(SRC_HOST)
+            .port(SRC_PORT)
+            .consumerId("DB_realtime_dataset")
+            .consumerGroupId("push_time")
+            .ackStrategy(AckStrategy.BEFORE_CONSUME)
+            .fileSaveDir("target")
+            .consumeListener(
+                message -> {
+                  for (final SubscriptionSessionDataSet dataSet :
+                      message.getSessionDataSetsHandler()) {
+                    try {
+                      Tablet tablet = dataSet.getTablet();
+                      session_dest.insertTablet(tablet);
+                    } catch (StatementExecutionException e) {
+                      throw new RuntimeException(e);
+                    } catch (IoTDBConnectionException e) {
+                      throw new RuntimeException(e);
+                    }
+                  }
+                  return ConsumeResult.SUCCESS;
+                })
+            .buildPushConsumer();
+    consumer.open();
+    // Subscribe
+    consumer.subscribe(topicName);
+    subs.getSubscriptions().forEach(System.out::println);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+
+    AWAIT.untilAsserted(
+        () -> {
+          check_count(0, "select count(s_0) from " + device, "Consumption data:" + pattern);
+        });
+    insert_data(System.currentTimeMillis());
+    AWAIT.untilAsserted(
+        () -> {
+          check_count(4, "select count(s_0) from " + device, "Consumption data:" + pattern);
+        });
+
+    // Still subscribed: write more data
+    insert_data(System.currentTimeMillis() + 200000); // ~now + 200 s
+
+    AWAIT.untilAsserted(
+        () -> {
+          check_count(8, "select count(s_0) from " + device, "Consume data again:" + pattern);
+        });
+  }
+}
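
[Editor's note] The test above acks BEFORE_CONSUME, while several sibling tests ack AFTER_CONSUME. A condensed, hedged sketch of the difference, assuming only the Builder API already used throughout this diff (host/port and ids are illustrative placeholders); the delivery-semantics comments reflect the usual reading of these names, not a documented guarantee:

  // Acks before the listener runs: if the listener crashes, the message is not redelivered.
  SubscriptionPushConsumer atMostOnce =
      new SubscriptionPushConsumer.Builder()
          .host("127.0.0.1")
          .port(6667)
          .consumerId("demo_before")
          .consumerGroupId("demo_group")
          .ackStrategy(AckStrategy.BEFORE_CONSUME)
          .consumeListener(message -> ConsumeResult.SUCCESS)
          .buildPushConsumer();

  // Acks only after the listener returns SUCCESS: failed messages can be delivered again.
  SubscriptionPushConsumer atLeastOnce =
      new SubscriptionPushConsumer.Builder()
          .host("127.0.0.1")
          .port(6667)
          .consumerId("demo_after")
          .consumerGroupId("demo_group")
          .ackStrategy(AckStrategy.AFTER_CONSUME)
          .consumeListener(message -> ConsumeResult.SUCCESS)
          .buildPushConsumer();
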
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBRealTimeDBTsfilePushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBRealTimeDBTsfilePushConsumerIT.java
new file mode 100644
index 0000000000000..c6b9420f13b2b
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBRealTimeDBTsfilePushConsumerIT.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.time;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.subscription.consumer.AckStrategy;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.read.TsFileReader;
+import org.apache.tsfile.read.common.Path;
+import org.apache.tsfile.read.common.RowRecord;
+import org.apache.tsfile.read.expression.QueryExpression;
+import org.apache.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBRealTimeDBTsfilePushConsumerIT extends AbstractSubscriptionRegressionIT {
+  private String database = "root.RealTimeDBTsfilePushConsumer";
+  private String device = database + ".d_0";
+  private String pattern = database + ".**";
+  private String topicName = "topic_RealTimeDBTsfilePushConsumer";
+  private List<MeasurementSchema> schemaList = new ArrayList<>();
+  private SubscriptionPushConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createTopic_s(topicName, pattern, "2024-01-31T08:02:00+08:00", null, true);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach(System.out::println);
+    assertTrue(subs.getTopic(topicName).isPresent(), "topic should be listed after creation");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+      // ignore close failures during teardown
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += row * 2000;
+    }
+    session_src.insertTablet(tablet);
+    session_src.executeNonQueryStatement("flush;");
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    final AtomicInteger rowCount = new AtomicInteger(0);
+    final AtomicInteger onReceive = new AtomicInteger(0);
+    // Write data before subscribing
+    insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00, before the topic's start time
+    consumer =
+        new SubscriptionPushConsumer.Builder()
+            .host(SRC_HOST)
+            .port(SRC_PORT)
+            .consumerId("DB_realtime_tsfile")
+            .consumerGroupId("push_time")
+            .ackStrategy(AckStrategy.BEFORE_CONSUME)
+            .fileSaveDir("target/push-subscription")
+            .consumeListener(
+                message -> {
+                  try {
+                    onReceive.addAndGet(1);
+                    TsFileReader reader = message.getTsFileHandler().openReader();
+                    List<Path> paths = new ArrayList<>(2);
+                    for (int i = 0; i < 2; i++) {
+                      paths.add(new Path(device, "s_" + i, true));
+                    }
+                    QueryDataSet dataset = reader.query(QueryExpression.create(paths, null));
+                    while (dataset.hasNext()) {
+                      rowCount.addAndGet(1);
+                      RowRecord next = dataset.next();
+                      System.out.println(next.getTimestamp() + "," + next.getFields());
+                    }
+                  } catch (IOException e) {
+                    throw new RuntimeException(e);
+                  }
+                  return ConsumeResult.SUCCESS;
+                })
+            .buildPushConsumer();
+    consumer.open();
+    // Subscribe
+    consumer.subscribe(topicName);
+    subs.getSubscriptions().forEach(System.out::println);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertEquals(onReceive.get(), 0);
+          assertEquals(rowCount.get(), 0);
+        });
+    insert_data(System.currentTimeMillis());
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(onReceive.get(), 1, "should process 1 file");
+          assertGte(rowCount.get(), 4, "4 records");
+        });
+
+    // Still subscribed: write more data
+    insert_data(System.currentTimeMillis() + 200000); // ~now + 200 s
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(onReceive.get(), 2, "should process 2 files");
+          assertGte(rowCount.get(), 8, "8 records");
+        });
+  }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBTimeRangeAccurateDBDataSetPushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBTimeRangeAccurateDBDataSetPushConsumerIT.java
new file mode 100644
index 0000000000000..46418ec94f0c5
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBTimeRangeAccurateDBDataSetPushConsumerIT.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.time;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.subscription.consumer.AckStrategy;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBTimeRangeAccurateDBDataSetPushConsumerIT
+    extends AbstractSubscriptionRegressionIT {
+  private String database = "root.TimeRangeAccurateDBDataSetPushConsumer";
+  private String device = database + ".d_0";
+  private String pattern = database + ".**";
+  private String topicName = "topic_TimeRangeAccurateDBDataSetPushConsumer";
+  private List<MeasurementSchema> schemaList = new ArrayList<>();
+  private SubscriptionPushConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createTopic_s(
+        topicName, pattern, "2024-01-01T00:00:00+08:00", "2024-03-31T23:59:59+08:00", false);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach(System.out::println);
+    assertTrue(subs.getTopic(topicName).isPresent(), "topic should be listed after creation");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+      // ignore close failures during teardown
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += row * 2000;
+    }
+    session_src.insertTablet(tablet);
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    // Write data before subscribing
+    insert_data(1704038396000L); // 2023-12-31 23:59:56+08:00
+    consumer =
+        new SubscriptionPushConsumer.Builder()
+            .host(SRC_HOST)
+            .port(SRC_PORT)
+            .consumerId("DB_time_accurate_range_dataset")
+            .consumerGroupId("push_time")
+            .ackStrategy(AckStrategy.BEFORE_CONSUME)
+            .consumeListener(
+                message -> {
+                  for (final SubscriptionSessionDataSet dataSet :
+                      message.getSessionDataSetsHandler()) {
+                    try {
+                      Tablet tablet = dataSet.getTablet();
+                      session_dest.insertTablet(tablet);
+                    } catch (StatementExecutionException e) {
+                      throw new RuntimeException(e);
+                    } catch (IoTDBConnectionException e) {
+                      throw new RuntimeException(e);
+                    }
+                  }
+                  return ConsumeResult.SUCCESS;
+                })
+            .buildPushConsumer();
+    consumer.open();
+    // Subscribe
+    consumer.subscribe(topicName);
+    subs.getSubscriptions().forEach(System.out::println);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+
+    AWAIT.untilAsserted(
+        () -> {
+          // Only the history rows that fall inside the time range arrive
+          check_count(2, "select count(s_0) from " + device, "Start time boundary data:" + pattern);
+        });
+
+    insert_data(System.currentTimeMillis()); // now, outside the time range
+    AWAIT.untilAsserted(
+        () -> {
+          check_count(
+              2,
+              "select count(s_0) from " + device,
+              "After writing some real-time data:" + pattern);
+        });
+
+    insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00
+    AWAIT.untilAsserted(
+        () -> {
+          check_count(
+              6, "select count(s_0) from " + device, "Data within the time range:" + pattern);
+        });
+
+    insert_data(1711814398000L); // 2024-03-30 23:59:58+08:00
+    AWAIT.untilAsserted(
+        () -> {
+          check_count(10, "select count(s_0) from " + device, "End time limit data:" + pattern);
+        });
+    insert_data(1711900798000L); // 2024-03-31 23:59:58+08:00
+    AWAIT.untilAsserted(
+        () -> {
+          check_count(11, "select count(s_0) from " + device, "End time limit data 2:" + pattern);
+        });
+  }
+}
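
[Editor's note] The epoch-millisecond literals in this test and the next two are annotated with +08:00 wall-clock comments. A small self-contained java.time check confirms the boundary values the closed-interval assertions rely on:

  import java.time.Instant;
  import java.time.ZoneOffset;

  public final class BoundaryTimestamps {
    public static void main(String[] args) {
      long[] ts = {1704038396000L, 1707782400000L, 1711814398000L, 1711900798000L};
      for (long t : ts) {
        // Render each epoch-millisecond value at UTC+8, as in the test comments
        System.out.println(t + " -> " + Instant.ofEpochMilli(t).atOffset(ZoneOffset.ofHours(8)));
      }
      // Prints 2023-12-31T23:59:56+08:00, 2024-02-13T08:00+08:00,
      // 2024-03-30T23:59:58+08:00 and 2024-03-31T23:59:58+08:00 respectively.
    }
  }
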
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBTimeRangeDBDataSetPushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBTimeRangeDBDataSetPushConsumerIT.java
new file mode 100644
index 0000000000000..f7c49a036568c
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBTimeRangeDBDataSetPushConsumerIT.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.time;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.subscription.consumer.AckStrategy;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+/***
+ * Start time and end time are both closed (inclusive) intervals.
+ * If only a date is specified, the time defaults to 00:00:00.
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBTimeRangeDBDataSetPushConsumerIT extends AbstractSubscriptionRegressionIT {
+  private String database = "root.TimeRangeDBDataSetPushConsumer";
+  private String device = database + ".d_0";
+  private String pattern = database + ".**";
+  private String topicName = "topic_TimeRangeDBDataSetPushConsumer";
+  private List<MeasurementSchema> schemaList = new ArrayList<>();
+  private SubscriptionPushConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createTopic_s(
+        topicName, pattern, "2024-01-01T00:00:00+08:00", "2024-03-31T00:00:00+08:00", false);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach(System.out::println);
+    assertTrue(subs.getTopic(topicName).isPresent(), "topic should be listed after creation");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+      // ignore close failures during teardown
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += row * 2000;
+    }
+    session_src.insertTablet(tablet);
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    // Write data before subscribing
+    insert_data(1704038396000L); // 2023-12-31 23:59:56+08:00
+    consumer =
+        new SubscriptionPushConsumer.Builder()
+            .host(SRC_HOST)
+            .port(SRC_PORT)
+            .consumerId("DB_time_range_dataset")
+            .consumerGroupId("push_time")
+            .ackStrategy(AckStrategy.AFTER_CONSUME)
+            .consumeListener(
+                message -> {
+                  for (final SubscriptionSessionDataSet dataSet :
+                      message.getSessionDataSetsHandler()) {
+                    try {
+                      Tablet tablet = dataSet.getTablet();
+                      session_dest.insertTablet(tablet);
+                    } catch (StatementExecutionException e) {
+                      throw new RuntimeException(e);
+                    } catch (IoTDBConnectionException e) {
+                      throw new RuntimeException(e);
+                    }
+                  }
+                  return ConsumeResult.SUCCESS;
+                })
+            .buildPushConsumer();
+    consumer.open();
+    // Subscribe
+    consumer.subscribe(topicName);
+    subs.getSubscriptions().forEach(System.out::println);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+
+    AWAIT.untilAsserted(
+        () -> {
+          check_count(2, "select count(s_0) from " + device, "Start time boundary data:" + pattern);
+        });
+
+    insert_data(System.currentTimeMillis()); // now, outside the time range
+    AWAIT.untilAsserted(
+        () -> {
+          check_count(
+              2, "select count(s_0) from " + device, "After writing real-time data:" + pattern);
+        });
+
+    insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00
+    AWAIT.untilAsserted(
+        () -> {
+          check_count(
+              6, "select count(s_0) from " + device, "Data within the time range:" + pattern);
+        });
+
+    insert_data(1711814398000L); // 2024-03-30 23:59:58+08:00
+    AWAIT.untilAsserted(
+        () -> {
+          // The end time is 2024-03-31 00:00:00, a closed interval boundary
+          check_count(8, "select count(s_0) from " + device, "End time limit data:" + pattern);
+        });
+
+    insert_data(1711900798000L); // 2024-03-31 23:59:58+08:00, beyond the end time
+    AWAIT.untilAsserted(
+        () -> {
+          check_count(8, "select count(s_0) from " + device, "End time limit data:" + pattern);
+        });
+  }
+}
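
[Editor's note] A minimal sketch of the closed-interval semantics described in the class comment above ("start time and end time are both inclusive"); the helper name and the precomputed boundary constants are illustrative only:

  // Inclusive-on-both-ends range check, matching the topic time-range semantics asserted above.
  static boolean inTopicRange(long t, long startInclusive, long endInclusive) {
    return t >= startInclusive && t <= endInclusive;
  }

  // With start = 2024-01-01T00:00:00+08:00 (1704038400000L)
  // and  end   = 2024-03-31T00:00:00+08:00 (1711814400000L):
  //   inTopicRange(1711814398000L, 1704038400000L, 1711814400000L) == true   (2024-03-30 23:59:58)
  //   inTopicRange(1711900798000L, 1704038400000L, 1711814400000L) == false  (2024-03-31 23:59:58)

This is why the dataset test above stays at count 8 after the final insert, and why the TsFile variant that follows keeps onReceive at 3.
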
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBTimeRangeDBTsfilePushConsumerIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBTimeRangeDBTsfilePushConsumerIT.java
new file mode 100644
index 0000000000000..2a77ca0256a25
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/pushconsumer/time/IoTDBTimeRangeDBTsfilePushConsumerIT.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.pushconsumer.time;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionConsumer;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.subscription.consumer.AckStrategy;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.read.TsFileReader;
+import org.apache.tsfile.read.common.Path;
+import org.apache.tsfile.read.common.RowRecord;
+import org.apache.tsfile.read.expression.QueryExpression;
+import org.apache.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant.AWAIT;
+
+/***
+ * Start time and end time are both closed (inclusive) intervals.
+ * If only a date is specified, the time defaults to 00:00:00.
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionConsumer.class})
+public class IoTDBTimeRangeDBTsfilePushConsumerIT extends AbstractSubscriptionRegressionIT {
+  private String database = "root.TimeRangeDBTsfilePushConsumer";
+  private String device = database + ".d_0";
+  private String pattern = database + ".**";
+  private String topicName = "topic_TimeRangeDBTsfilePushConsumer";
+  private List<MeasurementSchema> schemaList = new ArrayList<>();
+  private SubscriptionPushConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createTopic_s(
+        topicName, pattern, "2024-01-01T00:00:00+08:00", "2024-03-31T00:00:00+08:00", true);
+    session_src.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach(System.out::println);
+    assertTrue(subs.getTopic(topicName).isPresent(), "topic should be listed after creation");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+      // ignore close failures during teardown
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += row * 2000;
+    }
+    session_src.insertTablet(tablet);
+    session_src.executeNonQueryStatement("flush;");
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    final AtomicInteger rowCount = new AtomicInteger(0);
+    final AtomicInteger onReceive = new AtomicInteger(0);
+    // Write data before subscribing
+    insert_data(1704038396000L); // 2023-12-31 23:59:56+08:00
+    consumer =
+        new SubscriptionPushConsumer.Builder()
+            .host(SRC_HOST)
+            .port(SRC_PORT)
+            .consumerId("db_time_range_accurate_tsfile")
+            .consumerGroupId("push_time")
+            .ackStrategy(AckStrategy.AFTER_CONSUME)
+            .fileSaveDir("target/push-subscription")
+            .consumeListener(
+                message -> {
+                  try {
+                    onReceive.addAndGet(1);
+                    TsFileReader reader = message.getTsFileHandler().openReader();
+                    List<Path> paths = new ArrayList<>(2);
+                    for (int i = 0; i < 2; i++) {
+                      paths.add(new Path(device, "s_" + i, true));
+                    }
+                    QueryDataSet dataset = reader.query(QueryExpression.create(paths, null));
+                    while (dataset.hasNext()) {
+                      rowCount.addAndGet(1);
+                      RowRecord next = dataset.next();
+                      System.out.println(next.getTimestamp() + "," + next.getFields());
+                    }
+                  } catch (IOException e) {
+                    throw new RuntimeException(e);
+                  }
+                  return ConsumeResult.SUCCESS;
+                })
+            .buildPushConsumer();
+    consumer.open();
+    // Subscribe
+    consumer.subscribe(topicName);
+    subs.getSubscriptions().forEach(System.out::println);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(onReceive.get(), 1);
+          assertGte(rowCount.get(), 2);
+        });
+
+    insert_data(System.currentTimeMillis()); // now, not in range
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(onReceive.get(), 1);
+          assertGte(rowCount.get(), 2);
+        });
+
+    insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(onReceive.get(), 2);
+          assertGte(rowCount.get(), 6);
+        });
+
+    insert_data(1711814398000L); // 2024-03-30 23:59:58+08:00
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(onReceive.get(), 3);
+          assertGte(rowCount.get(), 8);
+        });
+
+    insert_data(1711900798000L); // 2024-03-31 23:59:58+08:00, beyond the end time
+    AWAIT.untilAsserted(
+        () -> {
+          assertGte(onReceive.get(), 3);
+          assertGte(rowCount.get(), 8);
+        });
+  }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/topic/IoTDBDataSet1TopicConsumerSpecialIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/topic/IoTDBDataSet1TopicConsumerSpecialIT.java
new file mode 100644
index 0000000000000..c4917bf027c61
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/topic/IoTDBDataSet1TopicConsumerSpecialIT.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.topic;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionMisc;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/***
+ * Sequence-level topic with start time, end time, and special (quoted) identifiers; dataset payload
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionMisc.class})
+public class IoTDBDataSet1TopicConsumerSpecialIT extends AbstractSubscriptionRegressionIT {
+  private String database = "root.test.ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz1";
+  private String device = database + ".`#01`";
+  private String pattern = device + ".`ABH#01`";
+  private String topicName = "topic_DataSet1TopicConsumerSpecial";
+  private List<MeasurementSchema> schemaList = new ArrayList<>();
+  private SubscriptionPullConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createTopic_s(
+        topicName, pattern, "2024-01-01T00:00:00+08:00", "2024-03-01T00:00:00+08:00", false);
+    session_src.createTimeseries(
+        pattern, TSDataType.INT32, TSEncoding.GORILLA, CompressionType.SNAPPY);
+    session_src.createTimeseries(
+        device + ".`BJ-ABH#01`", TSDataType.BOOLEAN, TSEncoding.RLE, CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("`ABH#01`", TSDataType.INT32));
+    schemaList.add(new MeasurementSchema("`BJ-ABH#01`", TSDataType.BOOLEAN));
+    System.out.println("topics:" + subs.getTopics());
+    assertTrue(subs.getTopic(topicName).isPresent(), "topic should be listed after creation");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+      // ignore close failures during teardown
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException {
+    Tablet tablet = new Tablet(device, schemaList, 10);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("`ABH#01`", rowIndex, row * 20 + row);
+      tablet.addValue("`BJ-ABH#01`", rowIndex, true);
+      timestamp += row * 2000;
+    }
+    session_src.insertTablet(tablet);
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    // Write data before subscribing
+    insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00
+    session_src.executeNonQueryStatement(
+        "insert into "
+            + device
+            + "(time,`ABH#01`,`BJ-ABH#01`)values(1710288000000,313,false);"); // 2024-03-13 08:00:00+08:00, out of range
+    // Subscribe
+    consumer = create_pull_consumer("`Group-ABH#01`", "`ABH#01`", false, 0L);
+    assertEquals(subs.getSubscriptions().size(), 0, "show subscriptions before subscribing");
+    consumer.subscribe(topicName);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+    // Consume data
+    consume_data(consumer, session_dest);
+    check_count(4, "select count(`ABH#01`) from " + device, "Consumption data:" + pattern);
+    // Unsubscribe
+    consumer.unsubscribe(topicName);
+    assertEquals(subs.getSubscriptions().size(), 0, "show subscriptions after unsubscribing");
+    // Subscribe and then write data
+    insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00
+    session_src.executeNonQueryStatement(
+        "insert into "
+            + device
+            + "(time,`ABH#01`,`BJ-ABH#01`)values(1703980800000,1231,false);"); // 2023-12-31 08:00:00+08:00, out of range
+    consumer.subscribe(topicName);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing");
+    // Consume data
+    consume_data(consumer, session_dest);
+    check_count(8, "select count(`ABH#01`) from " + device, "consume data again:" + pattern);
+    // Unsubscribe
+    consumer.unsubscribe(topicName);
+  }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/topic/IoTDBTestTopicNameIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/topic/IoTDBTestTopicNameIT.java
new file mode 100644
index 0000000000000..f33dc39c7e9b5
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/regression/topic/IoTDBTestTopicNameIT.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.subscription.it.triple.regression.topic;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.MultiClusterIT2SubscriptionRegressionMisc;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer;
+import org.apache.iotdb.subscription.it.triple.regression.AbstractSubscriptionRegressionIT;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/***
+ * Special (quoted) topic name
+ */
+@RunWith(IoTDBTestRunner.class)
+@Category({MultiClusterIT2SubscriptionRegressionMisc.class})
+public class IoTDBTestTopicNameIT extends AbstractSubscriptionRegressionIT {
+  private String database = "root.TestTopicName";
+  private String device = database + ".d_0";
+  private String pattern = device + ".s_0";
+  private String topicName = "`1-group.1-consumer.ts.topic`";
+  private List<MeasurementSchema> schemaList = new ArrayList<>();
+  private SubscriptionPullConsumer consumer;
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    createDB(database);
+    createTopic_s(topicName, pattern, null, null, false);
+    session_src.createTimeseries(
+        pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_src.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    session_dest.createTimeseries(
+        pattern, TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4);
+    session_dest.createTimeseries(
+        device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZMA2);
+    schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64));
+    schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE));
+    subs.getTopics().forEach(System.out::println);
+    assertTrue(subs.getTopic(topicName).isPresent(), "topic should be listed after creation");
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    try {
+      consumer.close();
+    } catch (Exception e) {
+      // ignore close failures during teardown
+    }
+    subs.dropTopic(topicName);
+    dropDB(database);
+    super.tearDown();
+  }
+
+  private void insert_data(long timestamp)
+      throws IoTDBConnectionException, StatementExecutionException, InterruptedException {
+    Tablet tablet = new Tablet(device, schemaList, 5);
+    int rowIndex = 0;
+    for (int row = 0; row < 5; row++) {
+      rowIndex = tablet.rowSize++;
+      tablet.addTimestamp(rowIndex, timestamp);
+      tablet.addValue("s_0", rowIndex, row * 20L + row);
+      tablet.addValue("s_1", rowIndex, row + 2.45);
+      timestamp += 2000;
+    }
+    session_src.insertTablet(tablet);
+    Thread.sleep(1000);
+  }
+
+  @Test
+  public void do_test()
+      throws InterruptedException,
+          TException,
+          IoTDBConnectionException,
+          IOException,
+          StatementExecutionException {
+    consumer = create_pull_consumer("g1", "c1", false, null);
+    // Write data before subscribing
+    insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00
+    // Subscribe
+    consumer.subscribe(topicName);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription");
+    Thread.sleep(1000);
+    insert_data(System.currentTimeMillis());
+    // Consume data
+    consume_data(consumer, session_dest);
+    check_count(10, "select count(s_0) from " + device, "Consumption data:" + pattern);
+    check_count(0, "select count(s_1) from " + device, "Consumption data: s_1");
+    // Unsubscribe
+    consumer.unsubscribe(topicName);
+    assertEquals(subs.getSubscriptions().size(), 0, "show subscriptions after unsubscribe");
+    // Subscribe and then write data
+    consumer.subscribe(topicName);
+    assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after re-subscribing");
+    Thread.sleep(1000);
+    insert_data(1707782400000L); // 2024-02-13 08:00:00+08:00
+    // Progress is not preserved when re-subscribing after unsubscribing:
+    // history data is fully re-synchronized.
+    consume_data(consumer, session_dest);
+    check_count(15, "select count(s_0) from " + device, "Consume data again:" + pattern);
+    check_count(0, "select count(s_1) from " + device, "Consumption data: s_1");
+  }
+}
+ */ +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2SubscriptionRegressionMisc.class}) +public class IoTDBOtherUserConsumerIT extends AbstractSubscriptionRegressionIT { + private static final String database = "root.test.OtherUserConsumer"; + private static final String device = database + ".d_0"; + private static final String topicName = "topic_OtherUserConsumer"; + private static List schemaList = new ArrayList<>(); + private static final String pattern = "root.**"; + private static final String userName = "other_user"; + private static final String passwd = "other_user"; + private static SubscriptionPullConsumer consumer; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + createDB(database); + createTopic_s(topicName, pattern, null, null, false); + session_src.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_src.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_0", TSDataType.INT64, TSEncoding.GORILLA, CompressionType.LZ4); + session_dest.createTimeseries( + device + ".s_1", TSDataType.DOUBLE, TSEncoding.TS_2DIFF, CompressionType.LZ4); + schemaList.add(new MeasurementSchema("s_0", TSDataType.INT64)); + schemaList.add(new MeasurementSchema("s_1", TSDataType.DOUBLE)); + subs.getTopics().forEach((System.out::println)); + assertTrue(subs.getTopic(topicName).isPresent(), "Create show topics"); + } + + @Override + @After + public void tearDown() throws Exception { + session_src.executeNonQueryStatement("drop user " + userName); + try { + consumer.close(); + } catch (Exception e) { + } + subs.dropTopic(topicName); + dropDB(database); + super.tearDown(); + } + + private void insert_data(long timestamp) + throws IoTDBConnectionException, StatementExecutionException, InterruptedException { + Tablet tablet = new Tablet(device, schemaList, 5); + int rowIndex = 0; + for (int row = 0; row < 5; row++) { + rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, timestamp); + tablet.addValue("s_0", rowIndex, row * 20L + row); + tablet.addValue("s_1", rowIndex, row + 2.45); + timestamp += row * 2000; + } + session_src.insertTablet(tablet); + Thread.sleep(1000); + } + + // @Test + public void testPrivilege() throws IoTDBConnectionException, StatementExecutionException { + session_src.executeNonQueryStatement("create user " + userName + " '" + passwd + "';"); + session_src.executeNonQueryStatement("grant read,write on root.** to user " + userName); + } + + // @Test + // TODO: Failed to fetch all endpoints, only the admin user can perform this operation... 
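Both test methods in IoTDBOtherUserConsumerIT provision the non-admin account the same way before connecting, and testNormal below then builds its consumer with those credentials. The provisioning boils down to two statements (user name and password here are placeholders mirroring the test's pattern):

// Run on the admin session; hypothetical credentials for illustration.
session_src.executeNonQueryStatement("create user demo_user 'demo_pw';");
session_src.executeNonQueryStatement("grant read,write on root.** to user demo_user");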
+ public void testNormal() + throws TException, + IoTDBConnectionException, + IOException, + StatementExecutionException, + InterruptedException { + session_src.executeNonQueryStatement("create user " + userName + " '" + passwd + "';"); + session_src.executeNonQueryStatement("grant read,write on root.** to user " + userName); + consumer = + new SubscriptionPullConsumer.Builder() + .host(SRC_HOST) + .port(SRC_PORT) + .username(userName) + .password(passwd) + .buildPullConsumer(); + consumer.open(); + insert_data(1706659200000L); // 2024-01-31 08:00:00+08:00 + // Subscribe + consumer.subscribe(topicName); + assertEquals(subs.getSubscriptions().size(), 1, "show subscriptions after subscription"); + Thread.sleep(1000); + subs.getSubscriptions().forEach(System.out::println); + // Consumption data + consume_data(consumer, session_dest); + check_count(4, "select count(s_0) from " + device, "Consumption Data: s_0"); + check_count(4, "select count(s_1) from " + device, "Consumption Data: s_1"); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportDataTestIT.java b/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportDataTestIT.java index f04f387fd381a..f082391769dd1 100644 --- a/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportDataTestIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportDataTestIT.java @@ -18,7 +18,7 @@ */ package org.apache.iotdb.tools.it; -import org.apache.iotdb.cli.it.AbstractScript; +import org.apache.iotdb.cli.it.AbstractScriptIT; import org.apache.iotdb.isession.ISession; import org.apache.iotdb.it.env.EnvFactory; import org.apache.iotdb.it.framework.IoTDBTestRunner; @@ -40,7 +40,7 @@ @RunWith(IoTDBTestRunner.class) @Category({LocalStandaloneIT.class, ClusterIT.class}) -public class ExportDataTestIT extends AbstractScript { +public class ExportDataTestIT extends AbstractScriptIT { private static String ip; private static String port; @@ -89,6 +89,8 @@ protected void testOnWindows() throws IOException { "root", "-pw", "root", + "-ft", + "csv", "-t", "target", "-q", @@ -115,6 +117,8 @@ protected void testOnWindows() throws IOException { "root", "-pw", "root", + "-ft", + "csv", "-t", "target", "-q", @@ -143,7 +147,7 @@ protected void testOnWindows() throws IOException { "root", "-t", "target", - "-type", + "-ft", "sql", "-q", "select * from root.test.t2 where time > 1 and time < 1000000000000", @@ -170,6 +174,8 @@ protected void testOnUnix() throws IOException { "root", "-pw", "root", + "-ft", + "csv", "-t", "target", "-q", @@ -195,6 +201,8 @@ protected void testOnUnix() throws IOException { "root", "-t", "target", + "-ft", + "csv", "-q", "select * from root.**"); builder1.environment().put("CLASSPATH", libPath); @@ -218,7 +226,7 @@ protected void testOnUnix() throws IOException { "root", "-t", "target", - "-type", + "-ft", "sql", "-q", "select * from root.test.t2 where time > 1 and time < 1000000000000"); diff --git a/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportSchemaTestIT.java b/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportSchemaTestIT.java index f8a4cdae171b4..cc520b5e80970 100644 --- a/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportSchemaTestIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportSchemaTestIT.java @@ -18,7 +18,7 @@ */ package org.apache.iotdb.tools.it; -import org.apache.iotdb.cli.it.AbstractScript; +import org.apache.iotdb.cli.it.AbstractScriptIT; import org.apache.iotdb.isession.ISession; import 
org.apache.iotdb.it.env.EnvFactory; import org.apache.iotdb.it.framework.IoTDBTestRunner; @@ -41,7 +41,7 @@ @RunWith(IoTDBTestRunner.class) @Category({LocalStandaloneIT.class, ClusterIT.class}) -public class ExportSchemaTestIT extends AbstractScript { +public class ExportSchemaTestIT extends AbstractScriptIT { private static String ip; private static String port; diff --git a/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportTsFileTestIT.java b/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportTsFileTestIT.java index 5994d4d554eea..88b426d9a4af9 100644 --- a/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportTsFileTestIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportTsFileTestIT.java @@ -19,7 +19,7 @@ package org.apache.iotdb.tools.it; -import org.apache.iotdb.cli.it.AbstractScript; +import org.apache.iotdb.cli.it.AbstractScriptIT; import org.apache.iotdb.isession.ISession; import org.apache.iotdb.it.env.EnvFactory; import org.apache.iotdb.it.framework.IoTDBTestRunner; @@ -41,7 +41,7 @@ @RunWith(IoTDBTestRunner.class) @Category({LocalStandaloneIT.class, ClusterIT.class}) -public class ExportTsFileTestIT extends AbstractScript { +public class ExportTsFileTestIT extends AbstractScriptIT { private static String ip; private static String port; @@ -52,7 +52,15 @@ public class ExportTsFileTestIT extends AbstractScript { @BeforeClass public static void setUp() throws Exception { + // enable subscription + EnvFactory.getEnv() + .getConfig() + .getCommonConfig() + .setSubscriptionEnabled(true) + .setPipeMemoryManagementEnabled(false) + .setIsPipeEnableMemoryCheck(false); EnvFactory.getEnv().initClusterEnvironment(); + ip = EnvFactory.getEnv().getIP(); port = EnvFactory.getEnv().getPort(); toolsPath = EnvFactory.getEnv().getToolsPath(); @@ -98,7 +106,7 @@ protected void testOnWindows() throws IOException { "exit", "%^errorlevel%"); builder.environment().put("CLASSPATH", libPath); - testOutput(builder, output, 1); + testOutput(builder, output, 0); prepareData(); @@ -148,7 +156,7 @@ protected void testOnUnix() throws IOException { "-q", "select * from root.**"); builder.environment().put("CLASSPATH", libPath); - testOutput(builder, output, 1); + testOutput(builder, output, 0); prepareData(); diff --git a/integration-test/src/test/java/org/apache/iotdb/tools/it/ImportDataTestIT.java b/integration-test/src/test/java/org/apache/iotdb/tools/it/ImportDataTestIT.java index 391b726f6d732..d2f7c95aacd7b 100644 --- a/integration-test/src/test/java/org/apache/iotdb/tools/it/ImportDataTestIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/tools/it/ImportDataTestIT.java @@ -19,7 +19,7 @@ package org.apache.iotdb.tools.it; -import org.apache.iotdb.cli.it.AbstractScript; +import org.apache.iotdb.cli.it.AbstractScriptIT; import org.apache.iotdb.it.env.EnvFactory; import org.apache.iotdb.it.framework.IoTDBTestRunner; import org.apache.iotdb.itbase.category.ClusterIT; @@ -36,7 +36,7 @@ @RunWith(IoTDBTestRunner.class) @Category({LocalStandaloneIT.class, ClusterIT.class}) -public class ImportDataTestIT extends AbstractScript { +public class ImportDataTestIT extends AbstractScriptIT { private static String ip; @@ -74,7 +74,7 @@ public void test() throws IOException { @Override protected void testOnWindows() throws IOException { final String[] output = { - "The file name must end with \"csv\" or \"txt\"!", + "Source file or directory ./csv/ does not exist", }; ProcessBuilder builder = new ProcessBuilder( @@ -89,19 +89,21 @@ protected void 
testOnWindows() throws IOException { "root", "-pw", "root", + "-ft", + "csv", "-s", - "./", + "./csv/", "&", "exit", "%^errorlevel%"); builder.environment().put("CLASSPATH", libPath); - testOutput(builder, output, 0); + testOutput(builder, output, 1); } @Override protected void testOnUnix() throws IOException { final String[] output = { - "The file name must end with \"csv\" or \"txt\"!", + "Source file or directory ./csv/ does not exist", }; ProcessBuilder builder = new ProcessBuilder( @@ -115,9 +117,11 @@ protected void testOnUnix() throws IOException { "root", "-pw", "root", + "-ft", + "csv", "-s", - "./"); + "./csv/"); builder.environment().put("CLASSPATH", libPath); - testOutput(builder, output, 0); + testOutput(builder, output, 1); } } diff --git a/integration-test/src/test/java/org/apache/iotdb/tools/it/ImportSchemaTestIT.java b/integration-test/src/test/java/org/apache/iotdb/tools/it/ImportSchemaTestIT.java index ea7e1ba07f13f..06eb3f4eb342e 100644 --- a/integration-test/src/test/java/org/apache/iotdb/tools/it/ImportSchemaTestIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/tools/it/ImportSchemaTestIT.java @@ -19,7 +19,7 @@ package org.apache.iotdb.tools.it; -import org.apache.iotdb.cli.it.AbstractScript; +import org.apache.iotdb.cli.it.AbstractScriptIT; import org.apache.iotdb.it.env.EnvFactory; import org.apache.iotdb.it.framework.IoTDBTestRunner; import org.apache.iotdb.itbase.category.ClusterIT; @@ -36,7 +36,7 @@ @RunWith(IoTDBTestRunner.class) @Category({LocalStandaloneIT.class, ClusterIT.class}) -public class ImportSchemaTestIT extends AbstractScript { +public class ImportSchemaTestIT extends AbstractScriptIT { private static String ip; diff --git a/integration-test/src/test/java/org/apache/iotdb/util/MagicUtils.java b/integration-test/src/test/java/org/apache/iotdb/util/MagicUtils.java new file mode 100644 index 0000000000000..e6213a72b71be --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/util/MagicUtils.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
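The ExportDataTestIT and ImportDataTestIT updates above track a CLI rename: the data tools now select the file format with -ft (csv or sql) instead of the old -type, and the tests pass it explicitly even where csv used to be implied. Assembled the way the tests do it (values are test fixtures, not requirements):

// Hypothetical argument list for export-data, mirroring the test arrays above.
String[] exportArgs = {
    "-h", ip, "-p", port,
    "-u", "root", "-pw", "root",
    "-ft", "csv",                      // replaces the former "-type" option
    "-t", "target",
    "-q", "select * from root.**"
};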
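The MagicUtils helper whose body follows wraps an interface-typed resource in a JDK dynamic proxy whose close() logs failures instead of throwing, so a flaky cleanup cannot mask the assertion that actually failed a test. A hypothetical call site (the session accessor is an assumption; any target that implements an interface works, since the proxy is built from the target's interfaces):

// Wrap once; every method other than close() still delegates directly.
ISession session =
    MagicUtils.makeItCloseQuietly(EnvFactory.getEnv().getSessionConnection());
try {
  session.executeNonQueryStatement("create database root.demo"); // placeholder
} finally {
  session.close(); // exceptions here are logged by MagicUtils, not rethrown
}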
+ */ + +package org.apache.iotdb.util; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Proxy; + +public class MagicUtils { + + private static Logger LOGGER = LoggerFactory.getLogger(MagicUtils.class); + + /** + * Ignore all exceptions during close() + * + * @param t target object + * @return object which will close without exception + */ + public static T makeItCloseQuietly(T t) { + InvocationHandler handler = + (proxy, method, args) -> { + try { + if (method.getName().equals("close")) { + try { + method.invoke(t, args); + } catch (Throwable e) { + LOGGER.warn("Exception happens during close(): ", e); + } + return null; + } else { + return method.invoke(t, args); + } + } catch (InvocationTargetException e) { + throw e.getTargetException(); + } + }; + return (T) + Proxy.newProxyInstance( + t.getClass().getClassLoader(), t.getClass().getInterfaces(), handler); + } +} diff --git a/integration-test/src/test/resources/ainode-example/config.yaml b/integration-test/src/test/resources/ainode-example/config.yaml new file mode 100644 index 0000000000000..996e572cf39a6 --- /dev/null +++ b/integration-test/src/test/resources/ainode-example/config.yaml @@ -0,0 +1,5 @@ +configs: + input_shape: [7, 3] + output_shape: [7, 3] + input_type: ["float32", "float32","float32"] + output_type: ["float32", "float32","float32"] diff --git a/integration-test/src/test/resources/ainode-example/model.pt b/integration-test/src/test/resources/ainode-example/model.pt new file mode 100644 index 0000000000000..67d4aec6999f1 Binary files /dev/null and b/integration-test/src/test/resources/ainode-example/model.pt differ diff --git a/integration-test/src/test/resources/logback-test.xml b/integration-test/src/test/resources/logback-test.xml index 95a4d1c5c9cb2..ba595ff975232 100644 --- a/integration-test/src/test/resources/logback-test.xml +++ b/integration-test/src/test/resources/logback-test.xml @@ -49,7 +49,7 @@ - + diff --git a/iotdb-api/external-api/pom.xml b/iotdb-api/external-api/pom.xml index 860e1453757cb..a4da52b38d409 100644 --- a/iotdb-api/external-api/pom.xml +++ b/iotdb-api/external-api/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-api - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT external-api IoTDB: API: External API diff --git a/iotdb-api/pipe-api/pom.xml b/iotdb-api/pipe-api/pom.xml index c0e4338b3ef1f..88988bfeb6573 100644 --- a/iotdb-api/pipe-api/pom.xml +++ b/iotdb-api/pipe-api/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-api - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT pipe-api IoTDB: API: Pipe API diff --git a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/customizer/configuration/PipeRuntimeEnvironment.java b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/customizer/configuration/PipeRuntimeEnvironment.java index 455d293dccc3b..8395568791cfc 100644 --- a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/customizer/configuration/PipeRuntimeEnvironment.java +++ b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/customizer/configuration/PipeRuntimeEnvironment.java @@ -24,4 +24,6 @@ public interface PipeRuntimeEnvironment { String getPipeName(); long getCreationTime(); + + int getRegionId(); } diff --git a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/customizer/parameter/PipeParameterValidator.java b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/customizer/parameter/PipeParameterValidator.java index 
1fa0046ccdab3..99d547a23c6d2 100644 --- a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/customizer/parameter/PipeParameterValidator.java +++ b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/customizer/parameter/PipeParameterValidator.java @@ -23,6 +23,10 @@ import org.apache.iotdb.pipe.api.exception.PipeParameterNotValidException; import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; public class PipeParameterValidator { @@ -36,6 +40,40 @@ public PipeParameters getParameters() { return parameters; } + /** + * Validates whether the attributes entered by the user contain at least one attribute from + * lhsAttributes or rhsAttributes (if required), but not both. + * + * @param lhsAttributes list of left-hand side synonym attributes + * @param rhsAttributes list of right-hand side synonym attributes + * @param isRequired specifies whether at least one attribute from lhsAttributes or rhsAttributes + * must be provided + * @throws PipeParameterNotValidException if both lhsAttributes and rhsAttributes are provided + * @throws PipeAttributeNotProvidedException if isRequired is true and neither lhsAttributes nor + * rhsAttributes are provided + * @return the instance of PipeParameterValidator for method chaining + */ + public PipeParameterValidator validateSynonymAttributes( + final List lhsAttributes, + final List rhsAttributes, + final boolean isRequired) { + final boolean lhsExistence = lhsAttributes.stream().anyMatch(parameters::hasAttribute); + final boolean rhsExistence = rhsAttributes.stream().anyMatch(parameters::hasAttribute); + if (lhsExistence && rhsExistence) { + throw new PipeParameterNotValidException( + String.format( + "Cannot specify both %s and %s at the same time", lhsAttributes, rhsAttributes)); + } + if (isRequired && !lhsExistence && !rhsExistence) { + throw new PipeAttributeNotProvidedException( + Stream.concat(lhsAttributes.stream(), rhsAttributes.stream()) + .collect( + Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)) + .toString()); + } + return this; + } + /** * Validates whether the attributes entered by the user contain an attribute whose key is * attributeKey. @@ -83,7 +121,7 @@ public PipeParameterValidator validateAttributeValueRange( * @throws PipeParameterNotValidException if the given argument is not valid */ public PipeParameterValidator validate( - final PipeParameterValidator.SingleObjectValidationRule validationRule, + final SingleObjectValidationRule validationRule, final String messageToThrow, final Object argument) throws PipeParameterNotValidException { @@ -107,7 +145,7 @@ public interface SingleObjectValidationRule { * @throws PipeParameterNotValidException if the given arguments are not valid */ public PipeParameterValidator validate( - final PipeParameterValidator.MultipleObjectsValidationRule validationRule, + final MultipleObjectsValidationRule validationRule, final String messageToThrow, final Object... 
arguments) throws PipeParameterNotValidException { diff --git a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/customizer/parameter/PipeParameters.java b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/customizer/parameter/PipeParameters.java index 1646423b133f6..3dcb2d19b0a78 100644 --- a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/customizer/parameter/PipeParameters.java +++ b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/customizer/parameter/PipeParameters.java @@ -71,6 +71,10 @@ public boolean hasAnyAttributes(final String... keys) { return false; } + public void addAttribute(final String key, String values) { + attributes.put(KeyReducer.reduce(key), values); + } + public String getString(final String key) { final String value = attributes.get(key); return value != null ? value : attributes.get(KeyReducer.reduce(key)); @@ -379,6 +383,7 @@ public static class ValueHider { static { KEYS.add("ssl.trust-store-pwd"); + KEYS.add("password"); } static String hide(final String key, final String value) { diff --git a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/event/dml/insertion/TsFileInsertionEvent.java b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/event/dml/insertion/TsFileInsertionEvent.java index 3d04186724270..4c7fffcfba517 100644 --- a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/event/dml/insertion/TsFileInsertionEvent.java +++ b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/event/dml/insertion/TsFileInsertionEvent.java @@ -21,6 +21,8 @@ import org.apache.iotdb.pipe.api.event.Event; +import java.io.File; + /** * {@link TsFileInsertionEvent} is used to define the event of writing TsFile. Event data stores in * disks, which is compressed and encoded, and requires IO cost for computational processing. @@ -34,4 +36,12 @@ public interface TsFileInsertionEvent extends Event, AutoCloseable { * @return {@code Iterable} the list of {@link TabletInsertionEvent} */ Iterable toTabletInsertionEvents(); + + /** + * Get the file that stores the data of this {@link TsFileInsertionEvent}. The file is compressed + * and encoded, and requires IO cost for computational processing. 
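validateSynonymAttributes, added above, gives pipe plugins a one-call guard for parameters that accept two spellings: the user may supply either spelling, must supply one when isRequired is true, and may never supply both. A hypothetical use inside a plugin's validate hook (the attribute keys are invented for illustration):

@Override
public void validate(final PipeParameterValidator validator) {
  validator.validateSynonymAttributes(
      Arrays.asList("source.path"),     // hypothetical legacy key
      Arrays.asList("extractor.path"),  // hypothetical current key
      true);                            // at least one spelling is mandatory
}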
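Two smaller PipeParameters changes ride along: addAttribute lets callers inject an attribute programmatically (the key is normalized through KeyReducer first), and "password" joins "ssl.trust-store-pwd" in ValueHider's mask list, so credential values are hidden wherever the parameters are rendered. A sketch (the exact masked rendering is an implementation detail not shown in this patch):

parameters.addAttribute("password", "secret"); // stored under the reduced key
// When these parameters are printed, ValueHider substitutes a placeholder for
// the value of "password" rather than emitting the literal "secret".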
+ * + * @return the file that stores the data of this {@link TsFileInsertionEvent} + */ + File getTsFile(); } diff --git a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/exception/PipeConnectionException.java b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/exception/PipeConnectionException.java index dc9d5e32968d0..ded27cc433a31 100644 --- a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/exception/PipeConnectionException.java +++ b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/exception/PipeConnectionException.java @@ -24,11 +24,11 @@ public class PipeConnectionException extends PipeException { public static final String CONNECTION_ERROR_FORMATTER = "Error occurred while connecting to receiver %s:%s, please check network connectivity or SSL configurations when enable SSL transmission"; - public PipeConnectionException(String message) { + public PipeConnectionException(final String message) { super(message); } - public PipeConnectionException(String message, Throwable cause) { + public PipeConnectionException(final String message, final Throwable cause) { super(message, cause); } } diff --git a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/exception/PipeException.java b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/exception/PipeException.java index 3291f87be0de9..7be6d6a5560ce 100644 --- a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/exception/PipeException.java +++ b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/exception/PipeException.java @@ -23,17 +23,17 @@ public class PipeException extends RuntimeException { private final long timeStamp; - public PipeException(String message) { + public PipeException(final String message) { super(message); this.timeStamp = System.currentTimeMillis(); } - public PipeException(String message, long timeStamp) { + public PipeException(final String message, final long timeStamp) { super(message); this.timeStamp = timeStamp; } - public PipeException(String message, Throwable cause) { + public PipeException(final String message, final Throwable cause) { super(message, cause); this.timeStamp = System.currentTimeMillis(); } diff --git a/iotdb-api/pom.xml b/iotdb-api/pom.xml index f29b64894b6f2..15e396f5423f1 100644 --- a/iotdb-api/pom.xml +++ b/iotdb-api/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-parent - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT iotdb-api pom diff --git a/iotdb-api/trigger-api/pom.xml b/iotdb-api/trigger-api/pom.xml index 49b192aebe70c..fd6d8fc88dfda 100644 --- a/iotdb-api/trigger-api/pom.xml +++ b/iotdb-api/trigger-api/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-api - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT trigger-api IoTDB: API: Trigger API diff --git a/iotdb-api/udf-api/pom.xml b/iotdb-api/udf-api/pom.xml index f6b5897c826b9..7a6f859f6d2bf 100644 --- a/iotdb-api/udf-api/pom.xml +++ b/iotdb-api/udf-api/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-api - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT udf-api IoTDB: API: UDF API diff --git a/iotdb-client/cli/pom.xml b/iotdb-client/cli/pom.xml index cc62e9e8d8d54..4aa744f8c889d 100644 --- a/iotdb-client/cli/pom.xml +++ b/iotdb-client/cli/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-client - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT iotdb-cli IoTDB: Client: CLI @@ -37,37 +37,37 @@ org.apache.iotdb iotdb-session - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-jdbc - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-antlr - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb node-commons - 1.3.3-SNAPSHOT + 
1.3.4-SNAPSHOT org.apache.iotdb iotdb-server - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb isession - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb service-rpc - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.tsfile @@ -82,7 +82,17 @@ org.apache.iotdb iotdb-thrift - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT + + + org.apache.iotdb + iotdb-thrift-commons + 1.3.4-SNAPSHOT + + + org.apache.iotdb + pipe-api + 1.3.4-SNAPSHOT org.slf4j diff --git a/iotdb-client/cli/src/assembly/resources/tools/backup.bat b/iotdb-client/cli/src/assembly/resources/tools/backup.bat index 3008a74733e4e..e7974164b08ec 100644 --- a/iotdb-client/cli/src/assembly/resources/tools/backup.bat +++ b/iotdb-client/cli/src/assembly/resources/tools/backup.bat @@ -102,7 +102,7 @@ echo Starting IoTDB Client Data Back Script echo ------------------------------------------ set CLASSPATH="%IOTDB_HOME%\lib\*" -if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.tool.IoTDBDataBackTool +if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.tool.backup.IoTDBDataBackTool set logsDir="%IOTDB_HOME%\logs" if not exist "%logsDir%" ( diff --git a/iotdb-client/cli/src/assembly/resources/tools/backup.sh b/iotdb-client/cli/src/assembly/resources/tools/backup.sh index 7ef5cdc5777f3..d65392cdcca14 100644 --- a/iotdb-client/cli/src/assembly/resources/tools/backup.sh +++ b/iotdb-client/cli/src/assembly/resources/tools/backup.sh @@ -126,7 +126,7 @@ for f in ${IOTDB_HOME}/lib/*.jar; do CLASSPATH=${CLASSPATH}":"$f done -MAIN_CLASS=org.apache.iotdb.tool.IoTDBDataBackTool +MAIN_CLASS=org.apache.iotdb.tool.backup.IoTDBDataBackTool logs_dir="${IOTDB_HOME}/logs" diff --git a/iotdb-client/cli/src/assembly/resources/tools/export-data.bat b/iotdb-client/cli/src/assembly/resources/tools/export-data.bat index c3acfff5f92ee..2178482f36910 100644 --- a/iotdb-client/cli/src/assembly/resources/tools/export-data.bat +++ b/iotdb-client/cli/src/assembly/resources/tools/export-data.bat @@ -21,9 +21,9 @@ title IoTDB Export -echo ```````````````````````````````````````````````` -echo Starting IoTDB Client Export Script -echo ```````````````````````````````````````````````` +@REM echo ```````````````````````````````````````````````` +@REM echo Starting IoTDB Client Export Script +@REM echo ```````````````````````````````````````````````` if "%OS%" == "Windows_NT" setlocal @@ -31,7 +31,7 @@ pushd %~dp0.. if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%CD% popd -if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.tool.ExportData +if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.tool.data.ExportData if NOT DEFINED JAVA_HOME goto :err @REM ----------------------------------------------------------------------------- diff --git a/iotdb-client/cli/src/assembly/resources/tools/export-data.sh b/iotdb-client/cli/src/assembly/resources/tools/export-data.sh index 8aa7491b945b2..4d69035e9ca88 100644 --- a/iotdb-client/cli/src/assembly/resources/tools/export-data.sh +++ b/iotdb-client/cli/src/assembly/resources/tools/export-data.sh @@ -18,9 +18,9 @@ # under the License. 
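The TsFileInsertionEvent.getTsFile() accessor introduced earlier in this patch lets a connector ship the encoded file verbatim instead of paying the decode cost of toTabletInsertionEvents(). A sketch of a file-oriented sink built on the existing transfer(TsFileInsertionEvent) overload (targetDir is a hypothetical java.nio.file.Path field; Files and StandardCopyOption are from java.nio.file):

@Override
public void transfer(final TsFileInsertionEvent event) throws Exception {
  final File tsFile = event.getTsFile();
  // IO-bound copy of the compressed, encoded file; no tablet decoding needed.
  Files.copy(tsFile.toPath(), targetDir.resolve(tsFile.getName()),
      StandardCopyOption.REPLACE_EXISTING);
}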
# -echo ------------------------------------------ -echo Starting IoTDB Client Export Script -echo ------------------------------------------ +#echo ------------------------------------------ +#echo Starting IoTDB Client Export Script +#echo ------------------------------------------ if [ -z "${IOTDB_INCLUDE}" ]; then #do nothing @@ -53,7 +53,7 @@ for f in ${IOTDB_HOME}/lib/*.jar; do CLASSPATH=${CLASSPATH}":"$f done -MAIN_CLASS=org.apache.iotdb.tool.ExportData +MAIN_CLASS=org.apache.iotdb.tool.data.ExportData "$JAVA" -DIOTDB_HOME=${IOTDB_HOME} -cp "$CLASSPATH" "$MAIN_CLASS" "$@" exit $? \ No newline at end of file diff --git a/iotdb-client/cli/src/assembly/resources/tools/export-schema.bat b/iotdb-client/cli/src/assembly/resources/tools/export-schema.bat index 8ed2b81f5c757..dab5dfaf66799 100644 --- a/iotdb-client/cli/src/assembly/resources/tools/export-schema.bat +++ b/iotdb-client/cli/src/assembly/resources/tools/export-schema.bat @@ -31,7 +31,7 @@ pushd %~dp0.. if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%CD% popd -if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.tool.ExportSchema +if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.tool.schema.ExportSchema if NOT DEFINED JAVA_HOME goto :err @REM ----------------------------------------------------------------------------- diff --git a/iotdb-client/cli/src/assembly/resources/tools/export-schema.sh b/iotdb-client/cli/src/assembly/resources/tools/export-schema.sh index b65a7d3950ada..e4d18590799c4 100644 --- a/iotdb-client/cli/src/assembly/resources/tools/export-schema.sh +++ b/iotdb-client/cli/src/assembly/resources/tools/export-schema.sh @@ -51,7 +51,7 @@ fi CLASSPATH=${IOTDB_HOME}/lib/* -MAIN_CLASS=org.apache.iotdb.tool.ExportSchema +MAIN_CLASS=org.apache.iotdb.tool.schema.ExportSchema "$JAVA" -DIOTDB_HOME=${IOTDB_HOME} -cp "$CLASSPATH" "$MAIN_CLASS" "$@" exit $? \ No newline at end of file diff --git a/iotdb-client/cli/src/assembly/resources/tools/export-tsfile.bat b/iotdb-client/cli/src/assembly/resources/tools/export-tsfile.bat index 350e806a76957..2c85f42bd6278 100644 --- a/iotdb-client/cli/src/assembly/resources/tools/export-tsfile.bat +++ b/iotdb-client/cli/src/assembly/resources/tools/export-tsfile.bat @@ -31,7 +31,7 @@ pushd %~dp0.. if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%CD% popd -if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.tool.ExportTsFile +if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.tool.tsfile.ExportTsFile if NOT DEFINED JAVA_HOME goto :err @REM ----------------------------------------------------------------------------- diff --git a/iotdb-client/cli/src/assembly/resources/tools/export-tsfile.sh b/iotdb-client/cli/src/assembly/resources/tools/export-tsfile.sh index a19eec09f4f51..ccc9df3e3a491 100644 --- a/iotdb-client/cli/src/assembly/resources/tools/export-tsfile.sh +++ b/iotdb-client/cli/src/assembly/resources/tools/export-tsfile.sh @@ -53,7 +53,7 @@ for f in ${IOTDB_HOME}/lib/*.jar; do CLASSPATH=${CLASSPATH}":"$f done -MAIN_CLASS=org.apache.iotdb.tool.ExportTsFile +MAIN_CLASS=org.apache.iotdb.tool.tsfile.ExportTsFile "$JAVA" -DIOTDB_HOME=${IOTDB_HOME} -cp "$CLASSPATH" "$MAIN_CLASS" "$@" exit $? 
\ No newline at end of file diff --git a/iotdb-client/cli/src/assembly/resources/tools/import-data.bat b/iotdb-client/cli/src/assembly/resources/tools/import-data.bat index cb34c5e897a6d..cbb71dcf68d27 100644 --- a/iotdb-client/cli/src/assembly/resources/tools/import-data.bat +++ b/iotdb-client/cli/src/assembly/resources/tools/import-data.bat @@ -21,9 +21,9 @@ title IoTDB Import -echo ```````````````````````````````````````````````` -echo Starting IoTDB Client Import Script -echo ```````````````````````````````````````````````` +@REM echo ```````````````````````````````````````````````` +@REM echo Starting IoTDB Client Import Script +@REM echo ```````````````````````````````````````````````` if "%OS%" == "Windows_NT" setlocal @@ -31,7 +31,7 @@ pushd %~dp0.. if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%CD% popd -if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.tool.ImportData +if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.tool.data.ImportData if NOT DEFINED JAVA_HOME goto :err @REM ----------------------------------------------------------------------------- diff --git a/iotdb-client/cli/src/assembly/resources/tools/import-data.sh b/iotdb-client/cli/src/assembly/resources/tools/import-data.sh index 97102b854c713..14789a44a1088 100644 --- a/iotdb-client/cli/src/assembly/resources/tools/import-data.sh +++ b/iotdb-client/cli/src/assembly/resources/tools/import-data.sh @@ -18,9 +18,9 @@ # under the License. # -echo ------------------------------------------ -echo Starting IoTDB Client Import Script -echo ------------------------------------------ +#echo ------------------------------------------ +#echo Starting IoTDB Client Import Script +#echo ------------------------------------------ if [ -z "${IOTDB_INCLUDE}" ]; then #do nothing @@ -53,7 +53,7 @@ for f in ${IOTDB_HOME}/lib/*.jar; do CLASSPATH=${CLASSPATH}":"$f done -MAIN_CLASS=org.apache.iotdb.tool.ImportData +MAIN_CLASS=org.apache.iotdb.tool.data.ImportData "$JAVA" -DIOTDB_HOME=${IOTDB_HOME} -cp "$CLASSPATH" "$MAIN_CLASS" "$@" exit $? \ No newline at end of file diff --git a/iotdb-client/cli/src/assembly/resources/tools/import-schema.bat b/iotdb-client/cli/src/assembly/resources/tools/import-schema.bat index 46a8d5abf6ea1..fbf5236128be6 100644 --- a/iotdb-client/cli/src/assembly/resources/tools/import-schema.bat +++ b/iotdb-client/cli/src/assembly/resources/tools/import-schema.bat @@ -31,7 +31,7 @@ pushd %~dp0.. if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%CD% popd -if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.tool.ImportSchema +if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.tool.schema.ImportSchema if NOT DEFINED JAVA_HOME goto :err @REM ----------------------------------------------------------------------------- diff --git a/iotdb-client/cli/src/assembly/resources/tools/import-schema.sh b/iotdb-client/cli/src/assembly/resources/tools/import-schema.sh index ca91293e235c0..3954446beb2b8 100644 --- a/iotdb-client/cli/src/assembly/resources/tools/import-schema.sh +++ b/iotdb-client/cli/src/assembly/resources/tools/import-schema.sh @@ -51,7 +51,7 @@ fi CLASSPATH=${IOTDB_HOME}/lib/* -MAIN_CLASS=org.apache.iotdb.tool.ImportSchema +MAIN_CLASS=org.apache.iotdb.tool.schema.ImportSchema "$JAVA" -DIOTDB_HOME=${IOTDB_HOME} -cp "$CLASSPATH" "$MAIN_CLASS" "$@" exit $? 
\ No newline at end of file diff --git a/iotdb-client/cli/src/assembly/resources/tools/load-tsfile.bat b/iotdb-client/cli/src/assembly/resources/tools/load-tsfile.bat index bd1f4c9170cb0..52ae0a46b7674 100755 --- a/iotdb-client/cli/src/assembly/resources/tools/load-tsfile.bat +++ b/iotdb-client/cli/src/assembly/resources/tools/load-tsfile.bat @@ -30,7 +30,7 @@ pushd %~dp0.. if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%CD% popd -if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.tool.ImportTsFile +if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.tool.tsfile.ImportTsFile if NOT DEFINED JAVA_HOME goto :err @REM ----------------------------------------------------------------------------- diff --git a/iotdb-client/cli/src/assembly/resources/tools/load-tsfile.sh b/iotdb-client/cli/src/assembly/resources/tools/load-tsfile.sh index b7ded896c029e..820ca5fcc2d1c 100755 --- a/iotdb-client/cli/src/assembly/resources/tools/load-tsfile.sh +++ b/iotdb-client/cli/src/assembly/resources/tools/load-tsfile.sh @@ -33,7 +33,7 @@ PARAMETERS=$@ IOTDB_CLI_CONF=${IOTDB_HOME}/conf -MAIN_CLASS=org.apache.iotdb.tool.ImportTsFile +MAIN_CLASS=org.apache.iotdb.tool.tsfile.ImportTsFile CLASSPATH="" for f in ${IOTDB_HOME}/lib/*.jar; do diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/AbstractCli.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/AbstractCli.java index 0da407e00f00f..4fddf30abf0b4 100644 --- a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/AbstractCli.java +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/AbstractCli.java @@ -26,7 +26,7 @@ import org.apache.iotdb.rpc.IoTDBConnectionException; import org.apache.iotdb.rpc.RpcUtils; import org.apache.iotdb.service.rpc.thrift.ServerProperties; -import org.apache.iotdb.tool.ImportData; +import org.apache.iotdb.tool.data.ImportData; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Option; diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/Cli.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/Cli.java index bca535ad3bb19..99efb30054878 100644 --- a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/Cli.java +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/Cli.java @@ -202,9 +202,7 @@ private static void receiveCommands(CliContext ctx) throws TException { } } } catch (SQLException e) { - ctx.getErr() - .printf( - "%s: %s Host is %s, port is %s.%n", IOTDB_ERROR_PREFIX, e.getMessage(), host, port); + ctx.getErr().printf("%s: %s%n", IOTDB_ERROR_PREFIX, e.getMessage()); ctx.exit(CODE_ERROR); } } @@ -223,6 +221,11 @@ private static boolean readerReadLine(CliContext ctx, IoTDBConnection connection } catch (EndOfFileException e) { // Exit on EOF (usually by pressing CTRL+D). 
ctx.exit(CODE_OK); + } catch (IllegalArgumentException e) { + if (e.getMessage().contains("history")) { + return false; + } + throw e; } return false; } diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/utils/JlineUtils.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/utils/JlineUtils.java index d46d72bcdcd9d..d7b061f5c1ec2 100644 --- a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/utils/JlineUtils.java +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/utils/JlineUtils.java @@ -36,6 +36,8 @@ import java.io.IOException; import java.util.Objects; import java.util.Set; +import java.util.logging.Level; +import java.util.logging.Logger; import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -54,6 +56,8 @@ private JlineUtils() {} public static LineReader getLineReader(CliContext ctx, String username, String host, String port) throws IOException { + Logger.getLogger("org.jline").setLevel(Level.OFF); + // Defaulting to a dumb terminal when a supported terminal can not be correctly created // see https://github.com/jline/jline3/issues/291 Terminal terminal; diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/AbstractDataTool.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/AbstractDataTool.java deleted file mode 100644 index 1e689e903c1ab..0000000000000 --- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/AbstractDataTool.java +++ /dev/null @@ -1,304 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.tool; - -import org.apache.iotdb.cli.utils.IoTPrinter; -import org.apache.iotdb.exception.ArgsErrorException; -import org.apache.iotdb.rpc.IoTDBConnectionException; -import org.apache.iotdb.rpc.StatementExecutionException; -import org.apache.iotdb.session.Session; - -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.Option; -import org.apache.commons.cli.Options; -import org.apache.commons.csv.CSVFormat; -import org.apache.commons.csv.CSVPrinter; -import org.apache.commons.csv.QuoteMode; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.io.PrintWriter; -import java.time.ZoneId; -import java.util.List; - -public abstract class AbstractDataTool { - - protected static final String HOST_ARGS = "h"; - protected static final String HOST_NAME = "host"; - protected static final String HOST_DEFAULT_VALUE = "127.0.0.1"; - - protected static final String HELP_ARGS = "help"; - - protected static final String PORT_ARGS = "p"; - protected static final String PORT_NAME = "port"; - protected static final String PORT_DEFAULT_VALUE = "6667"; - - protected static final String PW_ARGS = "pw"; - protected static final String PW_NAME = "password"; - protected static final String PW_DEFAULT_VALUE = "root"; - - protected static final String USERNAME_ARGS = "u"; - protected static final String USERNAME_NAME = "username"; - protected static final String USERNAME_DEFAULT_VALUE = "root"; - - protected static final String TIME_FORMAT_ARGS = "tf"; - protected static final String TIME_FORMAT_NAME = "timeformat"; - - protected static final String TIME_ZONE_ARGS = "tz"; - protected static final String TIME_ZONE_NAME = "timeZone"; - - protected static final String TIMEOUT_ARGS = "timeout"; - protected static final String TIMEOUT_NAME = "timeout"; - protected static final int MAX_HELP_CONSOLE_WIDTH = 92; - protected static final String[] TIME_FORMAT = - new String[] {"default", "long", "number", "timestamp"}; - protected static final String[] STRING_TIME_FORMAT = - new String[] { - "yyyy-MM-dd HH:mm:ss.SSSX", - "yyyy/MM/dd HH:mm:ss.SSSX", - "yyyy.MM.dd HH:mm:ss.SSSX", - "yyyy-MM-dd HH:mm:ssX", - "yyyy/MM/dd HH:mm:ssX", - "yyyy.MM.dd HH:mm:ssX", - "yyyy-MM-dd HH:mm:ss.SSSz", - "yyyy/MM/dd HH:mm:ss.SSSz", - "yyyy.MM.dd HH:mm:ss.SSSz", - "yyyy-MM-dd HH:mm:ssz", - "yyyy/MM/dd HH:mm:ssz", - "yyyy.MM.dd HH:mm:ssz", - "yyyy-MM-dd HH:mm:ss.SSS", - "yyyy/MM/dd HH:mm:ss.SSS", - "yyyy.MM.dd HH:mm:ss.SSS", - "yyyy-MM-dd HH:mm:ss", - "yyyy/MM/dd HH:mm:ss", - "yyyy.MM.dd HH:mm:ss", - "yyyy-MM-dd'T'HH:mm:ss.SSSX", - "yyyy/MM/dd'T'HH:mm:ss.SSSX", - "yyyy.MM.dd'T'HH:mm:ss.SSSX", - "yyyy-MM-dd'T'HH:mm:ssX", - "yyyy/MM/dd'T'HH:mm:ssX", - "yyyy.MM.dd'T'HH:mm:ssX", - "yyyy-MM-dd'T'HH:mm:ss.SSSz", - "yyyy/MM/dd'T'HH:mm:ss.SSSz", - "yyyy.MM.dd'T'HH:mm:ss.SSSz", - "yyyy-MM-dd'T'HH:mm:ssz", - "yyyy/MM/dd'T'HH:mm:ssz", - "yyyy.MM.dd'T'HH:mm:ssz", - "yyyy-MM-dd'T'HH:mm:ss.SSS", - "yyyy/MM/dd'T'HH:mm:ss.SSS", - "yyyy.MM.dd'T'HH:mm:ss.SSS", - "yyyy-MM-dd'T'HH:mm:ss", - "yyyy/MM/dd'T'HH:mm:ss", - "yyyy.MM.dd'T'HH:mm:ss" - }; - protected static final int CODE_OK = 0; - protected static final int CODE_ERROR = 1; - - protected static String host; - protected static String port; - protected static String username; - protected static String password; - protected static ZoneId zoneId; - - protected static String timeZoneID; - protected static String timeFormat; - protected static String exportType; - 
protected static String aligned; - protected static Session session; - - private static final IoTPrinter ioTPrinter = new IoTPrinter(System.out); - private static final Logger LOGGER = LoggerFactory.getLogger(AbstractDataTool.class); - - protected AbstractDataTool() {} - - protected static String checkRequiredArg( - String arg, String name, CommandLine commandLine, String defaultValue) - throws ArgsErrorException { - String str = commandLine.getOptionValue(arg); - if (str == null) { - if (StringUtils.isNotBlank(defaultValue)) { - return defaultValue; - } - String msg = String.format("Required values for option '%s' not provided", name); - LOGGER.info(msg); - LOGGER.info("Use -help for more information"); - throw new ArgsErrorException(msg); - } - return str; - } - - protected static void setTimeZone() throws IoTDBConnectionException, StatementExecutionException { - if (timeZoneID != null) { - session.setTimeZone(timeZoneID); - } - zoneId = ZoneId.of(session.getTimeZone()); - } - - protected static void parseBasicParams(CommandLine commandLine) throws ArgsErrorException { - host = checkRequiredArg(HOST_ARGS, HOST_NAME, commandLine, HOST_DEFAULT_VALUE); - port = checkRequiredArg(PORT_ARGS, PORT_NAME, commandLine, PORT_DEFAULT_VALUE); - username = checkRequiredArg(USERNAME_ARGS, USERNAME_NAME, commandLine, USERNAME_DEFAULT_VALUE); - password = commandLine.getOptionValue(PW_ARGS, PW_DEFAULT_VALUE); - } - - protected static boolean checkTimeFormat() { - for (String format : TIME_FORMAT) { - if (timeFormat.equals(format)) { - return true; - } - } - for (String format : STRING_TIME_FORMAT) { - if (timeFormat.equals(format)) { - return true; - } - } - LOGGER.info( - "Input time format {} is not supported, " - + "please input like yyyy-MM-dd\\ HH:mm:ss.SSS or yyyy-MM-dd'T'HH:mm:ss.SSS%n", - timeFormat); - return false; - } - - protected static Options createNewOptions() { - Options options = new Options(); - - Option opHost = - Option.builder(HOST_ARGS) - .longOpt(HOST_NAME) - .argName(HOST_NAME) - .hasArg() - .desc("Host Name (optional)") - .build(); - options.addOption(opHost); - - Option opPort = - Option.builder(PORT_ARGS) - .longOpt(PORT_NAME) - .argName(PORT_NAME) - .hasArg() - .desc("Port (optional)") - .build(); - options.addOption(opPort); - - Option opUsername = - Option.builder(USERNAME_ARGS) - .longOpt(USERNAME_NAME) - .argName(USERNAME_NAME) - .hasArg() - .desc("Username (optional)") - .build(); - options.addOption(opUsername); - - Option opPassword = - Option.builder(PW_ARGS) - .longOpt(PW_NAME) - .optionalArg(true) - .argName(PW_NAME) - .hasArg() - .desc("Password (optional)") - .build(); - options.addOption(opPassword); - return options; - } - - /** - * write data to CSV file. 
- * - * @param headerNames the header names of CSV file - * @param records the records of CSV file - * @param filePath the directory to save the file - */ - public static Boolean writeCsvFile( - List headerNames, List> records, String filePath) { - try { - final CSVPrinterWrapper csvPrinterWrapper = new CSVPrinterWrapper(filePath); - if (headerNames != null) { - csvPrinterWrapper.printRecord(headerNames); - } - for (List CsvRecord : records) { - csvPrinterWrapper.printRecord(CsvRecord); - } - csvPrinterWrapper.flush(); - csvPrinterWrapper.close(); - return true; - } catch (IOException e) { - ioTPrinter.printException(e); - return false; - } - } - - static class CSVPrinterWrapper { - private final String filePath; - private final CSVFormat csvFormat; - private CSVPrinter csvPrinter; - - public CSVPrinterWrapper(String filePath) { - this.filePath = filePath; - this.csvFormat = - CSVFormat.Builder.create(CSVFormat.DEFAULT) - .setHeader() - .setSkipHeaderRecord(true) - .setEscape('\\') - .setQuoteMode(QuoteMode.NONE) - .build(); - } - - public void printRecord(final Iterable values) throws IOException { - if (csvPrinter == null) { - csvPrinter = csvFormat.print(new PrintWriter(filePath)); - } - csvPrinter.printRecord(values); - } - - public void print(Object value) { - if (csvPrinter == null) { - try { - csvPrinter = csvFormat.print(new PrintWriter(filePath)); - } catch (IOException e) { - ioTPrinter.printException(e); - return; - } - } - try { - csvPrinter.print(value); - } catch (IOException e) { - ioTPrinter.printException(e); - } - } - - public void println() throws IOException { - csvPrinter.println(); - } - - public void close() throws IOException { - if (csvPrinter != null) { - csvPrinter.close(); - } - } - - public void flush() throws IOException { - if (csvPrinter != null) { - csvPrinter.flush(); - } - } - } -} diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ImportData.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ImportData.java deleted file mode 100644 index a16cce67a2ed2..0000000000000 --- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ImportData.java +++ /dev/null @@ -1,1069 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.tool; - -import org.apache.iotdb.cli.utils.IoTPrinter; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.utils.PathUtils; -import org.apache.iotdb.db.queryengine.common.header.ColumnHeaderConstant; -import org.apache.iotdb.db.utils.DateTimeUtils; -import org.apache.iotdb.db.utils.constant.SqlConstant; -import org.apache.iotdb.exception.ArgsErrorException; -import org.apache.iotdb.isession.SessionDataSet; -import org.apache.iotdb.rpc.IoTDBConnectionException; -import org.apache.iotdb.rpc.StatementExecutionException; -import org.apache.iotdb.session.Session; - -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.CommandLineParser; -import org.apache.commons.cli.DefaultParser; -import org.apache.commons.cli.HelpFormatter; -import org.apache.commons.cli.Option; -import org.apache.commons.cli.Options; -import org.apache.commons.csv.CSVFormat; -import org.apache.commons.csv.CSVParser; -import org.apache.commons.csv.CSVRecord; -import org.apache.commons.lang3.ObjectUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.thrift.annotation.Nullable; -import org.apache.tsfile.common.constant.TsFileConstant; -import org.apache.tsfile.enums.TSDataType; -import org.apache.tsfile.read.common.Field; -import org.apache.tsfile.read.common.RowRecord; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileReader; -import java.io.FileWriter; -import java.io.IOException; -import java.io.InputStreamReader; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static org.apache.tsfile.enums.TSDataType.STRING; -import static org.apache.tsfile.enums.TSDataType.TEXT; - -public class ImportData extends AbstractDataTool { - - private static final String FILE_ARGS = "s"; - private static final String FILE_NAME = "sourceFileOrFolder"; - - private static final String FAILED_FILE_ARGS = "fd"; - private static final String FAILED_FILE_NAME = "failed file directory"; - - private static final String BATCH_POINT_SIZE_ARGS = "batch"; - private static final String BATCH_POINT_SIZE_NAME = "batch point size"; - - private static final String ALIGNED_ARGS = "aligned"; - private static final String ALIGNED_NAME = "use the aligned interface"; - - private static final String CSV_SUFFIXS = "csv"; - private static final String TXT_SUFFIXS = "txt"; - - private static final String SQL_SUFFIXS = "sql"; - - private static final String TIMESTAMP_PRECISION_ARGS = "tp"; - private static final String TIMESTAMP_PRECISION_NAME = "timestamp precision (ms/us/ns)"; - - private static final String TYPE_INFER_ARGS = "typeInfer"; - private static final String TYPE_INFER_ARGS_NAME = "type infer"; - - private static final String LINES_PER_FAILED_FILE_ARGS = "lpf"; - private static final String LINES_PER_FAILED_FILE_ARGS_NAME = "linesPerFailedFile"; - - private static final String TSFILEDB_CLI_PREFIX = "ImportData"; - - private static String targetPath; - private static String failedFileDirectory = null; - private static int linesPerFailedFile = 10000; - private static Boolean aligned = 
false; - - private static String timeColumn = "Time"; - private static String deviceColumn = "Device"; - - private static int batchPointSize = 100_000; - - private static String timestampPrecision = "ms"; - - private static final String DATATYPE_BOOLEAN = "boolean"; - private static final String DATATYPE_INT = "int"; - private static final String DATATYPE_LONG = "long"; - private static final String DATATYPE_FLOAT = "float"; - private static final String DATATYPE_DOUBLE = "double"; - private static final String DATATYPE_TIMESTAMP = "timestamp"; - private static final String DATATYPE_DATE = "date"; - private static final String DATATYPE_BLOB = "blob"; - private static final String DATATYPE_NAN = "NaN"; - private static final String DATATYPE_TEXT = "text"; - - private static final String DATATYPE_NULL = "null"; - private static int fetchSize = 1000; - - private static final String INSERT_CSV_MEET_ERROR_MSG = "Meet error when insert csv because "; - - private static final Map TYPE_INFER_KEY_DICT = new HashMap<>(); - - static { - TYPE_INFER_KEY_DICT.put(DATATYPE_BOOLEAN, TSDataType.BOOLEAN); - TYPE_INFER_KEY_DICT.put(DATATYPE_INT, TSDataType.FLOAT); - TYPE_INFER_KEY_DICT.put(DATATYPE_LONG, TSDataType.DOUBLE); - TYPE_INFER_KEY_DICT.put(DATATYPE_FLOAT, TSDataType.FLOAT); - TYPE_INFER_KEY_DICT.put(DATATYPE_DOUBLE, TSDataType.DOUBLE); - TYPE_INFER_KEY_DICT.put(DATATYPE_TIMESTAMP, TSDataType.TIMESTAMP); - TYPE_INFER_KEY_DICT.put(DATATYPE_DATE, TSDataType.TIMESTAMP); - TYPE_INFER_KEY_DICT.put(DATATYPE_BLOB, TSDataType.TEXT); - TYPE_INFER_KEY_DICT.put(DATATYPE_NAN, TSDataType.DOUBLE); - } - - private static final Map TYPE_INFER_VALUE_DICT = new HashMap<>(); - - static { - TYPE_INFER_VALUE_DICT.put(DATATYPE_BOOLEAN, TSDataType.BOOLEAN); - TYPE_INFER_VALUE_DICT.put(DATATYPE_INT, TSDataType.INT32); - TYPE_INFER_VALUE_DICT.put(DATATYPE_LONG, TSDataType.INT64); - TYPE_INFER_VALUE_DICT.put(DATATYPE_FLOAT, TSDataType.FLOAT); - TYPE_INFER_VALUE_DICT.put(DATATYPE_DOUBLE, TSDataType.DOUBLE); - TYPE_INFER_VALUE_DICT.put(DATATYPE_TIMESTAMP, TSDataType.TIMESTAMP); - TYPE_INFER_VALUE_DICT.put(DATATYPE_DATE, TSDataType.TIMESTAMP); - TYPE_INFER_VALUE_DICT.put(DATATYPE_BLOB, TSDataType.TEXT); - TYPE_INFER_VALUE_DICT.put(DATATYPE_TEXT, TSDataType.TEXT); - } - - private static final IoTPrinter ioTPrinter = new IoTPrinter(System.out); - - /** - * create the commandline options. - * - * @return object Options - */ - private static Options createOptions() { - Options options = createNewOptions(); - - Option opFile = - Option.builder(FILE_ARGS) - .required() - .argName(FILE_NAME) - .hasArg() - .desc( - "If input a file path, load a csv file, " - + "otherwise load all csv file under this directory (required)") - .build(); - options.addOption(opFile); - - Option opFailedFile = - Option.builder(FAILED_FILE_ARGS) - .argName(FAILED_FILE_NAME) - .hasArg() - .desc( - "Specifying a directory to save failed file, default YOUR_CSV_FILE_PATH (optional)") - .build(); - options.addOption(opFailedFile); - - Option opAligned = - Option.builder(ALIGNED_ARGS) - .argName(ALIGNED_NAME) - .hasArg() - .desc("Whether to use the interface of aligned(only csv optional)") - .build(); - options.addOption(opAligned); - - Option opHelp = - Option.builder(HELP_ARGS) - .longOpt(HELP_ARGS) - .hasArg(false) - .desc("Display help information") - .build(); - options.addOption(opHelp); - - Option opTimeZone = - Option.builder(TIME_ZONE_ARGS) - .argName(TIME_ZONE_NAME) - .hasArg() - .desc("Time Zone eg. 
+08:00 or -01:00 (optional)") - .build(); - options.addOption(opTimeZone); - - Option opBatchPointSize = - Option.builder(BATCH_POINT_SIZE_ARGS) - .argName(BATCH_POINT_SIZE_NAME) - .hasArg() - .desc("100000 (optional)") - .build(); - options.addOption(opBatchPointSize); - - Option opTimestampPrecision = - Option.builder(TIMESTAMP_PRECISION_ARGS) - .argName(TIMESTAMP_PRECISION_NAME) - .hasArg() - .desc("Timestamp precision (ms/us/ns)") - .build(); - - options.addOption(opTimestampPrecision); - - Option opTypeInfer = - Option.builder(TYPE_INFER_ARGS) - .argName(TYPE_INFER_ARGS_NAME) - .numberOfArgs(5) - .hasArgs() - .valueSeparator(',') - .desc("Define type info by option:\"boolean=text,int=long, ...") - .build(); - options.addOption(opTypeInfer); - - Option opFailedLinesPerFile = - Option.builder(LINES_PER_FAILED_FILE_ARGS) - .argName(LINES_PER_FAILED_FILE_ARGS_NAME) - .hasArgs() - .desc("Lines per failed file") - .build(); - options.addOption(opFailedLinesPerFile); - - return options; - } - - /** - * parse optional params - * - * @param commandLine - */ - private static void parseSpecialParams(CommandLine commandLine) throws ArgsErrorException { - timeZoneID = commandLine.getOptionValue(TIME_ZONE_ARGS); - targetPath = commandLine.getOptionValue(FILE_ARGS); - if (commandLine.getOptionValue(BATCH_POINT_SIZE_ARGS) != null) { - batchPointSize = Integer.parseInt(commandLine.getOptionValue(BATCH_POINT_SIZE_ARGS)); - } - if (commandLine.getOptionValue(FAILED_FILE_ARGS) != null) { - failedFileDirectory = commandLine.getOptionValue(FAILED_FILE_ARGS); - File file = new File(failedFileDirectory); - if (!file.isDirectory()) { - file.mkdir(); - failedFileDirectory = file.getAbsolutePath() + File.separator; - } else if (!failedFileDirectory.endsWith("/") && !failedFileDirectory.endsWith("\\")) { - failedFileDirectory += File.separator; - } - } - if (commandLine.getOptionValue(ALIGNED_ARGS) != null) { - aligned = Boolean.valueOf(commandLine.getOptionValue(ALIGNED_ARGS)); - } - - if (commandLine.getOptionValue(TIMESTAMP_PRECISION_ARGS) != null) { - timestampPrecision = commandLine.getOptionValue(TIMESTAMP_PRECISION_ARGS); - } - final String[] opTypeInferValues = commandLine.getOptionValues(TYPE_INFER_ARGS); - if (opTypeInferValues != null && opTypeInferValues.length > 0) { - for (String opTypeInferValue : opTypeInferValues) { - if (opTypeInferValue.contains("=")) { - final String[] typeInfoExpressionArr = opTypeInferValue.split("="); - final String key = typeInfoExpressionArr[0]; - final String value = typeInfoExpressionArr[1]; - applyTypeInferArgs(key, value); - } - } - } - if (commandLine.getOptionValue(LINES_PER_FAILED_FILE_ARGS) != null) { - linesPerFailedFile = Integer.parseInt(commandLine.getOptionValue(LINES_PER_FAILED_FILE_ARGS)); - } - } - - private static void applyTypeInferArgs(String key, String value) throws ArgsErrorException { - if (!TYPE_INFER_KEY_DICT.containsKey(key)) { - throw new ArgsErrorException("Unknown type infer key: " + key); - } - if (!TYPE_INFER_VALUE_DICT.containsKey(value)) { - throw new ArgsErrorException("Unknown type infer value: " + value); - } - if (key.equals(DATATYPE_NAN) - && !(value.equals(DATATYPE_FLOAT) - || value.equals(DATATYPE_DOUBLE) - || value.equals(DATATYPE_TEXT))) { - throw new ArgsErrorException("NaN can not convert to " + value); - } - if (key.equals(DATATYPE_BOOLEAN) - && !(value.equals(DATATYPE_BOOLEAN) || value.equals(DATATYPE_TEXT))) { - throw new ArgsErrorException("Boolean can not convert to " + value); - } - final TSDataType srcType = 
TYPE_INFER_VALUE_DICT.get(key); - final TSDataType dstType = TYPE_INFER_VALUE_DICT.get(value); - if (dstType.getType() < srcType.getType()) { - throw new ArgsErrorException(key + " can not convert to " + value); - } - TYPE_INFER_KEY_DICT.put(key, TYPE_INFER_VALUE_DICT.get(value)); - } - - public static void main(String[] args) throws IoTDBConnectionException { - Options options = createOptions(); - HelpFormatter hf = new HelpFormatter(); - hf.setOptionComparator(null); - hf.setWidth(MAX_HELP_CONSOLE_WIDTH); - CommandLine commandLine = null; - CommandLineParser parser = new DefaultParser(); - - if (args == null || args.length == 0) { - ioTPrinter.println("Too few params input, please check the following hint."); - hf.printHelp(TSFILEDB_CLI_PREFIX, options, true); - System.exit(CODE_ERROR); - } - try { - commandLine = parser.parse(options, args); - } catch (org.apache.commons.cli.ParseException e) { - ioTPrinter.println("Parse error: " + e.getMessage()); - hf.printHelp(TSFILEDB_CLI_PREFIX, options, true); - System.exit(CODE_ERROR); - } - if (commandLine.hasOption(HELP_ARGS)) { - hf.printHelp(TSFILEDB_CLI_PREFIX, options, true); - System.exit(CODE_ERROR); - } - - try { - parseBasicParams(commandLine); - String filename = commandLine.getOptionValue(FILE_ARGS); - if (filename == null) { - hf.printHelp(TSFILEDB_CLI_PREFIX, options, true); - System.exit(CODE_ERROR); - } - parseSpecialParams(commandLine); - } catch (ArgsErrorException e) { - ioTPrinter.println("Args error: " + e.getMessage()); - System.exit(CODE_ERROR); - } catch (Exception e) { - ioTPrinter.println("Encounter an error, because: " + e.getMessage()); - System.exit(CODE_ERROR); - } - - System.exit( - importFromTargetPath( - host, Integer.parseInt(port), username, password, targetPath, timeZoneID)); - } - - /** - * Specifying a CSV file or a directory including CSV files that you want to import. This method - * can be offered to console cli to implement importing CSV file by command. - * - * @param host - * @param port - * @param username - * @param password - * @param targetPath a CSV file or a directory including CSV files - * @param timeZone - * @return the status code - * @throws IoTDBConnectionException - */ - @SuppressWarnings({"squid:S2093"}) // ignore try-with-resources - public static int importFromTargetPath( - String host, int port, String username, String password, String targetPath, String timeZone) - throws IoTDBConnectionException { - try { - session = new Session(host, port, username, password, false); - session.open(false); - timeZoneID = timeZone; - setTimeZone(); - - File file = new File(targetPath); - if (file.isFile()) { - if (file.getName().endsWith(SQL_SUFFIXS)) { - importFromSqlFile(file); - } else { - importFromSingleFile(file); - } - } else if (file.isDirectory()) { - File[] files = file.listFiles(); - if (files == null) { - return CODE_OK; - } - - for (File subFile : files) { - if (subFile.isFile()) { - if (subFile.getName().endsWith(SQL_SUFFIXS)) { - importFromSqlFile(subFile); - } else { - importFromSingleFile(subFile); - } - } - } - } else { - ioTPrinter.println("File not found!"); - return CODE_ERROR; - } - } catch (IoTDBConnectionException | StatementExecutionException e) { - ioTPrinter.println("Encounter an error when connecting to server, because " + e.getMessage()); - return CODE_ERROR; - } finally { - if (session != null) { - session.close(); - } - } - return CODE_OK; - } - - /** - * import the CSV file and load headers and records. 
- * - * @param file the File object of the CSV file that you want to import. - */ - private static void importFromSingleFile(File file) { - if (file.getName().endsWith(CSV_SUFFIXS) || file.getName().endsWith(TXT_SUFFIXS)) { - try { - CSVParser csvRecords = readCsvFile(file.getAbsolutePath()); - List headerNames = csvRecords.getHeaderNames(); - Stream records = csvRecords.stream(); - if (headerNames.isEmpty()) { - ioTPrinter.println("Empty file!"); - return; - } - if (!timeColumn.equalsIgnoreCase(filterBomHeader(headerNames.get(0)))) { - ioTPrinter.println("The first field of header must be `Time`!"); - return; - } - String failedFilePath = null; - if (failedFileDirectory == null) { - failedFilePath = file.getAbsolutePath() + ".failed"; - } else { - failedFilePath = failedFileDirectory + file.getName() + ".failed"; - } - if (!deviceColumn.equalsIgnoreCase(headerNames.get(1))) { - writeDataAlignedByTime(headerNames, records, failedFilePath); - } else { - writeDataAlignedByDevice(headerNames, records, failedFilePath); - } - } catch (IOException | IllegalPathException e) { - ioTPrinter.println("CSV file read exception because: " + e.getMessage()); - } - } else { - ioTPrinter.println("The file name must end with \"csv\" or \"txt\"!"); - } - } - - @SuppressWarnings("java:S2259") - private static void importFromSqlFile(File file) { - ArrayList> failedRecords = new ArrayList<>(); - String failedFilePath = null; - if (failedFileDirectory == null) { - failedFilePath = file.getAbsolutePath() + ".failed"; - } else { - failedFilePath = failedFileDirectory + file.getName() + ".failed"; - } - try (BufferedReader br = new BufferedReader(new FileReader(file.getAbsolutePath()))) { - String sql; - while ((sql = br.readLine()) != null) { - try { - session.executeNonQueryStatement(sql); - } catch (IoTDBConnectionException | StatementExecutionException e) { - failedRecords.add(Arrays.asList(sql)); - } - } - ioTPrinter.println(file.getName() + " Import completely!"); - } catch (IOException e) { - ioTPrinter.println("SQL file read exception because: " + e.getMessage()); - } - if (!failedRecords.isEmpty()) { - FileWriter writer = null; - try { - writer = new FileWriter(failedFilePath); - for (List failedRecord : failedRecords) { - writer.write(failedRecord.get(0).toString() + "\n"); - } - } catch (IOException e) { - ioTPrinter.println("Cannot dump fail result because: " + e.getMessage()); - } finally { - if (ObjectUtils.isNotEmpty(writer)) { - try { - writer.flush(); - writer.close(); - } catch (IOException e) { - } - } - } - } - } - - /** - * if the data is aligned by time, the data will be written by this method. 
-   *
-   * @param headerNames the header names of CSV file
-   * @param records the records of CSV file
-   * @param failedFilePath the directory to save the failed files
-   */
-  @SuppressWarnings("squid:S3776")
-  private static void writeDataAlignedByTime(
-      List<String> headerNames, Stream<CSVRecord> records, String failedFilePath)
-      throws IllegalPathException {
-    HashMap<String, List<String>> deviceAndMeasurementNames = new HashMap<>();
-    HashMap<String, TSDataType> headerTypeMap = new HashMap<>();
-    HashMap<String, String> headerNameMap = new HashMap<>();
-    parseHeaders(headerNames, deviceAndMeasurementNames, headerTypeMap, headerNameMap);
-
-    Set<String> devices = deviceAndMeasurementNames.keySet();
-    if (headerTypeMap.isEmpty()) {
-      queryType(devices, headerTypeMap, "Time");
-    }
-
-    List<String> deviceIds = new ArrayList<>();
-    List<Long> times = new ArrayList<>();
-    List<List<String>> measurementsList = new ArrayList<>();
-    List<List<TSDataType>> typesList = new ArrayList<>();
-    List<List<Object>> valuesList = new ArrayList<>();
-
-    AtomicReference<Boolean> hasStarted = new AtomicReference<>(false);
-    AtomicInteger pointSize = new AtomicInteger(0);
-
-    ArrayList<List<Object>> failedRecords = new ArrayList<>();
-
-    records.forEach(
-        recordObj -> {
-          if (Boolean.FALSE.equals(hasStarted.get())) {
-            hasStarted.set(true);
-          } else if (pointSize.get() >= batchPointSize) {
-            writeAndEmptyDataSet(deviceIds, times, typesList, valuesList, measurementsList, 3);
-            pointSize.set(0);
-          }
-
-          boolean isFail = false;
-
-          for (Map.Entry<String, List<String>> entry : deviceAndMeasurementNames.entrySet()) {
-            String deviceId = entry.getKey();
-            List<String> measurementNames = entry.getValue();
-            ArrayList<TSDataType> types = new ArrayList<>();
-            ArrayList<Object> values = new ArrayList<>();
-            ArrayList<String> measurements = new ArrayList<>();
-            for (String measurement : measurementNames) {
-              String header = deviceId + "." + measurement;
-              String value = recordObj.get(headerNameMap.get(header));
-              if (!"".equals(value)) {
-                TSDataType type;
-                if (!headerTypeMap.containsKey(header)) {
-                  type = typeInfer(value);
-                  if (type != null) {
-                    headerTypeMap.put(header, type);
-                  } else {
-                    ioTPrinter.printf(
-                        "Line '%s', column '%s': '%s' unknown type%n",
-                        recordObj.getRecordNumber(), header, value);
-                    isFail = true;
-                  }
-                }
-                type = headerTypeMap.get(header);
-                if (type != null) {
-                  Object valueTrans = typeTrans(value, type);
-                  if (valueTrans == null) {
-                    isFail = true;
-                    ioTPrinter.printf(
-                        "Line '%s', column '%s': '%s' can't convert to '%s'%n",
-                        recordObj.getRecordNumber(), header, value, type);
-                  } else {
-                    measurements.add(header.replace(deviceId + '.', ""));
-                    types.add(type);
-                    values.add(valueTrans);
-                    pointSize.getAndIncrement();
-                  }
-                }
-              }
-            }
-            if (!measurements.isEmpty()) {
-              times.add(parseTimestamp(recordObj.get(timeColumn)));
-              deviceIds.add(deviceId);
-              typesList.add(types);
-              valuesList.add(values);
-              measurementsList.add(measurements);
-            }
-          }
-          if (isFail) {
-            failedRecords.add(recordObj.stream().collect(Collectors.toList()));
-          }
-        });
-    if (!deviceIds.isEmpty()) {
-      writeAndEmptyDataSet(deviceIds, times, typesList, valuesList, measurementsList, 3);
-      pointSize.set(0);
-    }
-
-    if (!failedRecords.isEmpty()) {
-      writeFailedLinesFile(headerNames, failedFilePath, failedRecords);
-    }
-    if (Boolean.TRUE.equals(hasStarted.get())) {
-      ioTPrinter.println("Import completely!");
-    } else {
-      ioTPrinter.println("No records!");
-    }
-  }
-
-  /**
-   * if the data is aligned by device, the data will be written by this method.
-   *
-   * @param headerNames the header names of CSV file
-   * @param records the records of CSV file
-   * @param failedFilePath the directory to save the failed files
-   */
-  @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning
-  private static void writeDataAlignedByDevice(
-      List<String> headerNames, Stream<CSVRecord> records, String failedFilePath)
-      throws IllegalPathException {
-    HashMap<String, TSDataType> headerTypeMap = new HashMap<>();
-    HashMap<String, String> headerNameMap = new HashMap<>();
-    parseHeaders(headerNames, null, headerTypeMap, headerNameMap);
-
-    AtomicReference<String> deviceName = new AtomicReference<>(null);
-
-    HashSet<String> typeQueriedDevice = new HashSet<>();
-
-    // the data that interface need
-    List<Long> times = new ArrayList<>();
-    List<List<TSDataType>> typesList = new ArrayList<>();
-    List<List<Object>> valuesList = new ArrayList<>();
-    List<List<String>> measurementsList = new ArrayList<>();
-
-    AtomicInteger pointSize = new AtomicInteger(0);
-
-    ArrayList<List<Object>> failedRecords = new ArrayList<>();
-
-    records.forEach(
-        recordObj -> {
-          // only run in first record
-          if (deviceName.get() == null) {
-            deviceName.set(recordObj.get(1));
-          } else if (!Objects.equals(deviceName.get(), recordObj.get(1))) {
-            // if device changed
-            writeAndEmptyDataSet(
-                deviceName.get(), times, typesList, valuesList, measurementsList, 3);
-            deviceName.set(recordObj.get(1));
-            pointSize.set(0);
-          } else if (pointSize.get() >= batchPointSize) {
-            // insert a batch
-            writeAndEmptyDataSet(
-                deviceName.get(), times, typesList, valuesList, measurementsList, 3);
-            pointSize.set(0);
-          }
-
-          // the data of the record
-          ArrayList<TSDataType> types = new ArrayList<>();
-          ArrayList<Object> values = new ArrayList<>();
-          ArrayList<String> measurements = new ArrayList<>();
-
-          AtomicReference<Boolean> isFail = new AtomicReference<>(false);
-
-          // read data from record
-          for (Map.Entry<String, String> headerNameEntry : headerNameMap.entrySet()) {
-            // headerNameWithoutType is equal to headerName if the CSV column do not have data type.
-            String headerNameWithoutType = headerNameEntry.getKey();
-            String headerName = headerNameEntry.getValue();
-            String value = recordObj.get(headerName);
-            if (!"".equals(value)) {
-              TSDataType type;
-              // Get the data type directly if the CSV column have data type.
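// [Editor's note - an illustrative sketch, not part of the patch; paths and values below
//  are hypothetical.] In the device-aligned layout each row carries its device in column 1,
//  and a column's type is resolved in this order: inline header tag, SHOW TIMESERIES
//  metadata, then typeInfer() on the literal itself. For instance:
//
//      Time,Device,s1(INT32),s2
//      1694689856000,root.sg.d1,42,1.5
//
//  s1 is pinned to INT32 by its header tag, while s2 falls through to typeInfer("1.5").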
-              if (!headerTypeMap.containsKey(headerNameWithoutType)) {
-                boolean hasResult = false;
-                // query the data type in iotdb
-                if (!typeQueriedDevice.contains(deviceName.get())) {
-                  if (headerTypeMap.isEmpty()) {
-                    Set<String> devices = new HashSet<>();
-                    devices.add(deviceName.get());
-                    queryType(devices, headerTypeMap, deviceColumn);
-                  }
-                  typeQueriedDevice.add(deviceName.get());
-                }
-                type = typeInfer(value);
-                if (type != null) {
-                  headerTypeMap.put(headerNameWithoutType, type);
-                } else {
-                  ioTPrinter.printf(
-                      "Line '%s', column '%s': '%s' unknown type%n",
-                      recordObj.getRecordNumber(), headerNameWithoutType, value);
-                  isFail.set(true);
-                }
-              }
-              type = headerTypeMap.get(headerNameWithoutType);
-              if (type != null) {
-                Object valueTrans = typeTrans(value, type);
-                if (valueTrans == null) {
-                  isFail.set(true);
-                  ioTPrinter.printf(
-                      "Line '%s', column '%s': '%s' can't convert to '%s'%n",
-                      recordObj.getRecordNumber(), headerNameWithoutType, value, type);
-                } else {
-                  values.add(valueTrans);
-                  measurements.add(headerNameWithoutType);
-                  types.add(type);
-                  pointSize.getAndIncrement();
-                }
-              }
-            }
-          }
-          if (Boolean.TRUE.equals(isFail.get())) {
-            failedRecords.add(recordObj.stream().collect(Collectors.toList()));
-          }
-          if (!measurements.isEmpty()) {
-            times.add(parseTimestamp(recordObj.get(timeColumn)));
-            typesList.add(types);
-            valuesList.add(values);
-            measurementsList.add(measurements);
-          }
-        });
-    if (!times.isEmpty()) {
-      writeAndEmptyDataSet(deviceName.get(), times, typesList, valuesList, measurementsList, 3);
-      pointSize.set(0);
-    }
-    if (!failedRecords.isEmpty()) {
-      writeFailedLinesFile(headerNames, failedFilePath, failedRecords);
-    }
-    ioTPrinter.println("Import completely!");
-  }
-
-  private static void writeFailedLinesFile(
-      List<String> headerNames, String failedFilePath, ArrayList<List<Object>> failedRecords) {
-    int fileIndex = 0;
-    int from = 0;
-    int failedRecordsSize = failedRecords.size();
-    int restFailedRecords = failedRecordsSize;
-    while (from < failedRecordsSize) {
-      int step = Math.min(restFailedRecords, linesPerFailedFile);
-      writeCsvFile(
-          headerNames,
-          failedRecords.subList(from, from + step),
-          failedFilePath + "_" + fileIndex++);
-      from += step;
-      restFailedRecords -= step;
-    }
-  }
-
-  private static void writeAndEmptyDataSet(
-      String device,
-      List<Long> times,
-      List<List<TSDataType>> typesList,
-      List<List<Object>> valuesList,
-      List<List<String>> measurementsList,
-      int retryTime) {
-    try {
-      if (Boolean.FALSE.equals(aligned)) {
-        session.insertRecordsOfOneDevice(device, times, measurementsList, typesList, valuesList);
-      } else {
-        session.insertAlignedRecordsOfOneDevice(
-            device, times, measurementsList, typesList, valuesList);
-      }
-    } catch (IoTDBConnectionException e) {
-      if (retryTime > 0) {
-        try {
-          session.open();
-        } catch (IoTDBConnectionException ex) {
-          ioTPrinter.println(INSERT_CSV_MEET_ERROR_MSG + e.getMessage());
-        }
-        writeAndEmptyDataSet(device, times, typesList, valuesList, measurementsList, --retryTime);
-      }
-    } catch (StatementExecutionException e) {
-      ioTPrinter.println(INSERT_CSV_MEET_ERROR_MSG + e.getMessage());
-      try {
-        session.close();
-      } catch (IoTDBConnectionException ex) {
-        // do nothing
-      }
-      System.exit(1);
-    } finally {
-      times.clear();
-      typesList.clear();
-      valuesList.clear();
-      measurementsList.clear();
-    }
-  }
-
-  private static void writeAndEmptyDataSet(
-      List<String> deviceIds,
-      List<Long> times,
-      List<List<TSDataType>> typesList,
-      List<List<Object>> valuesList,
-      List<List<String>> measurementsList,
-      int retryTime) {
-    try {
-      if (Boolean.FALSE.equals(aligned)) {
-        session.insertRecords(deviceIds, times, measurementsList, typesList,
valuesList); - } else { - session.insertAlignedRecords(deviceIds, times, measurementsList, typesList, valuesList); - } - } catch (IoTDBConnectionException e) { - if (retryTime > 0) { - try { - session.open(); - } catch (IoTDBConnectionException ex) { - ioTPrinter.println(INSERT_CSV_MEET_ERROR_MSG + e.getMessage()); - } - writeAndEmptyDataSet( - deviceIds, times, typesList, valuesList, measurementsList, --retryTime); - } - } catch (StatementExecutionException e) { - ioTPrinter.println(INSERT_CSV_MEET_ERROR_MSG + e.getMessage()); - try { - session.close(); - } catch (IoTDBConnectionException ex) { - // do nothing - } - System.exit(1); - } finally { - deviceIds.clear(); - times.clear(); - typesList.clear(); - valuesList.clear(); - measurementsList.clear(); - } - } - - /** - * read data from the CSV file - * - * @param path - * @return CSVParser csv parser - * @throws IOException when reading the csv file failed. - */ - private static CSVParser readCsvFile(String path) throws IOException { - return CSVFormat.Builder.create(CSVFormat.DEFAULT) - .setHeader() - .setSkipHeaderRecord(true) - .setQuote('`') - .setEscape('\\') - .setIgnoreEmptyLines(true) - .build() - .parse(new InputStreamReader(new FileInputStream(path))); - } - - /** - * parse deviceNames, measurementNames(aligned by time), headerType from headers - * - * @param headerNames - * @param deviceAndMeasurementNames - * @param headerTypeMap - * @param headerNameMap - */ - @SuppressWarnings( - "squid:S135") // ignore for loops should not contain more than a single "break" or "continue" - // statement - private static void parseHeaders( - List headerNames, - @Nullable HashMap> deviceAndMeasurementNames, - HashMap headerTypeMap, - HashMap headerNameMap) - throws IllegalPathException { - String regex = "(?<=\\()\\S+(?=\\))"; - Pattern pattern = Pattern.compile(regex); - for (String headerName : headerNames) { - if ("Time".equalsIgnoreCase(filterBomHeader(headerName))) { - timeColumn = headerName; - continue; - } else if ("Device".equalsIgnoreCase(headerName)) { - deviceColumn = headerName; - continue; - } - Matcher matcher = pattern.matcher(headerName); - String type; - String headerNameWithoutType; - if (matcher.find()) { - type = matcher.group(); - headerNameWithoutType = headerName.replace("(" + type + ")", "").replaceAll("\\s+", ""); - headerNameMap.put(headerNameWithoutType, headerName); - headerTypeMap.put(headerNameWithoutType, getType(type)); - } else { - headerNameWithoutType = headerName; - headerNameMap.put(headerName, headerName); - } - String[] split = PathUtils.splitPathToDetachedNodes(headerNameWithoutType); - String measurementName = split[split.length - 1]; - String deviceName = StringUtils.join(Arrays.copyOfRange(split, 0, split.length - 1), '.'); - if (deviceAndMeasurementNames != null) { - deviceAndMeasurementNames.putIfAbsent(deviceName, new ArrayList<>()); - deviceAndMeasurementNames.get(deviceName).add(measurementName); - } - } - } - - /** - * query data type of timeseries from IoTDB - * - * @param deviceNames - * @param headerTypeMap - * @param alignedType - * @throws IoTDBConnectionException - * @throws StatementExecutionException - */ - private static void queryType( - Set deviceNames, HashMap headerTypeMap, String alignedType) { - for (String deviceName : deviceNames) { - String sql = "show timeseries " + deviceName + ".*"; - SessionDataSet sessionDataSet = null; - try { - sessionDataSet = session.executeQueryStatement(sql); - int tsIndex = sessionDataSet.getColumnNames().indexOf(ColumnHeaderConstant.TIMESERIES); 
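// [Editor's note - illustrative only; the sample row is hypothetical and the column set
//  follows ColumnHeaderConstant.] queryType() reads just two columns of the result of
//  "show timeseries root.sg.d1.*", e.g.:
//      Timeseries: root.sg.d1.s1 ... DataType: FLOAT
//  and records root.sg.d1.s1 -> FLOAT in headerTypeMap (keyed by the last path node
//  instead when aligning by device).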
- int dtIndex = sessionDataSet.getColumnNames().indexOf(ColumnHeaderConstant.DATATYPE); - while (sessionDataSet.hasNext()) { - RowRecord rowRecord = sessionDataSet.next(); - List fields = rowRecord.getFields(); - String timeseries = fields.get(tsIndex).getStringValue(); - String dataType = fields.get(dtIndex).getStringValue(); - if (Objects.equals(alignedType, "Time")) { - headerTypeMap.put(timeseries, getType(dataType)); - } else if (Objects.equals(alignedType, deviceColumn)) { - String[] split = PathUtils.splitPathToDetachedNodes(timeseries); - String measurement = split[split.length - 1]; - headerTypeMap.put(measurement, getType(dataType)); - } - } - } catch (StatementExecutionException | IllegalPathException | IoTDBConnectionException e) { - ioTPrinter.println( - "Meet error when query the type of timeseries because " + e.getMessage()); - try { - session.close(); - } catch (IoTDBConnectionException ex) { - // do nothing - } - System.exit(1); - } - } - } - - /** - * return the TSDataType - * - * @param typeStr - * @return - */ - private static TSDataType getType(String typeStr) { - try { - return TSDataType.valueOf(typeStr); - } catch (Exception e) { - return null; - } - } - - /** - * if data type of timeseries is not defined in headers of schema, this method will be called to - * do type inference - * - * @param strValue - * @return - */ - private static TSDataType typeInfer(String strValue) { - if (strValue.contains("\"")) { - return strValue.length() <= 512 + 2 ? STRING : TEXT; - } - if (isBoolean(strValue)) { - return TYPE_INFER_KEY_DICT.get(DATATYPE_BOOLEAN); - } else if (isNumber(strValue)) { - if (!strValue.contains(TsFileConstant.PATH_SEPARATOR)) { - if (isConvertFloatPrecisionLack(StringUtils.trim(strValue))) { - return TYPE_INFER_KEY_DICT.get(DATATYPE_LONG); - } - return TYPE_INFER_KEY_DICT.get(DATATYPE_INT); - } else { - return TYPE_INFER_KEY_DICT.get(DATATYPE_FLOAT); - } - } else if (DATATYPE_NULL.equals(strValue) || DATATYPE_NULL.toUpperCase().equals(strValue)) { - return null; - // "NaN" is returned if the NaN Literal is given in Parser - } else if (DATATYPE_NAN.equals(strValue)) { - return TYPE_INFER_KEY_DICT.get(DATATYPE_NAN); - } else if (strValue.length() <= 512) { - return STRING; - } else { - return TEXT; - } - } - - static boolean isNumber(String s) { - if (s == null || s.equals(DATATYPE_NAN)) { - return false; - } - try { - Double.parseDouble(s); - } catch (NumberFormatException e) { - return false; - } - return true; - } - - private static boolean isBoolean(String s) { - return s.equalsIgnoreCase(SqlConstant.BOOLEAN_TRUE) - || s.equalsIgnoreCase(SqlConstant.BOOLEAN_FALSE); - } - - private static boolean isConvertFloatPrecisionLack(String s) { - return Long.parseLong(s) > (2 << 24); - } - - /** - * @param value - * @param type - * @return - */ - private static Object typeTrans(String value, TSDataType type) { - try { - switch (type) { - case TEXT: - case STRING: - if (value.startsWith("\"") && value.endsWith("\"")) { - return value.substring(1, value.length() - 1); - } - return value; - case BOOLEAN: - if (!"true".equalsIgnoreCase(value) && !"false".equalsIgnoreCase(value)) { - return null; - } - return Boolean.parseBoolean(value); - case INT32: - return Integer.parseInt(value); - case INT64: - return Long.parseLong(value); - case FLOAT: - return Float.parseFloat(value); - case DOUBLE: - return Double.parseDouble(value); - default: - return null; - } - } catch (NumberFormatException e) { - return null; - } - } - - private static long parseTimestamp(String str) { - 
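// [Editor's note - a sketch of the two accepted inputs; sample values are hypothetical.]
// A plain epoch such as "1694689856000" is used verbatim, while a datetime string such
// as "2023-09-14T18:30:56+08:00" raises NumberFormatException below and is converted
// via DateTimeUtils using the session zoneId and the configured timestampPrecision.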
long timestamp; - try { - timestamp = Long.parseLong(str); - } catch (NumberFormatException e) { - timestamp = DateTimeUtils.convertDatetimeStrToLong(str, zoneId, timestampPrecision); - } - return timestamp; - } - - private static String filterBomHeader(String s) { - byte[] bom = {(byte) 0xEF, (byte) 0xBB, (byte) 0xBF}; - byte[] bytes = Arrays.copyOf(s.getBytes(), 3); - if (Arrays.equals(bom, bytes)) { - return s.substring(1); - } - return s; - } -} diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/IoTDBDataBackTool.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/backup/IoTDBDataBackTool.java similarity index 99% rename from iotdb-client/cli/src/main/java/org/apache/iotdb/tool/IoTDBDataBackTool.java rename to iotdb-client/cli/src/main/java/org/apache/iotdb/tool/backup/IoTDBDataBackTool.java index 7cc3556562b9d..62f27acbf622e 100644 --- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/IoTDBDataBackTool.java +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/backup/IoTDBDataBackTool.java @@ -17,11 +17,12 @@ * under the License. */ -package org.apache.iotdb.tool; +package org.apache.iotdb.tool.backup; import org.apache.iotdb.commons.conf.CommonConfig; import org.apache.iotdb.commons.conf.IoTDBConstant; import org.apache.iotdb.db.conf.IoTDBDescriptor; +import org.apache.iotdb.tool.data.AbstractDataTool; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/AbstractDataTool.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/AbstractDataTool.java new file mode 100644 index 0000000000000..11f4ed56effcb --- /dev/null +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/AbstractDataTool.java @@ -0,0 +1,1455 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.tool.data; + +import org.apache.iotdb.cli.utils.IoTPrinter; +import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.commons.utils.PathUtils; +import org.apache.iotdb.db.queryengine.common.header.ColumnHeaderConstant; +import org.apache.iotdb.db.utils.DateTimeUtils; +import org.apache.iotdb.db.utils.constant.SqlConstant; +import org.apache.iotdb.exception.ArgsErrorException; +import org.apache.iotdb.isession.SessionDataSet; +import org.apache.iotdb.isession.pool.SessionDataSetWrapper; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.Session; +import org.apache.iotdb.session.pool.SessionPool; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.Option; +import org.apache.commons.cli.Options; +import org.apache.commons.csv.CSVFormat; +import org.apache.commons.csv.CSVParser; +import org.apache.commons.csv.CSVPrinter; +import org.apache.commons.csv.CSVRecord; +import org.apache.commons.csv.QuoteMode; +import org.apache.commons.lang3.ObjectUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.thrift.annotation.Nullable; +import org.apache.tsfile.common.constant.TsFileConstant; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.read.common.Field; +import org.apache.tsfile.read.common.RowRecord; +import org.apache.tsfile.utils.Binary; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.PrintWriter; +import java.time.LocalDate; +import java.time.ZoneId; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.atomic.LongAdder; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.apache.tsfile.enums.TSDataType.STRING; +import static org.apache.tsfile.enums.TSDataType.TEXT; + +public abstract class AbstractDataTool { + + protected static final String FILE_TYPE_ARGS = "ft"; + protected static final String FILE_TYPE_NAME = "file_type"; + protected static final String FILE_TYPE_ARGS_NAME = "format"; + + protected static final String HOST_ARGS = "h"; + protected static final String HOST_NAME = "host"; + protected static final String HOST_DEFAULT_VALUE = "127.0.0.1"; + + protected static final String HELP_ARGS = "help"; + + protected static final String PORT_ARGS = "p"; + protected static final String PORT_NAME = "port"; + protected static final String PORT_DEFAULT_VALUE = "6667"; + + protected static final String PW_ARGS = "pw"; + protected static final String PW_NAME = "password"; + protected static final String PW_DEFAULT_VALUE = "root"; + + protected static final String USERNAME_ARGS = "u"; + protected static final String USERNAME_NAME = "username"; + protected static final String USERNAME_DEFAULT_VALUE = "root"; + + protected static final String TIME_FORMAT_ARGS = "tf"; + protected static final String TIME_FORMAT_NAME = "time_format"; + + protected static final String TIME_ZONE_ARGS = "tz"; + protected static final String 
TIME_ZONE_NAME = "timezone"; + protected static final String TIMEOUT_ARGS = "timeout"; + protected static final String TIMEOUT_NAME = "query_timeout"; + protected static final int MAX_HELP_CONSOLE_WIDTH = 92; + protected static final String[] TIME_FORMAT = + new String[] {"default", "long", "number", "timestamp"}; + protected static final String[] STRING_TIME_FORMAT = + new String[] { + "yyyy-MM-dd HH:mm:ss.SSSX", + "yyyy/MM/dd HH:mm:ss.SSSX", + "yyyy.MM.dd HH:mm:ss.SSSX", + "yyyy-MM-dd HH:mm:ssX", + "yyyy/MM/dd HH:mm:ssX", + "yyyy.MM.dd HH:mm:ssX", + "yyyy-MM-dd HH:mm:ss.SSSz", + "yyyy/MM/dd HH:mm:ss.SSSz", + "yyyy.MM.dd HH:mm:ss.SSSz", + "yyyy-MM-dd HH:mm:ssz", + "yyyy/MM/dd HH:mm:ssz", + "yyyy.MM.dd HH:mm:ssz", + "yyyy-MM-dd HH:mm:ss.SSS", + "yyyy/MM/dd HH:mm:ss.SSS", + "yyyy.MM.dd HH:mm:ss.SSS", + "yyyy-MM-dd HH:mm:ss", + "yyyy/MM/dd HH:mm:ss", + "yyyy.MM.dd HH:mm:ss", + "yyyy-MM-dd'T'HH:mm:ss.SSSX", + "yyyy/MM/dd'T'HH:mm:ss.SSSX", + "yyyy.MM.dd'T'HH:mm:ss.SSSX", + "yyyy-MM-dd'T'HH:mm:ssX", + "yyyy/MM/dd'T'HH:mm:ssX", + "yyyy.MM.dd'T'HH:mm:ssX", + "yyyy-MM-dd'T'HH:mm:ss.SSSz", + "yyyy/MM/dd'T'HH:mm:ss.SSSz", + "yyyy.MM.dd'T'HH:mm:ss.SSSz", + "yyyy-MM-dd'T'HH:mm:ssz", + "yyyy/MM/dd'T'HH:mm:ssz", + "yyyy.MM.dd'T'HH:mm:ssz", + "yyyy-MM-dd'T'HH:mm:ss.SSS", + "yyyy/MM/dd'T'HH:mm:ss.SSS", + "yyyy.MM.dd'T'HH:mm:ss.SSS", + "yyyy-MM-dd'T'HH:mm:ss", + "yyyy/MM/dd'T'HH:mm:ss", + "yyyy.MM.dd'T'HH:mm:ss" + }; + protected static final String INSERT_CSV_MEET_ERROR_MSG = "Meet error when insert csv because "; + protected static final String CSV_SUFFIXS = "csv"; + protected static final String TXT_SUFFIXS = "txt"; + protected static final String SQL_SUFFIXS = "sql"; + protected static final String TSFILE_SUFFIXS = "tsfile"; + protected static final String TSFILEDB_CLI_DIVIDE = "-------------------"; + protected static final String COLON = ": "; + protected static final String MINUS = "-"; + protected static String failedFileDirectory = null; + protected static String timeColumn = "Time"; + protected static String deviceColumn = "Device"; + protected static int linesPerFailedFile = 10000; + protected static String timestampPrecision = "ms"; + protected static final int CODE_OK = 0; + protected static final int CODE_ERROR = 1; + + protected static String host; + protected static String port; + protected static String username; + protected static String password; + protected static ZoneId zoneId = ZoneId.systemDefault(); + protected static String timeZoneID; + protected static String timeFormat; + protected static String exportType; + protected static Boolean aligned; + protected static Session session; + protected static final LongAdder loadFileSuccessfulNum = new LongAdder(); + + protected static final String DATATYPE_BOOLEAN = "boolean"; + protected static final String DATATYPE_INT = "int"; + protected static final String DATATYPE_LONG = "long"; + protected static final String DATATYPE_FLOAT = "float"; + protected static final String DATATYPE_DOUBLE = "double"; + protected static final String DATATYPE_TIMESTAMP = "timestamp"; + protected static final String DATATYPE_DATE = "date"; + protected static final String DATATYPE_BLOB = "blob"; + protected static final String DATATYPE_NAN = "NaN"; + protected static final String DATATYPE_TEXT = "text"; + protected static final String DATATYPE_STRING = "string"; + + protected static final String DATATYPE_NULL = "null"; + protected static int batchPointSize = 100_000; + + protected static final Map TYPE_INFER_KEY_DICT = new HashMap<>(); + + static { + 
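// [Editor's note - clarification, not part of the patch.] This "key" dict holds the type
// chosen for each inferred literal category, deliberately widened (int -> FLOAT,
// long -> DOUBLE) so later fractional values in the same column still fit; the "value"
// dict below maps the right-hand side of a -typeInfer override. A hypothetical
// "-typeInfer int=long" thus rewrites TYPE_INFER_KEY_DICT["int"] to INT64 via
// TYPE_INFER_VALUE_DICT["long"].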
TYPE_INFER_KEY_DICT.put(DATATYPE_BOOLEAN, TSDataType.BOOLEAN); + TYPE_INFER_KEY_DICT.put(DATATYPE_INT, TSDataType.FLOAT); + TYPE_INFER_KEY_DICT.put(DATATYPE_LONG, TSDataType.DOUBLE); + TYPE_INFER_KEY_DICT.put(DATATYPE_FLOAT, TSDataType.FLOAT); + TYPE_INFER_KEY_DICT.put(DATATYPE_DOUBLE, TSDataType.DOUBLE); + TYPE_INFER_KEY_DICT.put(DATATYPE_TIMESTAMP, TSDataType.TIMESTAMP); + TYPE_INFER_KEY_DICT.put(DATATYPE_DATE, TSDataType.TIMESTAMP); + TYPE_INFER_KEY_DICT.put(DATATYPE_BLOB, TSDataType.BLOB); + TYPE_INFER_KEY_DICT.put(DATATYPE_NAN, TSDataType.DOUBLE); + TYPE_INFER_KEY_DICT.put(DATATYPE_STRING, TSDataType.STRING); + } + + protected static final Map TYPE_INFER_VALUE_DICT = new HashMap<>(); + + static { + TYPE_INFER_VALUE_DICT.put(DATATYPE_BOOLEAN, TSDataType.BOOLEAN); + TYPE_INFER_VALUE_DICT.put(DATATYPE_INT, TSDataType.INT32); + TYPE_INFER_VALUE_DICT.put(DATATYPE_LONG, TSDataType.INT64); + TYPE_INFER_VALUE_DICT.put(DATATYPE_FLOAT, TSDataType.FLOAT); + TYPE_INFER_VALUE_DICT.put(DATATYPE_DOUBLE, TSDataType.DOUBLE); + TYPE_INFER_VALUE_DICT.put(DATATYPE_TIMESTAMP, TSDataType.TIMESTAMP); + TYPE_INFER_VALUE_DICT.put(DATATYPE_DATE, TSDataType.DATE); + TYPE_INFER_VALUE_DICT.put(DATATYPE_BLOB, TSDataType.BLOB); + TYPE_INFER_VALUE_DICT.put(DATATYPE_TEXT, TSDataType.TEXT); + TYPE_INFER_VALUE_DICT.put(DATATYPE_STRING, TSDataType.STRING); + } + + private static final IoTPrinter ioTPrinter = new IoTPrinter(System.out); + private static final Logger LOGGER = LoggerFactory.getLogger(AbstractDataTool.class); + + protected AbstractDataTool() {} + + protected static String checkRequiredArg( + String arg, String name, CommandLine commandLine, String defaultValue) + throws ArgsErrorException { + String str = commandLine.getOptionValue(arg); + if (str == null) { + if (StringUtils.isNotBlank(defaultValue)) { + return defaultValue; + } + String msg = String.format("Required values for option '%s' not provided", name); + LOGGER.info(msg); + LOGGER.info("Use -help for more information"); + throw new ArgsErrorException(msg); + } + return str; + } + + protected static void setTimeZone() throws IoTDBConnectionException, StatementExecutionException { + if (timeZoneID != null) { + session.setTimeZone(timeZoneID); + } + zoneId = ZoneId.of(session.getTimeZone()); + } + + protected static void parseBasicParams(CommandLine commandLine) throws ArgsErrorException { + host = checkRequiredArg(HOST_ARGS, HOST_NAME, commandLine, HOST_DEFAULT_VALUE); + port = checkRequiredArg(PORT_ARGS, PORT_NAME, commandLine, PORT_DEFAULT_VALUE); + username = checkRequiredArg(USERNAME_ARGS, USERNAME_NAME, commandLine, USERNAME_DEFAULT_VALUE); + password = commandLine.getOptionValue(PW_ARGS, PW_DEFAULT_VALUE); + } + + protected static void printHelpOptions( + String cmdLineHead, + String cmdLineSyntax, + HelpFormatter hf, + Options tsFileOptions, + Options csvOptions, + Options sqlOptions, + boolean printFileType) { + ioTPrinter.println(TSFILEDB_CLI_DIVIDE + "\n" + cmdLineSyntax + "\n" + TSFILEDB_CLI_DIVIDE); + if (StringUtils.isNotBlank(cmdLineHead)) { + ioTPrinter.println(cmdLineHead); + } + final String usageName = cmdLineSyntax.replaceAll(" ", ""); + if (ObjectUtils.isNotEmpty(tsFileOptions)) { + if (printFileType) { + ioTPrinter.println( + '\n' + FILE_TYPE_NAME + COLON + TSFILE_SUFFIXS + '\n' + TSFILEDB_CLI_DIVIDE); + } + hf.printHelp(usageName, tsFileOptions, true); + } + if (ObjectUtils.isNotEmpty(csvOptions)) { + if (printFileType) { + ioTPrinter.println( + '\n' + FILE_TYPE_NAME + COLON + CSV_SUFFIXS + '\n' + TSFILEDB_CLI_DIVIDE); + } + 
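// [Editor's note - a hypothetical rendering of the grouped help output; exact wrapping
//  is up to HelpFormatter.] With printFileType set, each option group gets a banner:
//      -------------------
//      file_type: csv
//      -------------------
//      usage: <name> -ft <format> [-h <host>] [-p <port>] [-u <username>] [-pw <password>] ...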
hf.printHelp(usageName, csvOptions, true);
+    }
+    if (ObjectUtils.isNotEmpty(sqlOptions)) {
+      if (printFileType) {
+        ioTPrinter.println(
+            '\n' + FILE_TYPE_NAME + COLON + SQL_SUFFIXS + '\n' + TSFILEDB_CLI_DIVIDE);
+      }
+      hf.printHelp(usageName, sqlOptions, true);
+    }
+  }
+
+  protected static boolean checkTimeFormat() {
+    for (String format : TIME_FORMAT) {
+      if (timeFormat.equals(format)) {
+        return true;
+      }
+    }
+    for (String format : STRING_TIME_FORMAT) {
+      if (timeFormat.equals(format)) {
+        return true;
+      }
+    }
+    LOGGER.info(
+        "Input time format {} is not supported, "
+            + "please input like yyyy-MM-dd\\ HH:mm:ss.SSS or yyyy-MM-dd'T'HH:mm:ss.SSS",
+        timeFormat);
+    return false;
+  }
+
+  protected static Options createImportOptions() {
+    Options options = new Options();
+    Option opFileType =
+        Option.builder(FILE_TYPE_ARGS)
+            .longOpt(FILE_TYPE_NAME)
+            .argName(FILE_TYPE_ARGS_NAME)
+            .required()
+            .hasArg()
+            .desc("Types of imported files: CSV, SQL, TSfile (required)")
+            .build();
+    options.addOption(opFileType);
+    return createNewOptions(options);
+  }
+
+  protected static Options createExportOptions() {
+    Options options = new Options();
+    Option opFileType =
+        Option.builder(FILE_TYPE_ARGS)
+            .longOpt(FILE_TYPE_NAME)
+            .argName(FILE_TYPE_ARGS_NAME)
+            .required()
+            .hasArg()
+            .desc("Export file type: you can choose tsfile, csv or sql (required)")
+            .build();
+    options.addOption(opFileType);
+    return createNewOptions(options);
+  }
+
+  protected static Options createNewOptions(Options options) {
+    Option opHost =
+        Option.builder(HOST_ARGS)
+            .longOpt(HOST_NAME)
+            .argName(HOST_NAME)
+            .hasArg()
+            .desc("Host Name (optional)")
+            .build();
+    options.addOption(opHost);
+
+    Option opPort =
+        Option.builder(PORT_ARGS)
+            .longOpt(PORT_NAME)
+            .argName(PORT_NAME)
+            .hasArg()
+            .desc("Port (optional)")
+            .build();
+    options.addOption(opPort);
+
+    Option opUsername =
+        Option.builder(USERNAME_ARGS)
+            .longOpt(USERNAME_NAME)
+            .argName(USERNAME_NAME)
+            .hasArg()
+            .desc("Username (optional)")
+            .build();
+    options.addOption(opUsername);
+
+    Option opPassword =
+        Option.builder(PW_ARGS)
+            .longOpt(PW_NAME)
+            .optionalArg(true)
+            .argName(PW_NAME)
+            .hasArg()
+            .desc("Password (optional)")
+            .build();
+    options.addOption(opPassword);
+    return options;
+  }
+
+  /**
+   * if the data is aligned by device, the data will be written by this method.
+   *
+   * @param headerNames the header names of CSV file
+   * @param records the records of CSV file
+   * @param failedFilePath the directory to save the failed files
+   */
+  @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning
+  protected static void writeDataAlignedByDevice(
+      SessionPool sessionPool,
+      List<String> headerNames,
+      Stream<CSVRecord> records,
+      String failedFilePath)
+      throws IllegalPathException {
+    HashMap<String, TSDataType> headerTypeMap = new HashMap<>();
+    HashMap<String, String> headerNameMap = new HashMap<>();
+    parseHeaders(headerNames, null, headerTypeMap, headerNameMap);
+
+    AtomicReference<String> deviceName = new AtomicReference<>(null);
+
+    HashSet<String> typeQueriedDevice = new HashSet<>();
+
+    // the data that the insert interface needs
+    List<Long> times = new ArrayList<>();
+    List<List<TSDataType>> typesList = new ArrayList<>();
+    List<List<Object>> valuesList = new ArrayList<>();
+    List<List<String>> measurementsList = new ArrayList<>();
+
+    AtomicInteger pointSize = new AtomicInteger(0);
+
+    ArrayList<List<Object>> failedRecords = new ArrayList<>();
+
+    records.forEach(
+        recordObj -> {
+          // only runs for the first record
+          if (deviceName.get() == null) {
+            deviceName.set(recordObj.get(1));
+          } else if (!Objects.equals(deviceName.get(), recordObj.get(1))) {
+            // the device changed
+            writeAndEmptyDataSet(
+                sessionPool, deviceName.get(), times, typesList, valuesList, measurementsList, 3);
+            deviceName.set(recordObj.get(1));
+            pointSize.set(0);
+          } else if (pointSize.get() >= batchPointSize) {
+            // insert a batch
+            writeAndEmptyDataSet(
+                sessionPool, deviceName.get(), times, typesList, valuesList, measurementsList, 3);
+            pointSize.set(0);
+          }
+
+          // the data of the record
+          ArrayList<TSDataType> types = new ArrayList<>();
+          ArrayList<Object> values = new ArrayList<>();
+          ArrayList<String> measurements = new ArrayList<>();
+
+          AtomicReference<Boolean> isFail = new AtomicReference<>(false);
+
+          // read data from record
+          for (Map.Entry<String, String> headerNameEntry : headerNameMap.entrySet()) {
+            // headerNameWithoutType is equal to headerName if the CSV column does not have a data type.
+            String headerNameWithoutType = headerNameEntry.getKey();
+            String headerName = headerNameEntry.getValue();
+            String value = recordObj.get(headerName);
+            if (!"".equals(value)) {
+              TSDataType type;
+              // Get the data type directly if the CSV column has a data type.
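// [Editor's note - illustrative sketch, not part of the patch; paths and values are
//  hypothetical.] Rows are assumed to arrive grouped by the Device column: a buffered
//  batch is flushed whenever the device changes or batchPointSize is reached, e.g.
//      1694689856000,root.sg.d1,42   <- buffered
//      1694689857000,root.sg.d1,43   <- same device, buffered
//      1694689856000,root.sg.d2,7    <- device changed: flush d1, start buffering d2
//  so unsorted input still imports correctly, but it flushes far more often.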
+              if (!headerTypeMap.containsKey(headerNameWithoutType)) {
+                boolean hasResult = false;
+                // query the data type in iotdb
+                if (!typeQueriedDevice.contains(deviceName.get())) {
+                  if (headerTypeMap.isEmpty()) {
+                    Set<String> devices = new HashSet<>();
+                    devices.add(deviceName.get());
+                    queryType(sessionPool, devices, headerTypeMap, deviceColumn);
+                  }
+                  typeQueriedDevice.add(deviceName.get());
+                }
+                type = typeInfer(value);
+                if (type != null) {
+                  headerTypeMap.put(headerNameWithoutType, type);
+                } else {
+                  ioTPrinter.printf(
+                      "Line '%s', column '%s': '%s' unknown type%n",
+                      recordObj.getRecordNumber(), headerNameWithoutType, value);
+                  isFail.set(true);
+                }
+              }
+              type = headerTypeMap.get(headerNameWithoutType);
+              if (type != null) {
+                Object valueTrans = typeTrans(value, type);
+                if (valueTrans == null) {
+                  isFail.set(true);
+                  ioTPrinter.printf(
+                      "Line '%s', column '%s': '%s' can't convert to '%s'%n",
+                      recordObj.getRecordNumber(), headerNameWithoutType, value, type);
+                } else {
+                  values.add(valueTrans);
+                  measurements.add(headerNameWithoutType);
+                  types.add(type);
+                  pointSize.getAndIncrement();
+                }
+              }
+            }
+          }
+          if (Boolean.TRUE.equals(isFail.get())) {
+            failedRecords.add(recordObj.stream().collect(Collectors.toList()));
+          }
+          if (!measurements.isEmpty()) {
+            times.add(parseTimestamp(recordObj.get(timeColumn)));
+            typesList.add(types);
+            valuesList.add(values);
+            measurementsList.add(measurements);
+          }
+        });
+    if (!times.isEmpty()) {
+      writeAndEmptyDataSet(
+          sessionPool, deviceName.get(), times, typesList, valuesList, measurementsList, 3);
+      pointSize.set(0);
+    }
+    if (!failedRecords.isEmpty()) {
+      writeFailedLinesFile(headerNames, failedFilePath, failedRecords);
+    }
+    // ioTPrinter.println("Import completely!");
+  }
+
+  private static void writeAndEmptyDataSet(
+      SessionPool sessionPool,
+      String device,
+      List<Long> times,
+      List<List<TSDataType>> typesList,
+      List<List<Object>> valuesList,
+      List<List<String>> measurementsList,
+      int retryTime) {
+    try {
+      if (Boolean.FALSE.equals(aligned)) {
+        sessionPool.insertRecordsOfOneDevice(
+            device, times, measurementsList, typesList, valuesList);
+      } else {
+        sessionPool.insertAlignedRecordsOfOneDevice(
+            device, times, measurementsList, typesList, valuesList);
+      }
+    } catch (IoTDBConnectionException e) {
+      if (retryTime > 0) {
+        writeAndEmptyDataSet(
+            sessionPool, device, times, typesList, valuesList, measurementsList, --retryTime);
+      }
+    } catch (StatementExecutionException e) {
+      ioTPrinter.println(INSERT_CSV_MEET_ERROR_MSG + e.getMessage());
+      System.exit(1);
+    } finally {
+      times.clear();
+      typesList.clear();
+      valuesList.clear();
+      measurementsList.clear();
+    }
+  }
+
+  private static void writeAndEmptyDataSet(
+      Session session,
+      List<String> deviceIds,
+      List<Long> times,
+      List<List<TSDataType>> typesList,
+      List<List<Object>> valuesList,
+      List<List<String>> measurementsList,
+      int retryTime) {
+    try {
+      if (Boolean.FALSE.equals(aligned)) {
+        session.insertRecords(deviceIds, times, measurementsList, typesList, valuesList);
+      } else {
+        session.insertAlignedRecords(deviceIds, times, measurementsList, typesList, valuesList);
+      }
+    } catch (IoTDBConnectionException e) {
+      if (retryTime > 0) {
+        try {
+          session.open();
+        } catch (IoTDBConnectionException ex) {
+          ioTPrinter.println(INSERT_CSV_MEET_ERROR_MSG + e.getMessage());
+        }
+        writeAndEmptyDataSet(
+            session, deviceIds, times, typesList, valuesList, measurementsList, --retryTime);
+      }
+    } catch (StatementExecutionException e) {
+      ioTPrinter.println(INSERT_CSV_MEET_ERROR_MSG + e.getMessage());
+      try {
+        session.close();
+      } catch (IoTDBConnectionException ex) {
+        // do nothing
+      }
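// [Editor's note - a summary of the error-handling split, not part of the patch.]
// IoTDBConnectionException is retried up to retryTime times after re-opening the
// session, while StatementExecutionException is treated as fatal: the session is
// closed and the process exits below. Either way the finally block clears the
// buffers, so a batch whose retries are exhausted is dropped silently.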
System.exit(1); + } finally { + deviceIds.clear(); + times.clear(); + typesList.clear(); + valuesList.clear(); + measurementsList.clear(); + } + } + + private static void writeAndEmptyDataSet( + Session session, + String device, + List times, + List> typesList, + List> valuesList, + List> measurementsList, + int retryTime) { + try { + if (Boolean.FALSE.equals(aligned)) { + session.insertRecordsOfOneDevice(device, times, measurementsList, typesList, valuesList); + } else { + session.insertAlignedRecordsOfOneDevice( + device, times, measurementsList, typesList, valuesList); + } + } catch (IoTDBConnectionException e) { + if (retryTime > 0) { + try { + session.open(); + } catch (IoTDBConnectionException ex) { + ioTPrinter.println(INSERT_CSV_MEET_ERROR_MSG + e.getMessage()); + } + writeAndEmptyDataSet( + session, device, times, typesList, valuesList, measurementsList, --retryTime); + } + } catch (StatementExecutionException e) { + ioTPrinter.println(INSERT_CSV_MEET_ERROR_MSG + e.getMessage()); + try { + session.close(); + } catch (IoTDBConnectionException ex) { + // do nothing + } + System.exit(1); + } finally { + times.clear(); + typesList.clear(); + valuesList.clear(); + measurementsList.clear(); + } + } + + private static void writeAndEmptyDataSet( + SessionPool sessionPool, + List deviceIds, + List times, + List> typesList, + List> valuesList, + List> measurementsList, + int retryTime) { + try { + if (Boolean.FALSE.equals(aligned)) { + sessionPool.insertRecords(deviceIds, times, measurementsList, typesList, valuesList); + } else { + sessionPool.insertAlignedRecords(deviceIds, times, measurementsList, typesList, valuesList); + } + } catch (IoTDBConnectionException e) { + if (retryTime > 0) { + writeAndEmptyDataSet( + sessionPool, deviceIds, times, typesList, valuesList, measurementsList, --retryTime); + } + } catch (StatementExecutionException e) { + ioTPrinter.println(INSERT_CSV_MEET_ERROR_MSG + e.getMessage()); + System.exit(1); + } finally { + deviceIds.clear(); + times.clear(); + typesList.clear(); + valuesList.clear(); + measurementsList.clear(); + } + } + + private static void writeFailedLinesFile( + List headerNames, String failedFilePath, ArrayList> failedRecords) { + int fileIndex = 0; + int from = 0; + int failedRecordsSize = failedRecords.size(); + int restFailedRecords = failedRecordsSize; + while (from < failedRecordsSize) { + int step = Math.min(restFailedRecords, linesPerFailedFile); + writeCsvFile( + headerNames, + failedRecords.subList(from, from + step), + failedFilePath + "_" + fileIndex++); + from += step; + restFailedRecords -= step; + } + } + + /** + * if data type of timeseries is not defined in headers of schema, this method will be called to + * do type inference + * + * @param strValue + * @return + */ + private static TSDataType typeInfer(String strValue) { + if (strValue.contains("\"")) { + return strValue.length() <= 512 + 2 ? 
STRING : TEXT; + } + if (isBoolean(strValue)) { + return TYPE_INFER_KEY_DICT.get(DATATYPE_BOOLEAN); + } else if (isTimeStamp(strValue)) { + return TYPE_INFER_KEY_DICT.get(DATATYPE_TIMESTAMP); + } else if (isNumber(strValue)) { + if (!strValue.contains(TsFileConstant.PATH_SEPARATOR)) { + if (isConvertFloatPrecisionLack(StringUtils.trim(strValue))) { + return TYPE_INFER_KEY_DICT.get(DATATYPE_LONG); + } + return TYPE_INFER_KEY_DICT.get(DATATYPE_INT); + } else { + return TYPE_INFER_KEY_DICT.get(DATATYPE_FLOAT); + } + } else if (DATATYPE_NULL.equals(strValue) || DATATYPE_NULL.toUpperCase().equals(strValue)) { + return null; + // "NaN" is returned if the NaN Literal is given in Parser + } else if (DATATYPE_NAN.equals(strValue)) { + return TYPE_INFER_KEY_DICT.get(DATATYPE_NAN); + } else if (isDate(strValue)) { + return TYPE_INFER_KEY_DICT.get(DATATYPE_DATE); + } else if (isBlob(strValue)) { + return TYPE_INFER_KEY_DICT.get(DATATYPE_BLOB); + } else if (strValue.length() <= 512) { + return STRING; + } else { + return TEXT; + } + } + + private static boolean isDate(String s) { + return s.equalsIgnoreCase(DATATYPE_DATE); + } + + private static boolean isTimeStamp(String s) { + return s.equalsIgnoreCase(DATATYPE_TIMESTAMP); + } + + static boolean isNumber(String s) { + if (s == null || s.equals(DATATYPE_NAN)) { + return false; + } + try { + Double.parseDouble(s); + } catch (NumberFormatException e) { + return false; + } + return true; + } + + private static boolean isBlob(String s) { + return s.length() >= 3 && s.startsWith("X'") && s.endsWith("'"); + } + + private static boolean isBoolean(String s) { + return s.equalsIgnoreCase(SqlConstant.BOOLEAN_TRUE) + || s.equalsIgnoreCase(SqlConstant.BOOLEAN_FALSE); + } + + private static boolean isConvertFloatPrecisionLack(String s) { + return Long.parseLong(s) > (2 << 24); + } + + /** + * @param value + * @param type + * @return + */ + private static Object typeTrans(String value, TSDataType type) { + try { + switch (type) { + case TEXT: + case STRING: + if (value.startsWith("\"") && value.endsWith("\"")) { + return value.substring(1, value.length() - 1); + } + return value; + case BOOLEAN: + if (!"true".equalsIgnoreCase(value) && !"false".equalsIgnoreCase(value)) { + return null; + } + return Boolean.parseBoolean(value); + case INT32: + return Integer.parseInt(value); + case INT64: + return Long.parseLong(value); + case FLOAT: + return Float.parseFloat(value); + case DOUBLE: + return Double.parseDouble(value); + case TIMESTAMP: + return Long.parseLong(value); + case DATE: + return LocalDate.parse(value); + case BLOB: + return new Binary(parseHexStringToByteArray(value.replaceFirst("0x", ""))); + default: + return null; + } + } catch (NumberFormatException e) { + return null; + } + } + + private static byte[] parseHexStringToByteArray(String hexString) { + byte[] bytes = new byte[hexString.length() / 2]; + for (int i = 0; i < hexString.length(); i += 2) { + int value = Integer.parseInt(hexString.substring(i, i + 2), 16); + bytes[i / 2] = (byte) value; + } + return bytes; + } + + private static long parseTimestamp(String str) { + long timestamp; + try { + timestamp = Long.parseLong(str); + } catch (NumberFormatException e) { + timestamp = DateTimeUtils.convertDatetimeStrToLong(str, zoneId, timestampPrecision); + } + return timestamp; + } + + /** + * query data type of timeseries from IoTDB + * + * @param deviceNames + * @param headerTypeMap + * @param alignedType + * @throws IoTDBConnectionException + * @throws StatementExecutionException + */ + private static 
void queryType(
+      SessionPool sessionPool,
+      Set<String> deviceNames,
+      HashMap<String, TSDataType> headerTypeMap,
+      String alignedType) {
+    for (String deviceName : deviceNames) {
+      String sql = "show timeseries " + deviceName + ".*";
+      try (SessionDataSetWrapper sessionDataSetWrapper = sessionPool.executeQueryStatement(sql)) {
+        int tsIndex =
+            sessionDataSetWrapper.getColumnNames().indexOf(ColumnHeaderConstant.TIMESERIES);
+        int dtIndex = sessionDataSetWrapper.getColumnNames().indexOf(ColumnHeaderConstant.DATATYPE);
+        while (sessionDataSetWrapper.hasNext()) {
+          RowRecord rowRecord = sessionDataSetWrapper.next();
+          List<Field> fields = rowRecord.getFields();
+          String timeseries = fields.get(tsIndex).getStringValue();
+          String dataType = fields.get(dtIndex).getStringValue();
+          if (Objects.equals(alignedType, "Time")) {
+            headerTypeMap.put(timeseries, getType(dataType));
+          } else if (Objects.equals(alignedType, deviceColumn)) {
+            String[] split = PathUtils.splitPathToDetachedNodes(timeseries);
+            String measurement = split[split.length - 1];
+            headerTypeMap.put(measurement, getType(dataType));
+          }
+        }
+      } catch (StatementExecutionException | IllegalPathException | IoTDBConnectionException e) {
+        ioTPrinter.println(
+            "Meet error when query the type of timeseries because " + e.getMessage());
+        System.exit(1);
+      }
+    }
+  }
+
+  /**
+   * query data type of timeseries from IoTDB
+   *
+   * @param deviceNames
+   * @param headerTypeMap
+   * @param alignedType
+   * @throws IoTDBConnectionException
+   * @throws StatementExecutionException
+   */
+  private static void queryType(
+      Session session,
+      Set<String> deviceNames,
+      HashMap<String, TSDataType> headerTypeMap,
+      String alignedType) {
+    for (String deviceName : deviceNames) {
+      String sql = "show timeseries " + deviceName + ".*";
+      SessionDataSet sessionDataSet = null;
+      try {
+        sessionDataSet = session.executeQueryStatement(sql);
+        int tsIndex = sessionDataSet.getColumnNames().indexOf(ColumnHeaderConstant.TIMESERIES);
+        int dtIndex = sessionDataSet.getColumnNames().indexOf(ColumnHeaderConstant.DATATYPE);
+        while (sessionDataSet.hasNext()) {
+          RowRecord rowRecord = sessionDataSet.next();
+          List<Field> fields = rowRecord.getFields();
+          String timeseries = fields.get(tsIndex).getStringValue();
+          String dataType = fields.get(dtIndex).getStringValue();
+          if (Objects.equals(alignedType, "Time")) {
+            headerTypeMap.put(timeseries, getType(dataType));
+          } else if (Objects.equals(alignedType, deviceColumn)) {
+            String[] split = PathUtils.splitPathToDetachedNodes(timeseries);
+            String measurement = split[split.length - 1];
+            headerTypeMap.put(measurement, getType(dataType));
+          }
+        }
+      } catch (StatementExecutionException | IllegalPathException | IoTDBConnectionException e) {
+        ioTPrinter.println(
+            "Meet error when query the type of timeseries because " + e.getMessage());
+        try {
+          session.close();
+        } catch (IoTDBConnectionException ex) {
+          // do nothing
+        }
+        System.exit(1);
+      }
+    }
+  }
+
+  /**
+   * parse deviceNames, measurementNames(aligned by time), headerType from headers
+   *
+   * @param headerNames
+   * @param deviceAndMeasurementNames
+   * @param headerTypeMap
+   * @param headerNameMap
+   */
+  @SuppressWarnings(
+      "squid:S135") // ignore for loops should not contain more than a single "break" or "continue"
+  // statement
+  private static void parseHeaders(
+      List<String> headerNames,
+      @Nullable HashMap<String, List<String>> deviceAndMeasurementNames,
+      HashMap<String, TSDataType> headerTypeMap,
+      HashMap<String, String> headerNameMap)
+      throws IllegalPathException {
+    String regex = "(?<=\\()\\S+(?=\\))";
+    Pattern pattern = Pattern.compile(regex);
+    for (String headerName : headerNames) {
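// [Editor's note - a worked example of the header grammar; the names are hypothetical.]
// The lookaround regex "(?<=\\()\\S+(?=\\))" captures an inline type tag, so:
//     "root.sg.d1.s1(INT32)" -> headerNameWithoutType "root.sg.d1.s1", type INT32
//     "root.sg.d1.s2"        -> no tag; type resolved later via SHOW TIMESERIES/inference
//     "Time" / "Device"      -> remembered as the time and device columns and skipped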
if ("Time".equalsIgnoreCase(filterBomHeader(headerName))) { + timeColumn = headerName; + continue; + } else if ("Device".equalsIgnoreCase(headerName)) { + deviceColumn = headerName; + continue; + } + Matcher matcher = pattern.matcher(headerName); + String type; + String headerNameWithoutType; + if (matcher.find()) { + type = matcher.group(); + headerNameWithoutType = headerName.replace("(" + type + ")", "").replaceAll("\\s+", ""); + headerNameMap.put(headerNameWithoutType, headerName); + headerTypeMap.put(headerNameWithoutType, getType(type)); + } else { + headerNameWithoutType = headerName; + headerNameMap.put(headerName, headerName); + } + String[] split = PathUtils.splitPathToDetachedNodes(headerNameWithoutType); + String measurementName = split[split.length - 1]; + String deviceName = StringUtils.join(Arrays.copyOfRange(split, 0, split.length - 1), '.'); + if (deviceAndMeasurementNames != null) { + deviceAndMeasurementNames.putIfAbsent(deviceName, new ArrayList<>()); + deviceAndMeasurementNames.get(deviceName).add(measurementName); + } + } + } + + /** + * return the TSDataType + * + * @param typeStr + * @return + */ + private static TSDataType getType(String typeStr) { + try { + return TSDataType.valueOf(typeStr); + } catch (Exception e) { + return null; + } + } + + /** + * if the data is aligned by time, the data will be written by this method. + * + * @param headerNames the header names of CSV file + * @param records the records of CSV file + * @param failedFilePath the directory to save the failed files + */ + @SuppressWarnings("squid:S3776") + protected static void writeDataAlignedByTime( + SessionPool sessionPool, + List headerNames, + Stream records, + String failedFilePath) + throws IllegalPathException { + HashMap> deviceAndMeasurementNames = new HashMap<>(); + HashMap headerTypeMap = new HashMap<>(); + HashMap headerNameMap = new HashMap<>(); + parseHeaders(headerNames, deviceAndMeasurementNames, headerTypeMap, headerNameMap); + + Set devices = deviceAndMeasurementNames.keySet(); + if (headerTypeMap.isEmpty()) { + queryType(sessionPool, devices, headerTypeMap, "Time"); + } + + List deviceIds = new ArrayList<>(); + List times = new ArrayList<>(); + List> measurementsList = new ArrayList<>(); + List> typesList = new ArrayList<>(); + List> valuesList = new ArrayList<>(); + + AtomicReference hasStarted = new AtomicReference<>(false); + AtomicInteger pointSize = new AtomicInteger(0); + + ArrayList> failedRecords = new ArrayList<>(); + + records.forEach( + recordObj -> { + if (Boolean.FALSE.equals(hasStarted.get())) { + hasStarted.set(true); + } else if (pointSize.get() >= batchPointSize) { + writeAndEmptyDataSet( + sessionPool, deviceIds, times, typesList, valuesList, measurementsList, 3); + pointSize.set(0); + } + + boolean isFail = false; + + for (Map.Entry> entry : deviceAndMeasurementNames.entrySet()) { + String deviceId = entry.getKey(); + List measurementNames = entry.getValue(); + ArrayList types = new ArrayList<>(); + ArrayList values = new ArrayList<>(); + ArrayList measurements = new ArrayList<>(); + for (String measurement : measurementNames) { + String header = deviceId + "." 
+ measurement; + String value = recordObj.get(headerNameMap.get(header)); + if (!"".equals(value)) { + TSDataType type; + if (!headerTypeMap.containsKey(header)) { + queryType(sessionPool, header, headerTypeMap); + if (!headerTypeMap.containsKey(header)) { + type = typeInfer(value); + if (type != null) { + headerTypeMap.put(header, type); + } else { + ioTPrinter.printf( + "Line '%s', column '%s': '%s' unknown type%n", + recordObj.getRecordNumber(), header, value); + isFail = true; + } + } + } + type = headerTypeMap.get(header); + if (type != null) { + Object valueTrans = typeTrans(value, type); + if (valueTrans == null) { + isFail = true; + ioTPrinter.printf( + "Line '%s', column '%s': '%s' can't convert to '%s'%n", + recordObj.getRecordNumber(), header, value, type); + } else { + measurements.add(header.replace(deviceId + '.', "")); + types.add(type); + values.add(valueTrans); + pointSize.getAndIncrement(); + } + } + } + } + if (!measurements.isEmpty()) { + times.add(parseTimestamp(recordObj.get(timeColumn))); + deviceIds.add(deviceId); + typesList.add(types); + valuesList.add(values); + measurementsList.add(measurements); + } + } + if (isFail) { + failedRecords.add(recordObj.stream().collect(Collectors.toList())); + } + }); + if (!deviceIds.isEmpty()) { + writeAndEmptyDataSet( + sessionPool, deviceIds, times, typesList, valuesList, measurementsList, 3); + pointSize.set(0); + } + + if (!failedRecords.isEmpty()) { + writeFailedLinesFile(headerNames, failedFilePath, failedRecords); + } + // if (Boolean.TRUE.equals(hasStarted.get())) { + // ioTPrinter.println("Import completely!"); + // } else { + // ioTPrinter.println("No records!"); + // } + } + + private static void queryType( + SessionPool sessionPool, String series, HashMap headerTypeMap) { + String sql = "show timeseries " + series; + try (SessionDataSetWrapper sessionDataSetWrapper = sessionPool.executeQueryStatement(sql)) { + int tsIndex = sessionDataSetWrapper.getColumnNames().indexOf(ColumnHeaderConstant.TIMESERIES); + int dtIndex = sessionDataSetWrapper.getColumnNames().indexOf(ColumnHeaderConstant.DATATYPE); + while (sessionDataSetWrapper.hasNext()) { + RowRecord rowRecord = sessionDataSetWrapper.next(); + List fields = rowRecord.getFields(); + String timeseries = fields.get(tsIndex).getStringValue(); + String dataType = fields.get(dtIndex).getStringValue(); + if (Objects.equals(series, timeseries)) { + headerTypeMap.put(timeseries, getType(dataType)); + } + } + } catch (StatementExecutionException | IoTDBConnectionException e) { + ioTPrinter.println("Meet error when query the type of timeseries because " + e.getMessage()); + System.exit(1); + } + } + + /** + * if the data is aligned by device, the data will be written by this method. 
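+   *
+   * <p>A minimal sketch of the CSV layout expected in this mode (illustrative values, not from
+   * the source), with the device path in the second column and bare measurement names after it:
+   *
+   * <pre>
+   * Time,Device,s1,s2
+   * 1694689856546,root.sg.d1,1.2,true
+   * 1694689856547,root.sg.d1,1.3,false
+   * </pre>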
+ * + * @param headerNames the header names of CSV file + * @param records the records of CSV file + * @param failedFilePath the directory to save the failed files + */ + @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning + protected static void writeDataAlignedByDevice( + Session session, List headerNames, Stream records, String failedFilePath) + throws IllegalPathException { + HashMap headerTypeMap = new HashMap<>(); + HashMap headerNameMap = new HashMap<>(); + parseHeaders(headerNames, null, headerTypeMap, headerNameMap); + + AtomicReference deviceName = new AtomicReference<>(null); + + HashSet typeQueriedDevice = new HashSet<>(); + + // the data that interface need + List times = new ArrayList<>(); + List> typesList = new ArrayList<>(); + List> valuesList = new ArrayList<>(); + List> measurementsList = new ArrayList<>(); + + AtomicInteger pointSize = new AtomicInteger(0); + + ArrayList> failedRecords = new ArrayList<>(); + + records.forEach( + recordObj -> { + // only run in first record + if (deviceName.get() == null) { + deviceName.set(recordObj.get(1)); + } else if (!Objects.equals(deviceName.get(), recordObj.get(1))) { + // if device changed + writeAndEmptyDataSet( + session, deviceName.get(), times, typesList, valuesList, measurementsList, 3); + deviceName.set(recordObj.get(1)); + pointSize.set(0); + } else if (pointSize.get() >= batchPointSize) { + // insert a batch + writeAndEmptyDataSet( + session, deviceName.get(), times, typesList, valuesList, measurementsList, 3); + pointSize.set(0); + } + + // the data of the record + ArrayList types = new ArrayList<>(); + ArrayList values = new ArrayList<>(); + ArrayList measurements = new ArrayList<>(); + + AtomicReference isFail = new AtomicReference<>(false); + + // read data from record + for (Map.Entry headerNameEntry : headerNameMap.entrySet()) { + // headerNameWithoutType is equal to headerName if the CSV column do not have data type. + String headerNameWithoutType = headerNameEntry.getKey(); + String headerName = headerNameEntry.getValue(); + String value = recordObj.get(headerName); + if (!"".equals(value)) { + TSDataType type; + // Get the data type directly if the CSV column have data type. 
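+                // Otherwise the type is resolved lazily: the first time a device appears
+                // with no known column types, its timeseries are looked up in IoTDB via
+                // queryType(), and typeInfer() guesses a type from the raw string value
+                // when the series does not exist there yet.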
+ if (!headerTypeMap.containsKey(headerNameWithoutType)) { + // query the data type in iotdb + if (!typeQueriedDevice.contains(deviceName.get())) { + if (headerTypeMap.isEmpty()) { + Set devices = new HashSet<>(); + devices.add(deviceName.get()); + queryType(session, devices, headerTypeMap, deviceColumn); + } + typeQueriedDevice.add(deviceName.get()); + } + if (!headerTypeMap.containsKey(headerNameWithoutType)) { + type = typeInfer(value); + if (type != null) { + headerTypeMap.put(headerNameWithoutType, type); + } else { + ioTPrinter.printf( + "Line '%s', column '%s': '%s' unknown type%n", + recordObj.getRecordNumber(), headerNameWithoutType, value); + isFail.set(true); + } + } + } + type = headerTypeMap.get(headerNameWithoutType); + if (type != null) { + Object valueTrans = typeTrans(value, type); + if (valueTrans == null) { + isFail.set(true); + ioTPrinter.printf( + "Line '%s', column '%s': '%s' can't convert to '%s'%n", + recordObj.getRecordNumber(), headerNameWithoutType, value, type); + } else { + values.add(valueTrans); + measurements.add(headerNameWithoutType); + types.add(type); + pointSize.getAndIncrement(); + } + } + } + } + if (Boolean.TRUE.equals(isFail.get())) { + failedRecords.add(recordObj.stream().collect(Collectors.toList())); + } + if (!measurements.isEmpty()) { + times.add(parseTimestamp(recordObj.get(timeColumn))); + typesList.add(types); + valuesList.add(values); + measurementsList.add(measurements); + } + }); + if (!times.isEmpty()) { + writeAndEmptyDataSet( + session, deviceName.get(), times, typesList, valuesList, measurementsList, 3); + pointSize.set(0); + } + if (!failedRecords.isEmpty()) { + writeFailedLinesFile(headerNames, failedFilePath, failedRecords); + } + ioTPrinter.println("Import completely!"); + } + + /** + * if the data is aligned by time, the data will be written by this method. 
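+   *
+   * <p>A minimal sketch of the CSV layout expected in this mode (illustrative values, not from
+   * the source): each column header is a full timeseries path with an optional declared type.
+   *
+   * <pre>
+   * Time,root.sg.d1.s1,root.sg.d1.s2(TEXT)
+   * 1694689856546,1.2,"hello"
+   * </pre>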
+ * + * @param headerNames the header names of CSV file + * @param records the records of CSV file + * @param failedFilePath the directory to save the failed files + */ + @SuppressWarnings("squid:S3776") + protected static void writeDataAlignedByTime( + Session session, List headerNames, Stream records, String failedFilePath) + throws IllegalPathException { + HashMap> deviceAndMeasurementNames = new HashMap<>(); + HashMap headerTypeMap = new HashMap<>(); + HashMap headerNameMap = new HashMap<>(); + parseHeaders(headerNames, deviceAndMeasurementNames, headerTypeMap, headerNameMap); + + Set devices = deviceAndMeasurementNames.keySet(); + if (headerTypeMap.isEmpty()) { + queryType(session, devices, headerTypeMap, "Time"); + } + + List deviceIds = new ArrayList<>(); + List times = new ArrayList<>(); + List> measurementsList = new ArrayList<>(); + List> typesList = new ArrayList<>(); + List> valuesList = new ArrayList<>(); + + AtomicReference hasStarted = new AtomicReference<>(false); + AtomicInteger pointSize = new AtomicInteger(0); + + ArrayList> failedRecords = new ArrayList<>(); + + records.forEach( + recordObj -> { + if (Boolean.FALSE.equals(hasStarted.get())) { + hasStarted.set(true); + } else if (pointSize.get() >= batchPointSize) { + writeAndEmptyDataSet( + session, deviceIds, times, typesList, valuesList, measurementsList, 3); + pointSize.set(0); + } + + boolean isFail = false; + + for (Map.Entry> entry : deviceAndMeasurementNames.entrySet()) { + String deviceId = entry.getKey(); + List measurementNames = entry.getValue(); + ArrayList types = new ArrayList<>(); + ArrayList values = new ArrayList<>(); + ArrayList measurements = new ArrayList<>(); + for (String measurement : measurementNames) { + String header = deviceId + "." + measurement; + String value = recordObj.get(headerNameMap.get(header)); + if (!"".equals(value)) { + TSDataType type; + if (!headerTypeMap.containsKey(header)) { + type = typeInfer(value); + if (type != null) { + headerTypeMap.put(header, type); + } else { + ioTPrinter.printf( + "Line '%s', column '%s': '%s' unknown type%n", + recordObj.getRecordNumber(), header, value); + isFail = true; + } + } + type = headerTypeMap.get(header); + if (type != null) { + Object valueTrans = typeTrans(value, type); + if (valueTrans == null) { + isFail = true; + ioTPrinter.printf( + "Line '%s', column '%s': '%s' can't convert to '%s'%n", + recordObj.getRecordNumber(), header, value, type); + } else { + measurements.add(header.replace(deviceId + '.', "")); + types.add(type); + values.add(valueTrans); + pointSize.getAndIncrement(); + } + } + } + } + if (!measurements.isEmpty()) { + times.add(parseTimestamp(recordObj.get(timeColumn))); + deviceIds.add(deviceId); + typesList.add(types); + valuesList.add(values); + measurementsList.add(measurements); + } + } + if (isFail) { + failedRecords.add(recordObj.stream().collect(Collectors.toList())); + } + }); + if (!deviceIds.isEmpty()) { + writeAndEmptyDataSet(session, deviceIds, times, typesList, valuesList, measurementsList, 3); + pointSize.set(0); + } + + if (!failedRecords.isEmpty()) { + writeFailedLinesFile(headerNames, failedFilePath, failedRecords); + } + if (Boolean.TRUE.equals(hasStarted.get())) { + ioTPrinter.println("Import completely!"); + } else { + ioTPrinter.println("No records!"); + } + } + + protected static String filterBomHeader(String s) { + byte[] bom = {(byte) 0xEF, (byte) 0xBB, (byte) 0xBF}; + byte[] bytes = Arrays.copyOf(s.getBytes(), 3); + if (Arrays.equals(bom, bytes)) { + return s.substring(1); + } + return s; + } + 
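+  // Usage note (illustrative, not from the source): a UTF-8 CSV exported by some
+  // spreadsheet tools starts with the BOM bytes 0xEF 0xBB 0xBF, so the first header
+  // is read back as "\uFEFF" + "Time". filterBomHeader() strips that marker, which is
+  // why the header comparisons above pass the first column through it before matching
+  // "Time".
+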
+  protected static Options createHelpOptions() {
+    final Options options = new Options();
+    Option opHelp =
+        Option.builder(HELP_ARGS)
+            .longOpt(HELP_ARGS)
+            .hasArg()
+            .desc("Display help information")
+            .build();
+    options.addOption(opHelp);
+
+    Option opFileType =
+        Option.builder(FILE_TYPE_ARGS)
+            .longOpt(FILE_TYPE_NAME)
+            .argName(FILE_TYPE_ARGS_NAME)
+            .hasArg()
+            .desc("File type. You can choose tsfile, csv or sql. (required)")
+            .build();
+    options.addOption(opFileType);
+    return options;
+  }
+
+  /**
+   * Read data from the CSV file.
+   *
+   * @param path the path of the CSV file to read
+   * @return CSVParser csv parser
+   * @throws IOException when reading the CSV file fails.
+   */
+  protected static CSVParser readCsvFile(String path) throws IOException {
+    return CSVFormat.Builder.create(CSVFormat.DEFAULT)
+        .setHeader()
+        .setSkipHeaderRecord(true)
+        .setQuote('`')
+        .setEscape('\\')
+        .setIgnoreEmptyLines(true)
+        .build()
+        .parse(new InputStreamReader(new FileInputStream(path)));
+  }
+
+  /**
+   * Write data to a CSV file.
+   *
+   * @param headerNames the header names of the CSV file
+   * @param records the records of the CSV file
+   * @param filePath the path of the file to write
+   */
+  public static Boolean writeCsvFile(
+      List<String> headerNames, List<List<Object>> records, String filePath) {
+    try {
+      final CSVPrinterWrapper csvPrinterWrapper = new CSVPrinterWrapper(filePath);
+      if (headerNames != null) {
+        csvPrinterWrapper.printRecord(headerNames);
+      }
+      for (List<Object> csvRecord : records) {
+        csvPrinterWrapper.printRecord(csvRecord);
+      }
+      csvPrinterWrapper.flush();
+      csvPrinterWrapper.close();
+      return true;
+    } catch (IOException e) {
+      ioTPrinter.printException(e);
+      return false;
+    }
+  }
+
+  static class CSVPrinterWrapper {
+    private final String filePath;
+    private final CSVFormat csvFormat;
+    private CSVPrinter csvPrinter;
+
+    public CSVPrinterWrapper(String filePath) {
+      this.filePath = filePath;
+      this.csvFormat =
+          CSVFormat.Builder.create(CSVFormat.DEFAULT)
+              .setHeader()
+              .setSkipHeaderRecord(true)
+              .setEscape('\\')
+              .setQuoteMode(QuoteMode.NONE)
+              .build();
+    }
+
+    public void printRecord(final Iterable<?> values) throws IOException {
+      if (csvPrinter == null) {
+        csvPrinter = csvFormat.print(new PrintWriter(filePath));
+      }
+      csvPrinter.printRecord(values);
+    }
+
+    public void print(Object value) {
+      if (csvPrinter == null) {
+        try {
+          csvPrinter = csvFormat.print(new PrintWriter(filePath));
+        } catch (IOException e) {
+          ioTPrinter.printException(e);
+          return;
+        }
+      }
+      try {
+        csvPrinter.print(value);
+      } catch (IOException e) {
+        ioTPrinter.printException(e);
+      }
+    }
+
+    public void println() throws IOException {
+      csvPrinter.println();
+    }
+
+    public void close() throws IOException {
+      if (csvPrinter != null) {
+        csvPrinter.close();
+      }
+    }
+
+    public void flush() throws IOException {
+      if (csvPrinter != null) {
+        csvPrinter.flush();
+      }
+    }
+  }
+}
diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/AsyncImportData.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/AsyncImportData.java
new file mode 100644
index 0000000000000..6500ef670834f
--- /dev/null
+++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/AsyncImportData.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.tool.data; + +import org.apache.iotdb.cli.utils.IoTPrinter; +import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.pool.SessionPool; + +import org.apache.commons.csv.CSVParser; +import org.apache.commons.csv.CSVRecord; +import org.apache.commons.lang3.ObjectUtils; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Stream; + +public class AsyncImportData extends AbstractDataTool implements Runnable { + + private static final IoTPrinter ioTPrinter = new IoTPrinter(System.out); + private static SessionPool sessionPool; + + protected static void setTimeZone() throws IoTDBConnectionException, StatementExecutionException { + if (timeZoneID != null) { + sessionPool.setTimeZone(timeZoneID); + zoneId = sessionPool.getZoneId(); + } + } + + @Override + public void run() { + String filePath; + try { + while ((filePath = ImportDataScanTool.pollFromQueue()) != null) { + File file = new File(filePath); + if (file.getName().endsWith(SQL_SUFFIXS)) { + importFromSqlFile(file); + } else { + importFromSingleFile(file); + } + } + } catch (Exception e) { + ioTPrinter.println("Unexpected error occurred: " + e.getMessage()); + } + } + + protected static void processSuccessFile() { + loadFileSuccessfulNum.increment(); + } + + @SuppressWarnings("java:S2259") + private static void importFromSqlFile(File file) { + ArrayList> failedRecords = new ArrayList<>(); + String failedFilePath; + if (failedFileDirectory == null) { + failedFilePath = file.getAbsolutePath() + ".failed"; + } else { + failedFilePath = failedFileDirectory + file.getName() + ".failed"; + } + try (BufferedReader br = new BufferedReader(new FileReader(file.getAbsolutePath()))) { + String sql; + while ((sql = br.readLine()) != null) { + try { + sessionPool.executeNonQueryStatement(sql); + } catch (IoTDBConnectionException | StatementExecutionException e) { + failedRecords.add(Arrays.asList(sql)); + } + } + processSuccessFile(); + } catch (IOException e) { + ioTPrinter.println("SQL file read exception because: " + e.getMessage()); + } + if (!failedRecords.isEmpty()) { + FileWriter writer = null; + try { + writer = new FileWriter(failedFilePath); + for (List failedRecord : failedRecords) { + writer.write(failedRecord.get(0).toString() + "\n"); + } + } catch (IOException e) { + ioTPrinter.println("Cannot dump fail result because: " + e.getMessage()); + } finally { + if (ObjectUtils.isNotEmpty(writer)) { + try { + writer.flush(); + writer.close(); + } catch (IOException e) { + ; + } + } + } + } + } + + private static void importFromSingleFile(File file) { + if (file.getName().endsWith(CSV_SUFFIXS) || 
file.getName().endsWith(TXT_SUFFIXS)) { + try { + CSVParser csvRecords = readCsvFile(file.getAbsolutePath()); + List headerNames = csvRecords.getHeaderNames(); + Stream records = csvRecords.stream(); + if (headerNames.isEmpty()) { + ioTPrinter.println("Empty file!"); + return; + } + if (!timeColumn.equalsIgnoreCase(filterBomHeader(headerNames.get(0)))) { + ioTPrinter.println("The first field of header must be `Time`!"); + return; + } + String failedFilePath; + if (failedFileDirectory == null) { + failedFilePath = file.getAbsolutePath() + ".failed"; + } else { + failedFilePath = failedFileDirectory + file.getName() + ".failed"; + } + if (!deviceColumn.equalsIgnoreCase(headerNames.get(1))) { + writeDataAlignedByTime(sessionPool, headerNames, records, failedFilePath); + } else { + writeDataAlignedByDevice(sessionPool, headerNames, records, failedFilePath); + } + processSuccessFile(); + } catch (IOException | IllegalPathException e) { + ioTPrinter.println("CSV file read exception because: " + e.getMessage()); + } + } else { + ioTPrinter.println("The file name must end with \"csv\" or \"txt\"!"); + } + } + + public static void setSessionPool(SessionPool sessionPool) { + AsyncImportData.sessionPool = sessionPool; + } + + public static void setAligned(Boolean aligned) { + AsyncImportData.aligned = aligned; + } +} diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ExportData.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/ExportData.java similarity index 59% rename from iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ExportData.java rename to iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/ExportData.java index 08918bcf2af28..67ee5f6d46e7e 100644 --- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ExportData.java +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/ExportData.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.apache.iotdb.tool; +package org.apache.iotdb.tool.data; import org.apache.iotdb.cli.type.ExitType; import org.apache.iotdb.cli.utils.CliContext; @@ -29,6 +29,7 @@ import org.apache.iotdb.rpc.RpcUtils; import org.apache.iotdb.rpc.StatementExecutionException; import org.apache.iotdb.session.Session; +import org.apache.iotdb.tool.tsfile.ExportTsFile; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; @@ -39,11 +40,13 @@ import org.apache.commons.cli.ParseException; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.ObjectUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.thrift.TException; import org.apache.tsfile.enums.TSDataType; import org.apache.tsfile.read.common.Field; import org.apache.tsfile.read.common.Path; import org.apache.tsfile.read.common.RowRecord; +import org.apache.tsfile.utils.BytesUtils; import org.jline.reader.LineReader; import java.io.BufferedReader; @@ -52,9 +55,12 @@ import java.io.FileWriter; import java.io.IOException; import java.time.Instant; +import java.time.LocalDate; +import java.time.ZoneId; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.regex.Matcher; @@ -70,36 +76,35 @@ public class ExportData extends AbstractDataTool { private static final String TARGET_DIR_ARGS = "t"; - private static final String TARGET_DIR_NAME = "targetDirectory"; + private static final String TARGET_DIR_NAME = "target"; + private static final String TARGET_DIR_ARGS_NAME = "target_directory"; - private static final String TARGET_FILE_ARGS = "tfn"; - private static final String TARGET_FILE_NAME = "targetFileName"; + private static final String TARGET_FILE_ARGS = "pfn"; + private static final String TARGET_FILE_NAME = "prefix_file_name"; private static final String SQL_FILE_ARGS = "s"; private static final String SQL_FILE_NAME = "sourceSqlFile"; - private static final String DATA_TYPE_ARGS = "datatype"; + private static final String DATA_TYPE_ARGS = "dt"; private static final String DATA_TYPE_NAME = "datatype"; private static final String QUERY_COMMAND_ARGS = "q"; - private static final String QUERY_COMMAND_NAME = "queryCommand"; - - private static final String EXPORT_TYPE_ARGS = "type"; - - private static final String EXPORT_TYPE_NAME = "exportType"; + private static final String QUERY_COMMAND_NAME = "query"; + private static final String QUERY_COMMAND_ARGS_NAME = "query_command"; private static final String EXPORT_SQL_TYPE_NAME = "sql"; private static final String ALIGNED_ARGS = "aligned"; - private static final String ALIGNED_NAME = "export aligned insert sql"; + private static final String ALIGNED_NAME = "export_aligned"; + private static final String ALIGNED_ARGS_NAME = "export aligned insert sql"; private static final String LINES_PER_FILE_ARGS = "lpf"; - private static final String LINES_PER_FILE_ARGS_NAME = "linesPerFile"; + private static final String LINES_PER_FILE_NAME = "lines_per_file"; - private static final String TSFILEDB_CLI_PREFIX = "ExportData"; + private static final String TSFILEDB_CLI_PREFIX = "Export Data"; private static final String DUMP_FILE_NAME_DEFAULT = "dump"; private static String targetFile = DUMP_FILE_NAME_DEFAULT; - + private static Session session; private static String targetDirectory; private static Boolean needDataTypePrinted; @@ -113,8 +118,14 @@ public class ExportData extends 
AbstractDataTool { private static long timeout = -1; private static Boolean aligned = false; + private static String fileType = null; private static final IoTPrinter ioTPrinter = new IoTPrinter(System.out); + private static final String TSFILEDB_CLI_HEAD = + "Please obtain help information for the corresponding data type based on different parameters, for example:\n" + + "./export_data.sh -help tsfile\n" + + "./export_data.sh -help sql\n" + + "./export_data.sh -help csv"; @SuppressWarnings({ "squid:S3776", @@ -122,7 +133,10 @@ public class ExportData extends AbstractDataTool { }) // Suppress high Cognitive Complexity warning, ignore try-with-resources /* main function of export csv tool. */ public static void main(String[] args) { - Options options = createOptions(); + Options helpOptions = createHelpOptions(); + Options tsFileOptions = createTsFileOptions(); + Options csvOptions = createCsvOptions(); + Options sqlOptions = createSqlOptions(); HelpFormatter hf = new HelpFormatter(); CommandLine commandLine = null; CommandLineParser parser = new DefaultParser(); @@ -130,19 +144,110 @@ public static void main(String[] args) { hf.setWidth(MAX_HELP_CONSOLE_WIDTH); if (args == null || args.length == 0) { - ioTPrinter.println("Too few params input, please check the following hint."); - hf.printHelp(TSFILEDB_CLI_PREFIX, options, true); + printHelpOptions( + TSFILEDB_CLI_HEAD, TSFILEDB_CLI_PREFIX, hf, tsFileOptions, csvOptions, sqlOptions, true); System.exit(CODE_ERROR); } try { - commandLine = parser.parse(options, args); + commandLine = parser.parse(helpOptions, args, true); } catch (ParseException e) { - ioTPrinter.println(e.getMessage()); - hf.printHelp(TSFILEDB_CLI_PREFIX, options, true); + printHelpOptions( + TSFILEDB_CLI_HEAD, TSFILEDB_CLI_PREFIX, hf, tsFileOptions, csvOptions, sqlOptions, true); System.exit(CODE_ERROR); } - if (commandLine.hasOption(HELP_ARGS)) { - hf.printHelp(TSFILEDB_CLI_PREFIX, options, true); + final List argList = Arrays.asList(args); + int helpIndex = argList.indexOf(MINUS + HELP_ARGS); + int ftIndex = argList.indexOf(MINUS + FILE_TYPE_ARGS); + if (ftIndex < 0) { + ftIndex = argList.indexOf(MINUS + FILE_TYPE_NAME); + } + if (helpIndex >= 0) { + fileType = argList.get(helpIndex + 1); + if (StringUtils.isNotBlank(fileType)) { + if (TSFILE_SUFFIXS.equalsIgnoreCase(fileType)) { + printHelpOptions(null, TSFILEDB_CLI_PREFIX, hf, tsFileOptions, null, null, false); + } else if (CSV_SUFFIXS.equalsIgnoreCase(fileType)) { + printHelpOptions(null, TSFILEDB_CLI_PREFIX, hf, null, csvOptions, null, false); + } else if (SQL_SUFFIXS.equalsIgnoreCase(fileType)) { + printHelpOptions(null, TSFILEDB_CLI_PREFIX, hf, null, null, sqlOptions, false); + } else { + ioTPrinter.println(String.format("File type %s is not support", fileType)); + printHelpOptions( + TSFILEDB_CLI_HEAD, + TSFILEDB_CLI_PREFIX, + hf, + tsFileOptions, + csvOptions, + sqlOptions, + true); + } + } else { + printHelpOptions( + TSFILEDB_CLI_HEAD, + TSFILEDB_CLI_PREFIX, + hf, + tsFileOptions, + csvOptions, + sqlOptions, + true); + } + System.exit(CODE_ERROR); + } else if (ftIndex >= 0) { + fileType = argList.get(ftIndex + 1); + if (StringUtils.isNotBlank(fileType)) { + if (TSFILE_SUFFIXS.equalsIgnoreCase(fileType)) { + try { + commandLine = parser.parse(tsFileOptions, args, true); + ExportTsFile exportTsFile = new ExportTsFile(commandLine); + exportTsFile.exportTsfile(CODE_OK); + } catch (ParseException e) { + ioTPrinter.println("Parse error: " + e.getMessage()); + printHelpOptions(null, TSFILEDB_CLI_PREFIX, hf, 
tsFileOptions, null, null, false); + System.exit(CODE_ERROR); + } + } else if (CSV_SUFFIXS.equalsIgnoreCase(fileType)) { + try { + commandLine = parser.parse(csvOptions, args, true); + } catch (ParseException e) { + ioTPrinter.println("Parse error: " + e.getMessage()); + printHelpOptions(null, TSFILEDB_CLI_PREFIX, hf, null, csvOptions, null, false); + System.exit(CODE_ERROR); + } + } else if (SQL_SUFFIXS.equalsIgnoreCase(fileType)) { + try { + commandLine = parser.parse(sqlOptions, args, true); + } catch (ParseException e) { + ioTPrinter.println("Parse error: " + e.getMessage()); + printHelpOptions(null, TSFILEDB_CLI_PREFIX, hf, null, null, sqlOptions, false); + System.exit(CODE_ERROR); + } + } else { + ioTPrinter.println(String.format("File type %s is not support", fileType)); + printHelpOptions( + TSFILEDB_CLI_HEAD, + TSFILEDB_CLI_PREFIX, + hf, + tsFileOptions, + csvOptions, + sqlOptions, + true); + System.exit(CODE_ERROR); + } + } else { + printHelpOptions( + TSFILEDB_CLI_HEAD, + TSFILEDB_CLI_PREFIX, + hf, + tsFileOptions, + csvOptions, + sqlOptions, + true); + System.exit(CODE_ERROR); + } + } else { + ioTPrinter.println( + String.format( + "Invalid args: Required values for option '%s' not provided", FILE_TYPE_NAME)); System.exit(CODE_ERROR); } int exitCode = CODE_OK; @@ -208,73 +313,95 @@ public static void main(String[] args) { System.exit(exitCode); } - private static void parseSpecialParams(CommandLine commandLine) throws ArgsErrorException { - targetDirectory = checkRequiredArg(TARGET_DIR_ARGS, TARGET_DIR_NAME, commandLine, null); - targetFile = commandLine.getOptionValue(TARGET_FILE_ARGS); - needDataTypePrinted = Boolean.valueOf(commandLine.getOptionValue(DATA_TYPE_ARGS)); - queryCommand = commandLine.getOptionValue(QUERY_COMMAND_ARGS); - exportType = commandLine.getOptionValue(EXPORT_TYPE_ARGS); - String timeoutString = commandLine.getOptionValue(TIMEOUT_ARGS); - if (timeoutString != null) { - timeout = Long.parseLong(timeoutString); - } - if (needDataTypePrinted == null) { - needDataTypePrinted = true; - } - if (targetFile == null) { - targetFile = DUMP_FILE_NAME_DEFAULT; - } - timeFormat = commandLine.getOptionValue(TIME_FORMAT_ARGS); - if (timeFormat == null) { - timeFormat = "default"; - } - timeZoneID = commandLine.getOptionValue(TIME_ZONE_ARGS); - if (!targetDirectory.endsWith("/") && !targetDirectory.endsWith("\\")) { - targetDirectory += File.separator; - } - if (commandLine.getOptionValue(LINES_PER_FILE_ARGS) != null) { - linesPerFile = Integer.parseInt(commandLine.getOptionValue(LINES_PER_FILE_ARGS)); - } - if (commandLine.getOptionValue(ALIGNED_ARGS) != null) { - aligned = Boolean.valueOf(commandLine.getOptionValue(ALIGNED_ARGS)); - } + private static Options createTsFileOptions() { + Options options = createExportOptions(); + + Option opFile = + Option.builder(TARGET_DIR_ARGS) + .required() + .longOpt(TARGET_DIR_NAME) + .argName(TARGET_DIR_ARGS_NAME) + .hasArg() + .desc("Target File Directory (required)") + .build(); + options.addOption(opFile); + + Option opOnSuccess = + Option.builder(TARGET_FILE_ARGS) + .longOpt(TARGET_FILE_NAME) + .argName(TARGET_FILE_NAME) + .hasArg() + .desc("Export file name (optional)") + .build(); + options.addOption(opOnSuccess); + + Option opQuery = + Option.builder(QUERY_COMMAND_ARGS) + .longOpt(QUERY_COMMAND_NAME) + .argName(QUERY_COMMAND_ARGS_NAME) + .hasArg() + .desc("The query command that you want to execute. 
(optional)") + .build(); + options.addOption(opQuery); + + Option opTimeOut = + Option.builder(TIMEOUT_ARGS) + .longOpt(TIMEOUT_NAME) + .argName(TIMEOUT_NAME) + .hasArg() + .desc("Timeout for session query (optional)") + .build(); + options.addOption(opTimeOut); + + return options; } - /** - * commandline option create. - * - * @return object Options - */ - private static Options createOptions() { - Options options = createNewOptions(); + private static Options createCsvOptions() { + Options options = createExportOptions(); - Option opTargetFile = + Option opFile = Option.builder(TARGET_DIR_ARGS) .required() - .argName(TARGET_DIR_NAME) + .longOpt(TARGET_DIR_NAME) + .argName(TARGET_DIR_ARGS_NAME) .hasArg() .desc("Target File Directory (required)") .build(); - options.addOption(opTargetFile); + options.addOption(opFile); - Option targetFileName = + Option opOnSuccess = Option.builder(TARGET_FILE_ARGS) + .longOpt(TARGET_FILE_NAME) .argName(TARGET_FILE_NAME) .hasArg() .desc("Export file name (optional)") .build(); - options.addOption(targetFileName); + options.addOption(opOnSuccess); - Option opSqlFile = - Option.builder(SQL_FILE_ARGS) - .argName(SQL_FILE_NAME) + Option opDataType = + Option.builder(DATA_TYPE_ARGS) + .longOpt(DATA_TYPE_NAME) + .argName(DATA_TYPE_NAME) .hasArg() - .desc("SQL File Path (optional)") + .desc( + "Will the data type of timeseries be printed in the head line of the CSV file?" + + '\n' + + "You can choose true) or false) . (optional)") .build(); - options.addOption(opSqlFile); + options.addOption(opDataType); + + Option opLinesPerFile = + Option.builder(LINES_PER_FILE_ARGS) + .longOpt(LINES_PER_FILE_NAME) + .argName(LINES_PER_FILE_NAME) + .hasArg() + .desc("Lines per dump file.(optional)") + .build(); + options.addOption(opLinesPerFile); Option opTimeFormat = Option.builder(TIME_FORMAT_ARGS) + .longOpt(TIME_FORMAT_NAME) .argName(TIME_FORMAT_NAME) .hasArg() .desc( @@ -286,73 +413,160 @@ private static Options createOptions() { Option opTimeZone = Option.builder(TIME_ZONE_ARGS) + .longOpt(TIMEOUT_NAME) .argName(TIME_ZONE_NAME) .hasArg() .desc("Time Zone eg. +08:00 or -01:00 (optional)") .build(); options.addOption(opTimeZone); - Option opDataType = - Option.builder(DATA_TYPE_ARGS) - .argName(DATA_TYPE_NAME) - .hasArg() - .desc( - "Will the data type of timeseries be printed in the head line of the CSV file?" - + '\n' - + "You can choose true) or false) . (optional)") - .build(); - options.addOption(opDataType); - Option opQuery = Option.builder(QUERY_COMMAND_ARGS) - .argName(QUERY_COMMAND_NAME) + .longOpt(QUERY_COMMAND_NAME) + .argName(QUERY_COMMAND_ARGS_NAME) .hasArg() .desc("The query command that you want to execute. (optional)") .build(); options.addOption(opQuery); - Option opTypeQuery = - Option.builder(EXPORT_TYPE_ARGS) - .argName(EXPORT_TYPE_NAME) + Option opTimeOut = + Option.builder(TIMEOUT_ARGS) + .longOpt(TIMEOUT_NAME) + .argName(TIMEOUT_NAME) + .hasArg() + .desc("Timeout for session query (optional)") + .build(); + options.addOption(opTimeOut); + + return options; + } + + private static Options createSqlOptions() { + Options options = createExportOptions(); + + Option opFile = + Option.builder(TARGET_DIR_ARGS) + .required() + .longOpt(TARGET_DIR_NAME) + .argName(TARGET_DIR_ARGS_NAME) + .hasArg() + .desc("Target File Directory (required)") + .build(); + options.addOption(opFile); + + Option opOnSuccess = + Option.builder(TARGET_FILE_ARGS) + .longOpt(TARGET_FILE_NAME) + .argName(TARGET_FILE_NAME) .hasArg() - .desc("Export file type ?" 
+ '\n' + "You can choose csv) or sql) . (optional)") + .desc("Export file name (optional)") .build(); - options.addOption(opTypeQuery); + options.addOption(opOnSuccess); Option opAligned = Option.builder(ALIGNED_ARGS) - .argName(ALIGNED_NAME) - .hasArg() - .desc("Whether export to sql of aligned (only sql optional)") + .longOpt(ALIGNED_NAME) + .argName(ALIGNED_ARGS_NAME) + .hasArgs() + .desc("Whether export to sql of aligned (optional)") .build(); options.addOption(opAligned); + Option opTimeFormat = + Option.builder(TIME_FORMAT_ARGS) + .longOpt(TIME_FORMAT_NAME) + .argName(TIME_FORMAT_NAME) + .hasArg() + .desc( + "Output time Format in csv file. " + + "You can choose 1) timestamp, number, long 2) ISO8601, default 3) " + + "user-defined pattern like yyyy-MM-dd HH:mm:ss, default ISO8601.\n OutPut timestamp in sql file, No matter what time format is set(optional)") + .build(); + options.addOption(opTimeFormat); + + Option opTimeZone = + Option.builder(TIME_ZONE_ARGS) + .longOpt(TIME_ZONE_NAME) + .argName(TIME_ZONE_NAME) + .hasArg() + .desc("Time Zone eg. +08:00 or -01:00 (optional)") + .build(); + options.addOption(opTimeZone); + Option opLinesPerFile = Option.builder(LINES_PER_FILE_ARGS) - .argName(LINES_PER_FILE_ARGS_NAME) + .longOpt(LINES_PER_FILE_NAME) + .argName(LINES_PER_FILE_NAME) .hasArg() - .desc("Lines per dump file.") + .desc("Lines per dump file.(optional)") .build(); options.addOption(opLinesPerFile); - Option opHelp = - Option.builder(HELP_ARGS) - .longOpt(HELP_ARGS) - .hasArg(false) - .desc("Display help information") + Option opQuery = + Option.builder(QUERY_COMMAND_ARGS) + .longOpt(QUERY_COMMAND_NAME) + .argName(QUERY_COMMAND_ARGS_NAME) + .hasArg() + .desc("The query command that you want to execute. (optional)") .build(); - options.addOption(opHelp); + options.addOption(opQuery); - Option opTimeout = + Option opTimeOut = Option.builder(TIMEOUT_ARGS) .longOpt(TIMEOUT_NAME) + .argName(TIMEOUT_NAME) .hasArg() - .desc("Timeout for session query") + .desc("Timeout for session query (optional)") .build(); - options.addOption(opTimeout); + options.addOption(opTimeOut); + return options; } + private static void parseSpecialParams(CommandLine commandLine) throws ArgsErrorException { + targetDirectory = checkRequiredArg(TARGET_DIR_ARGS, TARGET_DIR_NAME, commandLine, null); + targetFile = commandLine.getOptionValue(TARGET_FILE_ARGS); + needDataTypePrinted = Boolean.valueOf(commandLine.getOptionValue(DATA_TYPE_ARGS)); + queryCommand = commandLine.getOptionValue(QUERY_COMMAND_ARGS); + exportType = commandLine.getOptionValue(FILE_TYPE_ARGS); + String timeoutString = commandLine.getOptionValue(TIMEOUT_ARGS); + if (timeoutString != null) { + timeout = Long.parseLong(timeoutString); + } + if (needDataTypePrinted == null) { + needDataTypePrinted = true; + } + if (targetFile == null) { + targetFile = DUMP_FILE_NAME_DEFAULT; + } + timeFormat = commandLine.getOptionValue(TIME_FORMAT_ARGS); + if (timeFormat == null) { + timeFormat = "default"; + } + timeZoneID = commandLine.getOptionValue(TIME_ZONE_ARGS); + if (!targetDirectory.endsWith("/") && !targetDirectory.endsWith("\\")) { + targetDirectory += File.separator; + } + final File file = new File(targetDirectory); + if (!file.isDirectory() && !file.mkdirs()) { + ioTPrinter.println(String.format("Failed to create directories %s", targetDirectory)); + System.exit(CODE_ERROR); + } + if (commandLine.getOptionValue(LINES_PER_FILE_ARGS) != null) { + linesPerFile = Integer.parseInt(commandLine.getOptionValue(LINES_PER_FILE_ARGS)); + } + if 
(commandLine.getOptionValue(ALIGNED_ARGS) != null) { + aligned = Boolean.valueOf(commandLine.getOptionValue(ALIGNED_ARGS)); + } + } + + protected static void setTimeZone() throws IoTDBConnectionException, StatementExecutionException { + if (timeZoneID != null) { + session.setTimeZone(timeZoneID); + } + zoneId = ZoneId.of(session.getTimeZone()); + } + /** * This method will be called, if the query commands are written in a sql file. * @@ -448,7 +662,7 @@ public static void writeCsvFile( while (i++ < linesPerFile) { if (sessionDataSet.hasNext()) { RowRecord rowRecord = sessionDataSet.next(); - if (rowRecord.getTimestamp() != 0) { + if ("Time".equals(headers.get(0))) { csvPrinterWrapper.print(timeTrans(rowRecord.getTimestamp())); } rowRecord @@ -457,10 +671,24 @@ public static void writeCsvFile( field -> { String fieldStringValue = field.getStringValue(); if (!"null".equals(field.getStringValue())) { - if ((field.getDataType() == TSDataType.TEXT - || field.getDataType() == TSDataType.STRING) + final TSDataType dataType = field.getDataType(); + if ((dataType == TSDataType.TEXT || dataType == TSDataType.STRING) && !fieldStringValue.startsWith("root.")) { fieldStringValue = "\"" + fieldStringValue + "\""; + } else if (dataType == TSDataType.BLOB) { + final byte[] v = field.getBinaryV().getValues(); + if (v == null) { + fieldStringValue = null; + } else { + fieldStringValue = BytesUtils.parseBlobByteArrayToString(v); + } + } else if (dataType == TSDataType.DATE) { + final LocalDate dateV = field.getDateV(); + if (dateV == null) { + fieldStringValue = null; + } else { + fieldStringValue = dateV.toString(); + } } csvPrinterWrapper.print(fieldStringValue); } else { @@ -554,8 +782,25 @@ public static void writeSqlFile( headersTemp.remove(seriesList.get(index)); continue; } - if ("TEXT".equalsIgnoreCase(timeseriesList.get(3).getStringValue())) { - values.add("\"" + value + "\""); + final String dataType = timeseriesList.get(3).getStringValue(); + if (TSDataType.TEXT.name().equalsIgnoreCase(dataType) + || TSDataType.STRING.name().equalsIgnoreCase(dataType)) { + values.add("\'" + value + "\'"); + } else if (TSDataType.BLOB.name().equalsIgnoreCase(dataType)) { + final byte[] v = fields.get(index).getBinaryV().getValues(); + if (v == null) { + values.add(null); + } else { + values.add( + BytesUtils.parseBlobByteArrayToString(v).replaceFirst("0x", "X'") + "'"); + } + } else if (TSDataType.DATE.name().equalsIgnoreCase(dataType)) { + final LocalDate dateV = fields.get(index).getDateV(); + if (dateV == null) { + values.add(null); + } else { + values.add("'" + dateV.toString() + "'"); + } } else { values.add(value); } diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/ImportData.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/ImportData.java new file mode 100644 index 0000000000000..753713b9d1f68 --- /dev/null +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/ImportData.java @@ -0,0 +1,773 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.tool.data; + +import org.apache.iotdb.cli.utils.IoTPrinter; +import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.exception.ArgsErrorException; +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.session.Session; +import org.apache.iotdb.session.pool.SessionPool; +import org.apache.iotdb.tool.tsfile.ImportTsFile; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.CommandLineParser; +import org.apache.commons.cli.DefaultParser; +import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.Option; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.csv.CSVParser; +import org.apache.commons.csv.CSVRecord; +import org.apache.commons.lang3.ObjectUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.tsfile.enums.TSDataType; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Stream; + +public class ImportData extends AbstractDataTool { + + private static final String FILE_ARGS = "s"; + private static final String FILE_NAME = "source"; + + private static final String ON_SUCCESS_ARGS = "os"; + private static final String ON_SUCCESS_NAME = "on_success"; + + private static final String SUCCESS_DIR_ARGS = "sd"; + private static final String SUCCESS_DIR_NAME = "success_dir"; + + private static final String FAIL_DIR_ARGS = "fd"; + private static final String FAIL_DIR_NAME = "fail_dir"; + + private static final String ON_FAIL_ARGS = "of"; + private static final String ON_FAIL_NAME = "on_fail"; + + private static final String THREAD_NUM_ARGS = "tn"; + private static final String THREAD_NUM_NAME = "thread_num"; + + private static final String BATCH_POINT_SIZE_ARGS = "batch"; + private static final String BATCH_POINT_SIZE_NAME = "batch_size"; + private static final String BATCH_POINT_SIZE_ARGS_NAME = "batch_size"; + + private static final String ALIGNED_ARGS = "aligned"; + private static final String ALIGNED_NAME = "use_aligned"; + private static final String ALIGNED_ARGS_NAME = "use the aligned interface"; + + private static final String TIMESTAMP_PRECISION_ARGS = "tp"; + private static final String TIMESTAMP_PRECISION_NAME = "timestamp_precision"; + private static final String TIMESTAMP_PRECISION_ARGS_NAME = "timestamp precision (ms/us/ns)"; + + private static final String TYPE_INFER_ARGS = "ti"; + private static final String TYPE_INFER_NAME = "type_infer"; + + private static final String LINES_PER_FAILED_FILE_ARGS = "lpf"; + private static final String LINES_PER_FAILED_FILE_ARGS_NAME = "lines_per_failed_file"; + + private static final String TSFILEDB_CLI_PREFIX = "Import Data"; + private static final String TSFILEDB_CLI_HEAD = + "Please obtain help information for the corresponding data type based on different parameters, for 
example:\n" + + "./import_data.sh -help tsfile\n" + + "./import_data.sh -help sql\n" + + "./import_data.sh -help csv"; + + private static String targetPath; + private static String fileType = null; + private static Boolean aligned = false; + private static int threadNum = 8; + private static SessionPool sessionPool; + + private static final IoTPrinter ioTPrinter = new IoTPrinter(System.out); + + private static Options createTsFileOptions() { + Options options = createImportOptions(); + + Option opFile = + Option.builder(FILE_ARGS) + .required() + .longOpt(FILE_NAME) + .argName(FILE_NAME) + .hasArg() + .desc("The local directory path of the script file (folder) to be loaded. (required)") + .build(); + options.addOption(opFile); + + Option opOnSuccess = + Option.builder(ON_SUCCESS_ARGS) + .longOpt(ON_SUCCESS_NAME) + .argName(ON_SUCCESS_NAME) + .required() + .hasArg() + .desc( + "When loading tsfile successfully, do operation on tsfile (and its .resource and .mods files), " + + "optional parameters are none, mv, cp, delete. (required)") + .build(); + options.addOption(opOnSuccess); + + Option opSuccessDir = + Option.builder(SUCCESS_DIR_ARGS) + .longOpt(SUCCESS_DIR_NAME) + .argName(SUCCESS_DIR_NAME) + .hasArg() + .desc("The target folder when 'os' is 'mv' or 'cp'.(optional)") + .build(); + options.addOption(opSuccessDir); + + Option opOnFail = + Option.builder(ON_FAIL_ARGS) + .longOpt(ON_FAIL_NAME) + .argName(ON_FAIL_NAME) + .required() + .hasArg() + .desc( + "When loading tsfile fail, do operation on tsfile (and its .resource and .mods files), " + + "optional parameters are none, mv, cp, delete. (required)") + .build(); + options.addOption(opOnFail); + + Option opFailDir = + Option.builder(FAIL_DIR_ARGS) + .longOpt(FAIL_DIR_NAME) + .argName(FAIL_DIR_NAME) + .hasArg() + .desc("The target folder when 'of' is 'mv' or 'cp'.(optional)") + .build(); + options.addOption(opFailDir); + + Option opThreadNum = + Option.builder(THREAD_NUM_ARGS) + .longOpt(THREAD_NUM_NAME) + .argName(THREAD_NUM_NAME) + .hasArg() + .desc("The number of threads used to import tsfile, default is 8.(optional)") + .build(); + options.addOption(opThreadNum); + + Option opTimeZone = + Option.builder(TIME_ZONE_ARGS) + .longOpt(TIME_ZONE_NAME) + .argName(TIME_ZONE_NAME) + .hasArg() + .desc("Time Zone eg. +08:00 or -01:00 (optional)") + .build(); + options.addOption(opTimeZone); + + Option opTimestampPrecision = + Option.builder(TIMESTAMP_PRECISION_ARGS) + .longOpt(TIMESTAMP_PRECISION_NAME) + .argName(TIMESTAMP_PRECISION_ARGS_NAME) + .hasArg() + .desc("Timestamp precision (ms/us/ns) (optional)") + .build(); + + options.addOption(opTimestampPrecision); + return options; + } + + private static Options createCsvOptions() { + Options options = createImportOptions(); + + Option opFile = + Option.builder(FILE_ARGS) + .longOpt(FILE_NAME) + .argName(FILE_NAME) + .required() + .hasArg() + .desc("The local directory path of the script file (folder) to be loaded. 
(required)") + .build(); + options.addOption(opFile); + + Option opFailDir = + Option.builder(FAIL_DIR_ARGS) + .longOpt(FAIL_DIR_NAME) + .argName(FAIL_DIR_NAME) + .hasArg() + .desc( + "Specifying a directory to save failed file, default YOUR_CSV_FILE_PATH (optional)") + .build(); + options.addOption(opFailDir); + + Option opFailedLinesPerFile = + Option.builder(LINES_PER_FAILED_FILE_ARGS) + .longOpt(LINES_PER_FAILED_FILE_ARGS_NAME) + .argName(LINES_PER_FAILED_FILE_ARGS_NAME) + .hasArgs() + .desc("Lines per failed file (optional)") + .build(); + options.addOption(opFailedLinesPerFile); + + Option opAligned = + Option.builder(ALIGNED_ARGS) + .longOpt(ALIGNED_NAME) + .argName(ALIGNED_ARGS_NAME) + .hasArg() + .desc("Whether to use the interface of aligned (optional)") + .build(); + options.addOption(opAligned); + + Option opTypeInfer = + Option.builder(TYPE_INFER_ARGS) + .longOpt(TYPE_INFER_NAME) + .argName(TYPE_INFER_NAME) + .numberOfArgs(5) + .hasArgs() + .valueSeparator(',') + .desc("Define type info by option:\"boolean=text,int=long, ... (optional)") + .build(); + options.addOption(opTypeInfer); + + Option opTimestampPrecision = + Option.builder(TIMESTAMP_PRECISION_ARGS) + .longOpt(TIMESTAMP_PRECISION_NAME) + .argName(TIMESTAMP_PRECISION_ARGS_NAME) + .hasArg() + .desc("Timestamp precision (ms/us/ns) (optional)") + .build(); + + options.addOption(opTimestampPrecision); + + Option opTimeZone = + Option.builder(TIME_ZONE_ARGS) + .longOpt(TIME_ZONE_NAME) + .argName(TIME_ZONE_NAME) + .hasArg() + .desc("Time Zone eg. +08:00 or -01:00 (optional)") + .build(); + options.addOption(opTimeZone); + + Option opBatchPointSize = + Option.builder(BATCH_POINT_SIZE_ARGS) + .longOpt(BATCH_POINT_SIZE_NAME) + .argName(BATCH_POINT_SIZE_ARGS_NAME) + .hasArg() + .desc("100000 (optional)") + .build(); + options.addOption(opBatchPointSize); + + Option opThreadNum = + Option.builder(THREAD_NUM_ARGS) + .longOpt(THREAD_NUM_NAME) + .argName(THREAD_NUM_NAME) + .hasArg() + .desc("The number of threads used to import tsfile, default is 8. (optional)") + .build(); + options.addOption(opThreadNum); + return options; + } + + private static Options createSqlOptions() { + Options options = createImportOptions(); + + Option opFile = + Option.builder(FILE_ARGS) + .required() + .longOpt(FILE_NAME) + .argName(FILE_NAME) + .hasArg() + .desc("The local directory path of the script file (folder) to be loaded. (required)") + .build(); + options.addOption(opFile); + + Option opFailDir = + Option.builder(FAIL_DIR_ARGS) + .longOpt(FAIL_DIR_NAME) + .argName(FAIL_DIR_NAME) + .hasArg() + .desc( + "Specifying a directory to save failed file, default YOUR_CSV_FILE_PATH (optional)") + .build(); + options.addOption(opFailDir); + + Option opFailedLinesPerFile = + Option.builder(LINES_PER_FAILED_FILE_ARGS) + .argName(LINES_PER_FAILED_FILE_ARGS_NAME) + .hasArgs() + .desc("Lines per failed file (optional)") + .build(); + options.addOption(opFailedLinesPerFile); + + Option opTimeZone = + Option.builder(TIME_ZONE_ARGS) + .longOpt(TIME_ZONE_NAME) + .argName(TIME_ZONE_NAME) + .hasArg() + .desc("Time Zone eg. 
+08:00 or -01:00 (optional)") + .build(); + options.addOption(opTimeZone); + + Option opBatchPointSize = + Option.builder(BATCH_POINT_SIZE_ARGS) + .longOpt(BATCH_POINT_SIZE_NAME) + .argName(BATCH_POINT_SIZE_NAME) + .hasArg() + .desc("100000 (optional)") + .build(); + options.addOption(opBatchPointSize); + + Option opThreadNum = + Option.builder(THREAD_NUM_ARGS) + .longOpt(THREAD_NUM_NAME) + .argName(THREAD_NUM_NAME) + .hasArgs() + .desc("The number of threads used to import tsfile, default is 8. (optional)") + .build(); + options.addOption(opThreadNum); + return options; + } + + /** + * parse optional params + * + * @param commandLine + */ + private static void parseSpecialParams(CommandLine commandLine) throws ArgsErrorException { + timeZoneID = commandLine.getOptionValue(TIME_ZONE_ARGS); + targetPath = commandLine.getOptionValue(FILE_ARGS); + if (commandLine.getOptionValue(BATCH_POINT_SIZE_ARGS) != null) { + batchPointSize = Integer.parseInt(commandLine.getOptionValue(BATCH_POINT_SIZE_ARGS)); + } + if (commandLine.getOptionValue(FAIL_DIR_ARGS) != null) { + failedFileDirectory = commandLine.getOptionValue(FAIL_DIR_ARGS); + File file = new File(failedFileDirectory); + if (!file.isDirectory()) { + file.mkdir(); + failedFileDirectory = file.getAbsolutePath() + File.separator; + } else if (!failedFileDirectory.endsWith("/") && !failedFileDirectory.endsWith("\\")) { + failedFileDirectory += File.separator; + } + } + if (commandLine.getOptionValue(ALIGNED_ARGS) != null) { + aligned = Boolean.valueOf(commandLine.getOptionValue(ALIGNED_ARGS)); + } + if (commandLine.getOptionValue(THREAD_NUM_ARGS) != null) { + threadNum = Integer.parseInt(commandLine.getOptionValue(THREAD_NUM_ARGS)); + if (threadNum <= 0) { + ioTPrinter.println( + String.format( + "error: Invalid thread number '%s'. 
Please set a positive integer.", threadNum)); + System.exit(CODE_ERROR); + } + } + if (commandLine.getOptionValue(TIMESTAMP_PRECISION_ARGS) != null) { + timestampPrecision = commandLine.getOptionValue(TIMESTAMP_PRECISION_ARGS); + } + final String[] opTypeInferValues = commandLine.getOptionValues(TYPE_INFER_ARGS); + if (opTypeInferValues != null && opTypeInferValues.length > 0) { + for (String opTypeInferValue : opTypeInferValues) { + if (opTypeInferValue.contains("=")) { + final String[] typeInfoExpressionArr = opTypeInferValue.split("="); + final String key = typeInfoExpressionArr[0]; + final String value = typeInfoExpressionArr[1]; + applyTypeInferArgs(key, value); + } + } + } + if (commandLine.getOptionValue(LINES_PER_FAILED_FILE_ARGS) != null) { + linesPerFailedFile = Integer.parseInt(commandLine.getOptionValue(LINES_PER_FAILED_FILE_ARGS)); + } + } + + private static void applyTypeInferArgs(String key, String value) throws ArgsErrorException { + if (!TYPE_INFER_KEY_DICT.containsKey(key)) { + throw new ArgsErrorException("Unknown type infer key: " + key); + } + if (!TYPE_INFER_VALUE_DICT.containsKey(value)) { + throw new ArgsErrorException("Unknown type infer value: " + value); + } + if (key.equals(DATATYPE_NAN) + && !(value.equals(DATATYPE_FLOAT) + || value.equals(DATATYPE_DOUBLE) + || value.equals(DATATYPE_TEXT) + || value.equals(DATATYPE_STRING))) { + throw new ArgsErrorException("NaN can not convert to " + value); + } + if (key.equals(DATATYPE_BOOLEAN) + && !(value.equals(DATATYPE_BOOLEAN) + || value.equals(DATATYPE_TEXT) + || value.equals(DATATYPE_STRING))) { + throw new ArgsErrorException("Boolean can not convert to " + value); + } + if (key.equals(DATATYPE_DATE) + && !(value.equals(DATATYPE_DATE) + || value.equals(DATATYPE_TEXT) + || value.equals(DATATYPE_STRING))) { + throw new ArgsErrorException("Date can not convert to " + value); + } + if (key.equals(DATATYPE_TIMESTAMP) + && !(value.equals(DATATYPE_TIMESTAMP) + || value.equals(DATATYPE_TEXT) + || value.equals(DATATYPE_STRING) + || value.equals(DATATYPE_DOUBLE) + || value.equals(DATATYPE_LONG))) { + throw new ArgsErrorException("Timestamp can not convert to " + value); + } + if (key.equals(DATATYPE_BLOB) && !(value.equals(DATATYPE_BLOB))) { + throw new ArgsErrorException("Blob can not convert to " + value); + } + final TSDataType srcType = TYPE_INFER_VALUE_DICT.get(key); + final TSDataType dstType = TYPE_INFER_VALUE_DICT.get(value); + if (dstType.getType() < srcType.getType()) { + throw new ArgsErrorException(key + " can not convert to " + value); + } + TYPE_INFER_KEY_DICT.put(key, TYPE_INFER_VALUE_DICT.get(value)); + } + + public static void main(String[] args) throws IoTDBConnectionException { + Options helpOptions = createHelpOptions(); + Options tsFileOptions = createTsFileOptions(); + Options csvOptions = createCsvOptions(); + Options sqlOptions = createSqlOptions(); + HelpFormatter hf = new HelpFormatter(); + hf.setOptionComparator(null); + hf.setWidth(MAX_HELP_CONSOLE_WIDTH); + CommandLine commandLine = null; + CommandLineParser parser = new DefaultParser(); + + if (args == null || args.length == 0) { + printHelpOptions( + TSFILEDB_CLI_HEAD, TSFILEDB_CLI_PREFIX, hf, tsFileOptions, csvOptions, sqlOptions, true); + System.exit(CODE_ERROR); + } + try { + commandLine = parser.parse(helpOptions, args, true); + } catch (org.apache.commons.cli.ParseException e) { + printHelpOptions( + TSFILEDB_CLI_HEAD, TSFILEDB_CLI_PREFIX, hf, tsFileOptions, csvOptions, sqlOptions, true); + System.exit(CODE_ERROR); + } + final List argList = 
Arrays.asList(args); + int helpIndex = argList.indexOf(MINUS + HELP_ARGS); + int ftIndex = argList.indexOf(MINUS + FILE_TYPE_ARGS); + if (ftIndex < 0) { + ftIndex = argList.indexOf(MINUS + FILE_TYPE_NAME); + } + if (helpIndex >= 0) { + fileType = argList.get(helpIndex + 1); + if (StringUtils.isNotBlank(fileType)) { + if (TSFILE_SUFFIXS.equalsIgnoreCase(fileType)) { + printHelpOptions(null, TSFILEDB_CLI_PREFIX, hf, tsFileOptions, null, null, false); + } else if (CSV_SUFFIXS.equalsIgnoreCase(fileType)) { + printHelpOptions(null, TSFILEDB_CLI_PREFIX, hf, null, csvOptions, null, false); + } else if (SQL_SUFFIXS.equalsIgnoreCase(fileType)) { + printHelpOptions(null, TSFILEDB_CLI_PREFIX, hf, null, null, sqlOptions, false); + } else { + ioTPrinter.println(String.format("File type %s is not support", fileType)); + printHelpOptions( + TSFILEDB_CLI_HEAD, + TSFILEDB_CLI_PREFIX, + hf, + tsFileOptions, + csvOptions, + sqlOptions, + true); + } + } else { + printHelpOptions( + TSFILEDB_CLI_HEAD, + TSFILEDB_CLI_PREFIX, + hf, + tsFileOptions, + csvOptions, + sqlOptions, + true); + } + System.exit(CODE_ERROR); + } else if (ftIndex >= 0) { + fileType = argList.get(ftIndex + 1); + if (StringUtils.isNotBlank(fileType)) { + if (TSFILE_SUFFIXS.equalsIgnoreCase(fileType)) { + try { + commandLine = parser.parse(tsFileOptions, args); + ImportTsFile.importData(commandLine); + } catch (ParseException e) { + ioTPrinter.println("Parse error: " + e.getMessage()); + printHelpOptions(null, TSFILEDB_CLI_PREFIX, hf, tsFileOptions, null, null, false); + System.exit(CODE_ERROR); + } + } else if (CSV_SUFFIXS.equalsIgnoreCase(fileType)) { + try { + commandLine = parser.parse(csvOptions, args); + } catch (ParseException e) { + ioTPrinter.println("Parse error: " + e.getMessage()); + printHelpOptions(null, TSFILEDB_CLI_PREFIX, hf, null, csvOptions, null, false); + System.exit(CODE_ERROR); + } + } else if (SQL_SUFFIXS.equalsIgnoreCase(fileType)) { + try { + commandLine = parser.parse(sqlOptions, args); + } catch (ParseException e) { + ioTPrinter.println("Parse error: " + e.getMessage()); + printHelpOptions(null, TSFILEDB_CLI_PREFIX, hf, null, null, sqlOptions, false); + System.exit(CODE_ERROR); + } + } else { + ioTPrinter.println(String.format("File type %s is not support", fileType)); + printHelpOptions( + TSFILEDB_CLI_HEAD, + TSFILEDB_CLI_PREFIX, + hf, + tsFileOptions, + csvOptions, + sqlOptions, + true); + System.exit(CODE_ERROR); + } + } else { + ioTPrinter.println( + String.format( + "Invalid args: Required values for option '%s' not provided", FILE_TYPE_NAME)); + System.exit(CODE_ERROR); + } + } else { + ioTPrinter.println( + String.format( + "Invalid args: Required values for option '%s' not provided", FILE_TYPE_NAME)); + System.exit(CODE_ERROR); + } + + try { + parseBasicParams(commandLine); + String filename = commandLine.getOptionValue(FILE_ARGS); + if (filename == null) { + ioTPrinter.println(TSFILEDB_CLI_HEAD); + printHelpOptions( + null, TSFILEDB_CLI_PREFIX, hf, tsFileOptions, csvOptions, sqlOptions, true); + System.exit(CODE_ERROR); + } + parseSpecialParams(commandLine); + } catch (ArgsErrorException e) { + ioTPrinter.println("Args error: " + e.getMessage()); + System.exit(CODE_ERROR); + } catch (Exception e) { + ioTPrinter.println("Encounter an error, because: " + e.getMessage()); + System.exit(CODE_ERROR); + } + final int resultCode = importFromTargetPathAsync(); + if (ImportDataScanTool.getTsFileQueueSize() <= 0) { + System.exit(CODE_OK); + } + asyncImportDataFiles(); + System.exit(resultCode); + } + + private 
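/*
 * Everything in main() above is routed by the file-type flag: -help prints a
 * per-type help screen, and the value following the file-type option decides
 * which Options set parses the remaining arguments. A sketch of the three
 * accepted invocations (script name and flag spellings are illustrative; only
 * the tsfile/csv/sql branching is taken from the code above):
 *
 *   import-data.sh -ft tsfile -s /data/tsfiles ...    // delegates to ImportTsFile.importData
 *   import-data.sh -ft csv    -s /data/dump0.csv ...  // parsed with csvOptions
 *   import-data.sh -ft sql    -s /data/dump0.sql ...  // parsed with sqlOptions
 *
 * Any other value prints the unsupported-file-type message together with the
 * full help text and exits with CODE_ERROR.
 */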
static void asyncImportDataFiles() { + List list = new ArrayList<>(threadNum); + for (int i = 0; i < threadNum; i++) { + final Thread thread = new Thread(new AsyncImportData()); + thread.start(); + list.add(thread); + } + list.forEach( + thread -> { + try { + thread.join(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + ioTPrinter.println("ImportData thread join interrupted: " + e.getMessage()); + } + }); + ioTPrinter.println("Import completely!"); + } + + private static int importFromTargetPathAsync() { + try { + sessionPool = + new SessionPool.Builder() + .host(host) + .port(Integer.parseInt(port)) + .user(username) + .password(password) + .maxSize(threadNum + 1) + .enableCompression(false) + .enableRedirection(false) + .enableAutoFetch(false) + .build(); + sessionPool.setEnableQueryRedirection(false); + AsyncImportData.setAligned(aligned); + AsyncImportData.setSessionPool(sessionPool); + AsyncImportData.setTimeZone(); + ImportDataScanTool.setSourceFullPath(targetPath); + final File file = new File(targetPath); + if (!file.isFile() && !file.isDirectory()) { + ioTPrinter.println(String.format("Source file or directory %s does not exist", targetPath)); + System.exit(CODE_ERROR); + } + ImportDataScanTool.traverseAndCollectFiles(); + asyncImportDataFiles(); + return CODE_OK; + } catch (InterruptedException e) { + ioTPrinter.println(String.format("Import tsfile fail: %s", e.getMessage())); + Thread.currentThread().interrupt(); + return CODE_ERROR; + } catch (Exception e) { + ioTPrinter.println(String.format("Import tsfile fail: %s", e.getMessage())); + return CODE_ERROR; + } + } + + /** + * Specifying a CSV file or a directory including CSV files that you want to import. This method + * can be offered to console cli to implement importing CSV file by command. + * + * @param host + * @param port + * @param username + * @param password + * @param targetPath a CSV file or a directory including CSV files + * @param timeZone + * @return the status code + * @throws IoTDBConnectionException + */ + @SuppressWarnings({"squid:S2093"}) // ignore try-with-resources + public static int importFromTargetPath( + String host, int port, String username, String password, String targetPath, String timeZone) + throws IoTDBConnectionException { + try { + session = new Session(host, port, username, password, false); + session.open(false); + timeZoneID = timeZone; + setTimeZone(); + + File file = new File(targetPath); + if (file.isFile()) { + if (file.getName().endsWith(SQL_SUFFIXS)) { + importFromSqlFile(session, file); + } else { + importFromSingleFile(session, file); + } + } else if (file.isDirectory()) { + File[] files = file.listFiles(); + if (files == null) { + return CODE_OK; + } + + for (File subFile : files) { + if (subFile.isFile()) { + if (subFile.getName().endsWith(SQL_SUFFIXS)) { + importFromSqlFile(session, subFile); + } else { + importFromSingleFile(session, subFile); + } + } + } + } else { + ioTPrinter.println("File not found!"); + return CODE_ERROR; + } + } catch (IoTDBConnectionException | StatementExecutionException e) { + ioTPrinter.println("Encounter an error when connecting to server, because " + e.getMessage()); + return CODE_ERROR; + } finally { + if (session != null) { + session.close(); + } + } + return CODE_OK; + } + + /** + * import the CSV file and load headers and records. + * + * @param file the File object of the CSV file that you want to import. 
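   *     <p>A minimal illustration of the two accepted header layouts (assuming
   *     the configured time and device column names are "Time" and "Device";
   *     only the leading "Time" column is checked unconditionally):
   *     <pre>{@code
   *     Time,root.sg.d1.s1,root.sg.d1.s2   -> writeDataAlignedByTime
   *     Time,Device,s1,s2                  -> writeDataAlignedByDevice
   *     }</pre>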
+ */ + private static void importFromSingleFile(Session session, File file) { + if (file.getName().endsWith(CSV_SUFFIXS) || file.getName().endsWith(TXT_SUFFIXS)) { + try { + CSVParser csvRecords = readCsvFile(file.getAbsolutePath()); + List headerNames = csvRecords.getHeaderNames(); + Stream records = csvRecords.stream(); + if (headerNames.isEmpty()) { + ioTPrinter.println("Empty file!"); + return; + } + if (!timeColumn.equalsIgnoreCase(filterBomHeader(headerNames.get(0)))) { + ioTPrinter.println("The first field of header must be `Time`!"); + return; + } + String failedFilePath = null; + if (failedFileDirectory == null) { + failedFilePath = file.getAbsolutePath() + ".failed"; + } else { + failedFilePath = failedFileDirectory + file.getName() + ".failed"; + } + if (!deviceColumn.equalsIgnoreCase(headerNames.get(1))) { + writeDataAlignedByTime(session, headerNames, records, failedFilePath); + } else { + writeDataAlignedByDevice(session, headerNames, records, failedFilePath); + } + } catch (IOException | IllegalPathException e) { + ioTPrinter.println("CSV file read exception because: " + e.getMessage()); + } + } else { + ioTPrinter.println("The file name must end with \"csv\" or \"txt\"!"); + } + } + + @SuppressWarnings("java:S2259") + private static void importFromSqlFile(Session session, File file) { + ArrayList> failedRecords = new ArrayList<>(); + String failedFilePath = null; + if (failedFileDirectory == null) { + failedFilePath = file.getAbsolutePath() + ".failed"; + } else { + failedFilePath = failedFileDirectory + file.getName() + ".failed"; + } + try (BufferedReader br = new BufferedReader(new FileReader(file.getAbsolutePath()))) { + String sql; + while ((sql = br.readLine()) != null) { + try { + session.executeNonQueryStatement(sql); + } catch (IoTDBConnectionException | StatementExecutionException e) { + failedRecords.add(Arrays.asList(sql)); + } + } + ioTPrinter.println(file.getName() + " Import completely!"); + } catch (IOException e) { + ioTPrinter.println("SQL file read exception because: " + e.getMessage()); + } + if (!failedRecords.isEmpty()) { + FileWriter writer = null; + try { + writer = new FileWriter(failedFilePath); + for (List failedRecord : failedRecords) { + writer.write(failedRecord.get(0).toString() + "\n"); + } + } catch (IOException e) { + ioTPrinter.println("Cannot dump fail result because: " + e.getMessage()); + } finally { + if (ObjectUtils.isNotEmpty(writer)) { + try { + writer.flush(); + writer.close(); + } catch (IOException e) { + } + } + } + } + } +} diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/ImportDataScanTool.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/ImportDataScanTool.java new file mode 100644 index 0000000000000..eaed0f5a4b096 --- /dev/null +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/ImportDataScanTool.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.tool.data;
+
+import java.io.File;
+import java.util.concurrent.LinkedBlockingQueue;
+
+public class ImportDataScanTool {
+
+  private static final LinkedBlockingQueue<String> dataQueue = new LinkedBlockingQueue<>();
+  private static String sourceFullPath;
+
+  public static void traverseAndCollectFiles() throws InterruptedException {
+    traverseAndCollectFilesBySourceFullPath(new File(sourceFullPath));
+  }
+
+  private static void traverseAndCollectFilesBySourceFullPath(final File file)
+      throws InterruptedException {
+    if (file.isFile()) {
+      putToQueue(file.getAbsolutePath());
+    } else if (file.isDirectory()) {
+      final File[] files = file.listFiles();
+      if (files != null) {
+        for (File f : files) {
+          traverseAndCollectFilesBySourceFullPath(f);
+        }
+      }
+    }
+  }
+
+  public static String pollFromQueue() {
+    return ImportDataScanTool.dataQueue.poll();
+  }
+
+  public static void putToQueue(final String filePath) throws InterruptedException {
+    ImportDataScanTool.dataQueue.put(filePath);
+  }
+
+  public static void setSourceFullPath(final String sourceFullPath) {
+    ImportDataScanTool.sourceFullPath = sourceFullPath;
+  }
+
+  public static int getTsFileQueueSize() {
+    return ImportDataScanTool.dataQueue.size();
+  }
+}
diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/AbstractSchemaTool.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/AbstractSchemaTool.java
similarity index 99%
rename from iotdb-client/cli/src/main/java/org/apache/iotdb/tool/AbstractSchemaTool.java
rename to iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/AbstractSchemaTool.java
index 9e91d404d1211..00d926c707447 100644
--- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/AbstractSchemaTool.java
+++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/AbstractSchemaTool.java
@@ -17,7 +17,7 @@
 * under the License.
 */
-package org.apache.iotdb.tool;
+package org.apache.iotdb.tool.schema;
 import org.apache.iotdb.cli.utils.IoTPrinter;
 import org.apache.iotdb.exception.ArgsErrorException;
diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ExportSchema.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ExportSchema.java
similarity index 99%
rename from iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ExportSchema.java
rename to iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ExportSchema.java
index 9892da625e752..54453acd898ef 100644
--- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ExportSchema.java
+++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ExportSchema.java
@@ -17,7 +17,7 @@
 * under the License.
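/*
 * ImportDataScanTool above is a static producer/consumer hand-off: the main
 * thread fills the blocking queue, worker threads drain it with poll(). A
 * minimal sketch of the intended call order (source path and worker body are
 * illustrative):
 *
 *   ImportDataScanTool.setSourceFullPath("/data/import");
 *   ImportDataScanTool.traverseAndCollectFiles();          // producer side
 *
 *   String filePath;
 *   while ((filePath = ImportDataScanTool.pollFromQueue()) != null) {
 *     // consumer side: import filePath, requeue on a retryable failure
 *   }
 */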
*/ -package org.apache.iotdb.tool; +package org.apache.iotdb.tool.schema; import org.apache.iotdb.cli.type.ExitType; import org.apache.iotdb.cli.utils.CliContext; diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ImportSchema.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ImportSchema.java similarity index 99% rename from iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ImportSchema.java rename to iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ImportSchema.java index 3a7b8ab60e5d0..a7709313b1bfd 100644 --- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ImportSchema.java +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ImportSchema.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.tool; +package org.apache.iotdb.tool.schema; import org.apache.iotdb.cli.utils.IoTPrinter; import org.apache.iotdb.commons.exception.IllegalPathException; diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/AbstractTsFileTool.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/AbstractTsFileTool.java similarity index 97% rename from iotdb-client/cli/src/main/java/org/apache/iotdb/tool/AbstractTsFileTool.java rename to iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/AbstractTsFileTool.java index fe8691ddf8b65..64b9f806d5889 100644 --- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/AbstractTsFileTool.java +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/AbstractTsFileTool.java @@ -17,10 +17,11 @@ * under the License. */ -package org.apache.iotdb.tool; +package org.apache.iotdb.tool.tsfile; import org.apache.iotdb.cli.utils.IoTPrinter; import org.apache.iotdb.exception.ArgsErrorException; +import org.apache.iotdb.session.Session; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Option; @@ -52,7 +53,7 @@ public abstract class AbstractTsFileTool { protected static Options options; protected static Options helpOptions; - + protected static Session session; protected static String host = "127.0.0.1"; protected static String port = "6667"; protected static String username = "root"; diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ExportTsFile.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ExportTsFile.java similarity index 62% rename from iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ExportTsFile.java rename to iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ExportTsFile.java index 723e410741e54..2874a99520323 100644 --- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ExportTsFile.java +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ExportTsFile.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.apache.iotdb.tool; +package org.apache.iotdb.tool.tsfile; import org.apache.iotdb.cli.type.ExitType; import org.apache.iotdb.cli.utils.CliContext; @@ -54,17 +54,22 @@ import java.io.IOException; import java.nio.file.Files; import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Set; public class ExportTsFile extends AbstractTsFileTool { private static final String TARGET_DIR_ARGS = "t"; private static final String TARGET_DIR_NAME = "targetDirectory"; + private static final String TARGET_DIR_NAME_BACK = "target"; + private static final String TARGET_FILE_ARGS = "tfn"; private static final String TARGET_FILE_NAME = "targetFileName"; + private static final String TARGET_FILE_ARGS_BACK = "pfn"; private static final String SQL_FILE_ARGS = "s"; private static final String SQL_FILE_NAME = "sourceSqlFile"; @@ -73,11 +78,10 @@ public class ExportTsFile extends AbstractTsFileTool { private static final String DUMP_FILE_NAME_DEFAULT = "dump"; private static final String TSFILEDB_CLI_PREFIX = "ExportTsFile"; - private static Session session; - private static String targetDirectory; private static String targetFile = DUMP_FILE_NAME_DEFAULT; private static String queryCommand; + private static String sqlFile; private static long timeout = -1; @@ -89,41 +93,16 @@ public class ExportTsFile extends AbstractTsFileTool { }) // Suppress high Cognitive Complexity warning, ignore try-with-resources /* main function of export tsFile tool. */ public static void main(String[] args) { - createOptions(); - HelpFormatter hf = new HelpFormatter(); - CommandLine commandLine = null; - CommandLineParser parser = new DefaultParser(); - hf.setOptionComparator(null); // avoid reordering - hf.setWidth(MAX_HELP_CONSOLE_WIDTH); - - if (args == null || args.length == 0) { - ioTPrinter.println("Too few params input, please check the following hint."); - hf.printHelp(TSFILEDB_CLI_PREFIX, options, true); - System.exit(CODE_ERROR); - } - - try { - commandLine = parser.parse(options, args); - } catch (ParseException e) { - ioTPrinter.println(e.getMessage()); - hf.printHelp(TSFILEDB_CLI_PREFIX, options, true); - System.exit(CODE_ERROR); - } - if (commandLine.hasOption(HELP_ARGS)) { - hf.printHelp(TSFILEDB_CLI_PREFIX, options, true); - System.exit(CODE_ERROR); - } + int exitCode = getCommandLine(args); + exportTsfile(exitCode); + } - int exitCode = CODE_OK; + public static void exportTsfile(int exitCode) { try { - parseBasicParams(commandLine); - parseSpecialParams(commandLine); - session = new Session(host, Integer.parseInt(port), username, password); session.open(false); if (queryCommand == null) { - String sqlFile = commandLine.getOptionValue(SQL_FILE_ARGS); String sql; if (sqlFile == null) { @@ -152,9 +131,6 @@ public static void main(String[] args) { } catch (IOException e) { ioTPrinter.println("Failed to operate on file, because " + e.getMessage()); exitCode = CODE_ERROR; - } catch (ArgsErrorException e) { - ioTPrinter.println("Invalid args: " + e.getMessage()); - exitCode = CODE_ERROR; } catch (IoTDBConnectionException e) { ioTPrinter.println("Connect failed because " + e.getMessage()); exitCode = CODE_ERROR; @@ -172,6 +148,51 @@ public static void main(String[] args) { System.exit(exitCode); } + public ExportTsFile(CommandLine commandLine) { + try { + parseBasicParams(commandLine); + parseSpecialParamsBack(commandLine); + } catch (ArgsErrorException e) { + ioTPrinter.println("Invalid args: " + 
e.getMessage()); + System.exit(CODE_ERROR); + } + } + + protected static int getCommandLine(String[] args) { + createOptions(); + HelpFormatter hf = new HelpFormatter(); + CommandLine commandLine = null; + CommandLineParser parser = new DefaultParser(); + hf.setOptionComparator(null); // avoid reordering + hf.setWidth(MAX_HELP_CONSOLE_WIDTH); + + if (args == null || args.length == 0) { + ioTPrinter.println("Too few params input, please check the following hint."); + hf.printHelp(TSFILEDB_CLI_PREFIX, options, true); + System.exit(CODE_ERROR); + } + try { + commandLine = parser.parse(options, args); + } catch (ParseException e) { + ioTPrinter.println(e.getMessage()); + hf.printHelp(TSFILEDB_CLI_PREFIX, options, true); + System.exit(CODE_ERROR); + } + if (commandLine.hasOption(HELP_ARGS)) { + hf.printHelp(TSFILEDB_CLI_PREFIX, options, true); + System.exit(CODE_ERROR); + } + int exitCode = CODE_OK; + try { + parseBasicParams(commandLine); + parseSpecialParams(commandLine); + } catch (ArgsErrorException e) { + ioTPrinter.println("Invalid args: " + e.getMessage()); + exitCode = CODE_ERROR; + } + return exitCode; + } + private static void legalCheck(String sql) { String sqlLower = sql.toLowerCase(); if (sqlLower.contains("count(") @@ -201,6 +222,24 @@ private static void parseSpecialParams(CommandLine commandLine) throws ArgsError targetDirectory = checkRequiredArg(TARGET_DIR_ARGS, TARGET_DIR_NAME, commandLine); queryCommand = commandLine.getOptionValue(QUERY_COMMAND_ARGS); targetFile = commandLine.getOptionValue(TARGET_FILE_ARGS); + sqlFile = commandLine.getOptionValue(SQL_FILE_ARGS); + String timeoutString = commandLine.getOptionValue(TIMEOUT_ARGS); + if (timeoutString != null) { + timeout = Long.parseLong(timeoutString); + } + if (targetFile == null) { + targetFile = DUMP_FILE_NAME_DEFAULT; + } + + if (!targetDirectory.endsWith("/") && !targetDirectory.endsWith("\\")) { + targetDirectory += File.separator; + } + } + + private static void parseSpecialParamsBack(CommandLine commandLine) throws ArgsErrorException { + targetDirectory = checkRequiredArg(TARGET_DIR_ARGS, TARGET_DIR_NAME_BACK, commandLine); + queryCommand = commandLine.getOptionValue(QUERY_COMMAND_ARGS); + targetFile = commandLine.getOptionValue(TARGET_FILE_ARGS_BACK); String timeoutString = commandLine.getOptionValue(TIMEOUT_ARGS); if (timeoutString != null) { timeout = Long.parseLong(timeoutString); @@ -298,9 +337,11 @@ private static void dumpResult(String sql, int index) { final String path = targetDirectory + targetFile + index + ".tsfile"; try (SessionDataSet sessionDataSet = session.executeQueryStatement(sql, timeout)) { long start = System.currentTimeMillis(); - writeTsFileFile(sessionDataSet, path); - long end = System.currentTimeMillis(); - ioTPrinter.println("Export completely!cost: " + (end - start) + " ms."); + boolean isComplete = writeWithTablets(sessionDataSet, path); + if (isComplete) { + long end = System.currentTimeMillis(); + ioTPrinter.println("Export completely!cost: " + (end - start) + " ms."); + } } catch (StatementExecutionException | IoTDBConnectionException | IOException @@ -309,11 +350,119 @@ private static void dumpResult(String sql, int index) { } } + private static void collectSchemas( + List columnNames, + List columnTypes, + Map> deviceSchemaMap, + Set alignedDevices, + Map> deviceColumnIndices) + throws IoTDBConnectionException, StatementExecutionException { + for (int i = 0; i < columnNames.size(); i++) { + String column = columnNames.get(i); + if (!column.startsWith("root.")) { + continue; + } + 
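/*
 * Output naming in dumpResult above: one TsFile per executed query, with a
 * zero-based index appended to the target file name. A worked example (values
 * illustrative):
 *
 *   targetDirectory = "/backup"    ->  normalized to "/backup/" by parseSpecialParams
 *   targetFile      = null         ->  falls back to DUMP_FILE_NAME_DEFAULT ("dump")
 *   query #0        ->  /backup/dump0.tsfile
 *   query #1        ->  /backup/dump1.tsfile
 */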
TSDataType tsDataType = getTsDataType(columnTypes.get(i)); + Path path = new Path(column, true); + String deviceId = path.getDevice(); + // query whether the device is aligned or not + try (SessionDataSet deviceDataSet = + session.executeQueryStatement("show devices " + deviceId, timeout)) { + List deviceList = deviceDataSet.next().getFields(); + if (deviceList.size() > 1 && "true".equals(deviceList.get(1).getStringValue())) { + alignedDevices.add(deviceId); + } + } + + // query timeseries metadata + MeasurementSchema measurementSchema = + new MeasurementSchema(path.getMeasurement(), tsDataType); + List seriesList = + session.executeQueryStatement("show timeseries " + column, timeout).next().getFields(); + measurementSchema.setEncoding( + TSEncoding.valueOf(seriesList.get(4).getStringValue()).serialize()); + measurementSchema.setCompressor( + CompressionType.valueOf(seriesList.get(5).getStringValue()).serialize()); + + deviceSchemaMap.computeIfAbsent(deviceId, key -> new ArrayList<>()).add(measurementSchema); + deviceColumnIndices.computeIfAbsent(deviceId, key -> new ArrayList<>()).add(i); + } + } + + private static List constructTablets( + Map> deviceSchemaMap, + Set alignedDevices, + TsFileWriter tsFileWriter) + throws WriteProcessException { + List tabletList = new ArrayList<>(deviceSchemaMap.size()); + for (Map.Entry> stringListEntry : deviceSchemaMap.entrySet()) { + String deviceId = stringListEntry.getKey(); + List schemaList = stringListEntry.getValue(); + Tablet tablet = new Tablet(deviceId, schemaList); + tablet.initBitMaps(); + Path path = new Path(tablet.deviceId); + if (alignedDevices.contains(tablet.deviceId)) { + tsFileWriter.registerAlignedTimeseries(path, schemaList); + } else { + tsFileWriter.registerTimeseries(path, schemaList); + } + tabletList.add(tablet); + } + return tabletList; + } + + private static void writeWithTablets( + SessionDataSet sessionDataSet, + List tabletList, + Set alignedDevices, + TsFileWriter tsFileWriter, + Map> deviceColumnIndices) + throws IoTDBConnectionException, + StatementExecutionException, + IOException, + WriteProcessException { + while (sessionDataSet.hasNext()) { + RowRecord rowRecord = sessionDataSet.next(); + List fields = rowRecord.getFields(); + + for (Tablet tablet : tabletList) { + String deviceId = tablet.deviceId; + List columnIndices = deviceColumnIndices.get(deviceId); + int rowIndex = tablet.rowSize++; + tablet.addTimestamp(rowIndex, rowRecord.getTimestamp()); + List schemas = tablet.getSchemas(); + + for (int i = 0, columnIndicesSize = columnIndices.size(); i < columnIndicesSize; i++) { + Integer columnIndex = columnIndices.get(i); + MeasurementSchema measurementSchema = schemas.get(i); + // -1 for time not in fields + Object value = fields.get(columnIndex - 1).getObjectValue(measurementSchema.getType()); + if (value == null) { + tablet.bitMaps[i].mark(rowIndex); + } + tablet.addValue(measurementSchema.getMeasurementId(), rowIndex, value); + } + + if (tablet.rowSize == tablet.getMaxRowNumber()) { + writeToTsFile(alignedDevices, tsFileWriter, tablet); + tablet.initBitMaps(); + tablet.reset(); + } + } + } + + for (Tablet tablet : tabletList) { + if (tablet.rowSize != 0) { + writeToTsFile(alignedDevices, tsFileWriter, tablet); + } + } + } + @SuppressWarnings({ "squid:S3776", "squid:S6541" }) // Suppress high Cognitive Complexity warning, Suppress many task in one method warning - public static void writeTsFileFile(SessionDataSet sessionDataSet, String filePath) + public static Boolean writeWithTablets(SessionDataSet 
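/*
 * The row loop above follows the usual Tablet batching pattern: claim a row,
 * mark NULL cells in the per-column bitmap, flush when the tablet is full, and
 * flush any remainder once the result set is exhausted. A compressed sketch of
 * the per-row bookkeeping (timestamp, measurement names and values are
 * illustrative):
 *
 *   int row = tablet.rowSize++;                    // claim the next slot
 *   tablet.addTimestamp(row, 1700000000000L);
 *   tablet.addValue("s1", row, 36.5);              // non-null cell
 *   tablet.bitMaps[1].mark(row);                   // s2 is NULL for this row
 *   if (tablet.rowSize == tablet.getMaxRowNumber()) {
 *     writeToTsFile(alignedDevices, tsFileWriter, tablet);
 *     tablet.initBitMaps();
 *     tablet.reset();
 *   }
 */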
sessionDataSet, String filePath) throws IOException, IoTDBConnectionException, StatementExecutionException, @@ -324,91 +473,35 @@ public static void writeTsFileFile(SessionDataSet sessionDataSet, String filePat if (f.exists()) { Files.delete(f.toPath()); } - HashSet deviceFilterSet = new HashSet<>(); + boolean isEmpty = false; try (TsFileWriter tsFileWriter = new TsFileWriter(f)) { - Map> schemaMap = new LinkedHashMap<>(); - for (int i = 0; i < columnNames.size(); i++) { - String column = columnNames.get(i); - if (!column.startsWith("root.")) { - continue; - } - TSDataType tsDataType = getTsDataType(columnTypes.get(i)); - Path path = new Path(column, true); - String deviceId = path.getDevice(); - try (SessionDataSet deviceDataSet = - session.executeQueryStatement("show devices " + deviceId, timeout)) { - List deviceList = deviceDataSet.next().getFields(); - if (deviceList.size() > 1 && "true".equals(deviceList.get(1).getStringValue())) { - deviceFilterSet.add(deviceId); - } - } - MeasurementSchema measurementSchema = - new MeasurementSchema(path.getMeasurement(), tsDataType); + // device -> column indices in columnNames + Map> deviceColumnIndices = new HashMap<>(); + Set alignedDevices = new HashSet<>(); + Map> deviceSchemaMap = new LinkedHashMap<>(); - List seriesList = - session.executeQueryStatement("show timeseries " + column, timeout).next().getFields(); + collectSchemas( + columnNames, columnTypes, deviceSchemaMap, alignedDevices, deviceColumnIndices); - measurementSchema.setEncoding( - TSEncoding.valueOf(seriesList.get(4).getStringValue()).serialize()); - measurementSchema.setCompressor( - CompressionType.valueOf(seriesList.get(5).getStringValue()).serialize()); - schemaMap.computeIfAbsent(deviceId, key -> new ArrayList<>()).add(measurementSchema); - } - List tabletList = new ArrayList<>(); - for (Map.Entry> stringListEntry : schemaMap.entrySet()) { - String deviceId = stringListEntry.getKey(); - List schemaList = stringListEntry.getValue(); - Tablet tablet = new Tablet(deviceId, schemaList); - tablet.initBitMaps(); - Path path = new Path(tablet.deviceId); - if (deviceFilterSet.contains(tablet.deviceId)) { - tsFileWriter.registerAlignedTimeseries(path, schemaList); - } else { - tsFileWriter.registerTimeseries(path, schemaList); - } - tabletList.add(tablet); - } - if (tabletList.isEmpty()) { - ioTPrinter.println("!!!Warning:Tablet is empty,no data can be exported."); - System.exit(CODE_ERROR); - } - while (sessionDataSet.hasNext()) { - RowRecord rowRecord = sessionDataSet.next(); - List fields = rowRecord.getFields(); - int i = 0; - while (i < fields.size()) { - for (Tablet tablet : tabletList) { - int rowIndex = tablet.rowSize++; - tablet.addTimestamp(rowIndex, rowRecord.getTimestamp()); - List schemas = tablet.getSchemas(); - for (int j = 0; j < schemas.size(); j++) { - MeasurementSchema measurementSchema = schemas.get(j); - Object value = fields.get(i).getObjectValue(measurementSchema.getType()); - if (value == null) { - tablet.bitMaps[j].mark(rowIndex); - } - tablet.addValue(measurementSchema.getMeasurementId(), rowIndex, value); - i++; - } - if (tablet.rowSize == tablet.getMaxRowNumber()) { - writeToTsfile(deviceFilterSet, tsFileWriter, tablet); - tablet.initBitMaps(); - tablet.reset(); - } - } - } - } - for (Tablet tablet : tabletList) { - if (tablet.rowSize != 0) { - writeToTsfile(deviceFilterSet, tsFileWriter, tablet); - } + List tabletList = constructTablets(deviceSchemaMap, alignedDevices, tsFileWriter); + + if (!tabletList.isEmpty()) { + writeWithTablets( + sessionDataSet, 
tabletList, alignedDevices, tsFileWriter, deviceColumnIndices); + tsFileWriter.flushAllChunkGroups(); + } else { + isEmpty = true; } - tsFileWriter.flushAllChunkGroups(); } + if (isEmpty) { + ioTPrinter.println("!!!Warning:Tablet is empty,no data can be exported."); + return false; + } + return true; } - private static void writeToTsfile( - HashSet deviceFilterSet, TsFileWriter tsFileWriter, Tablet tablet) + private static void writeToTsFile( + Set deviceFilterSet, TsFileWriter tsFileWriter, Tablet tablet) throws IOException, WriteProcessException { if (deviceFilterSet.contains(tablet.deviceId)) { tsFileWriter.writeAligned(tablet); diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ImportTsFile.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFile.java similarity index 50% rename from iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ImportTsFile.java rename to iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFile.java index 576d2927beb3f..cdfbd5b4437ad 100644 --- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ImportTsFile.java +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFile.java @@ -17,9 +17,10 @@ * under the License. */ -package org.apache.iotdb.tool; +package org.apache.iotdb.tool.tsfile; import org.apache.iotdb.cli.utils.IoTPrinter; +import org.apache.iotdb.commons.utils.NodeUrlUtils; import org.apache.iotdb.session.pool.SessionPool; import org.apache.commons.cli.CommandLine; @@ -31,18 +32,13 @@ import java.io.File; import java.io.IOException; -import java.nio.file.FileAlreadyExistsException; +import java.net.UnknownHostException; import java.nio.file.Files; -import java.nio.file.Path; import java.nio.file.Paths; -import java.nio.file.StandardCopyOption; import java.util.ArrayList; -import java.util.HashSet; +import java.util.Collections; import java.util.List; import java.util.Objects; -import java.util.Set; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.atomic.LongAdder; public class ImportTsFile extends AbstractTsFileTool { @@ -64,15 +60,19 @@ public class ImportTsFile extends AbstractTsFileTool { private static final String THREAD_NUM_ARGS = "tn"; private static final String THREAD_NUM_NAME = "thread_num"; - private static final IoTPrinter ioTPrinter = new IoTPrinter(System.out); + protected static final String VERIFY_ARGS = "v"; + protected static final String VERIFY_NAME = "verify"; + + private static final IoTPrinter IOT_PRINTER = new IoTPrinter(System.out); private static final String TS_FILE_CLI_PREFIX = "ImportTsFile"; - private static final String RESOURCE = ".resource"; - private static final String MODS = ".mods"; + protected static String timestampPrecision = "ms"; + private static final String TIMESTAMP_PRECISION_ARGS = "tp"; + private static final String TIMESTAMP_PRECISION_NAME = "timestamp_precision"; + private static final String TIMESTAMP_PRECISION_ARGS_NAME = "timestamp precision (ms/us/ns)"; private static String source; - private static String sourceFullPath; private static String successDir = "success/"; private static String failDir = "fail/"; @@ -82,15 +82,8 @@ public class ImportTsFile extends AbstractTsFileTool { private static int threadNum = 8; - private static final LongAdder loadFileSuccessfulNum = new LongAdder(); - private static final LongAdder loadFileFailedNum = new LongAdder(); - private static final LongAdder processingLoadSuccessfulFileSuccessfulNum = new LongAdder(); - private static final LongAdder 
processingLoadFailedFileSuccessfulNum = new LongAdder(); - - private static final LinkedBlockingQueue tsfileQueue = new LinkedBlockingQueue<>(); - private static final Set tsfileSet = new HashSet<>(); - private static final Set resourceOrModsSet = new HashSet<>(); - + private static boolean isRemoteLoad = true; + protected static boolean verify = true; private static SessionPool sessionPool; private static void createOptions() { @@ -158,6 +151,16 @@ private static void createOptions() { .desc("The number of threads used to import tsfile, default is 8.") .build(); options.addOption(opThreadNum); + + Option opTimestampPrecision = + Option.builder(TIMESTAMP_PRECISION_ARGS) + .longOpt(TIMESTAMP_PRECISION_NAME) + .argName(TIMESTAMP_PRECISION_ARGS_NAME) + .hasArg() + .desc("Timestamp precision (ms/us/ns) (optional)") + .build(); + + options.addOption(opTimestampPrecision); } public static void main(String[] args) { @@ -171,7 +174,7 @@ public static void main(String[] args) { helpFormatter.setWidth(MAX_HELP_CONSOLE_WIDTH); if (args == null || args.length == 0) { - ioTPrinter.println("Too few arguments, please check the following hint."); + IOT_PRINTER.println("Too few arguments, please check the following hint."); helpFormatter.printHelp(TS_FILE_CLI_PREFIX, options, true); System.exit(CODE_ERROR); } @@ -182,7 +185,7 @@ public static void main(String[] args) { System.exit(CODE_OK); } } catch (ParseException e) { - ioTPrinter.println("Failed to parse the provided options: " + e.getMessage()); + IOT_PRINTER.println("Failed to parse the provided options: " + e.getMessage()); helpFormatter.printHelp(TS_FILE_CLI_PREFIX, options, true); System.exit(CODE_ERROR); } @@ -191,7 +194,7 @@ public static void main(String[] args) { try { commandLine = parser.parse(options, args, true); } catch (ParseException e) { - ioTPrinter.println("Failed to parse the provided options: " + e.getMessage()); + IOT_PRINTER.println("Failed to parse the provided options: " + e.getMessage()); helpFormatter.printHelp(TS_FILE_CLI_PREFIX, options, true); System.exit(CODE_ERROR); } @@ -200,45 +203,72 @@ public static void main(String[] args) { parseBasicParams(commandLine); parseSpecialParams(commandLine); } catch (Exception e) { - ioTPrinter.println("Encounter an error when parsing the provided options: " + e.getMessage()); + IOT_PRINTER.println( + "Encounter an error when parsing the provided options: " + e.getMessage()); + System.exit(CODE_ERROR); + } + + IOT_PRINTER.println(isRemoteLoad ? "Load remotely." 
: "Load locally."); + final int resultCode = importFromTargetPath(); + + ImportTsFileBase.printResult(startTime); + System.exit(resultCode); + } + + private static void checkTimePrecision() { + String precision = null; + if (!isRemoteLoad) { + try { + precision = sessionPool.getTimestampPrecision(); + } catch (Exception e) { + IOT_PRINTER.println( + "Encounter an error when get IoTDB server timestampPrecision : " + e.getMessage()); + System.exit(CODE_ERROR); + } + if (!timestampPrecision.equalsIgnoreCase(precision)) { + IOT_PRINTER.println( + String.format( + "Encounter an error The time accuracy of the iotdb server is \"%s\", but the client time accuracy is %s", + precision, timestampPrecision)); + System.exit(CODE_ERROR); + } + } + } + + public static void importData(CommandLine commandLine) { + long startTime = System.currentTimeMillis(); + createOptions(); + final HelpFormatter helpFormatter = new HelpFormatter(); + helpFormatter.setOptionComparator(null); + helpFormatter.setWidth(MAX_HELP_CONSOLE_WIDTH); + try { + parseBasicParams(commandLine); + parseSpecialParams(commandLine); + } catch (Exception e) { + IOT_PRINTER.println( + "Encounter an error when parsing the provided options: " + e.getMessage()); System.exit(CODE_ERROR); } + IOT_PRINTER.println(isRemoteLoad ? "Load remotely." : "Load locally."); + final int resultCode = importFromTargetPath(); - ioTPrinter.println( - "Successfully load " - + loadFileSuccessfulNum.sum() - + " tsfile(s) (--on_success operation(s): " - + processingLoadSuccessfulFileSuccessfulNum.sum() - + " succeed, " - + (loadFileSuccessfulNum.sum() - processingLoadSuccessfulFileSuccessfulNum.sum()) - + " failed)"); - ioTPrinter.println( - "Failed to load " - + loadFileFailedNum.sum() - + " file(s) (--on_fail operation(s): " - + processingLoadFailedFileSuccessfulNum.sum() - + " succeed, " - + (loadFileFailedNum.sum() - processingLoadFailedFileSuccessfulNum.sum()) - + " failed)"); - ioTPrinter.println("For more details, please check the log."); - ioTPrinter.println( - "Total operation time: " + (System.currentTimeMillis() - startTime) + " ms."); - ioTPrinter.println("Work has been completed!"); + + ImportTsFileBase.printResult(startTime); System.exit(resultCode); } private static void parseSpecialParams(CommandLine commandLine) { source = commandLine.getOptionValue(SOURCE_ARGS); if (!Files.exists(Paths.get(source))) { - ioTPrinter.println(String.format("Source file or directory %s does not exist", source)); + IOT_PRINTER.println(String.format("Source file or directory %s does not exist", source)); System.exit(CODE_ERROR); } final String onSuccess = commandLine.getOptionValue(ON_SUCCESS_ARGS).trim().toLowerCase(); final String onFail = commandLine.getOptionValue(ON_FAIL_ARGS).trim().toLowerCase(); if (!Operation.isValidOperation(onSuccess) || !Operation.isValidOperation(onFail)) { - ioTPrinter.println("Args error: os/of must be one of none, mv, cp, delete"); + IOT_PRINTER.println("Args error: os/of must be one of none, mv, cp, delete"); System.exit(CODE_ERROR); } @@ -262,6 +292,21 @@ private static void parseSpecialParams(CommandLine commandLine) { if (commandLine.getOptionValue(THREAD_NUM_ARGS) != null) { threadNum = Integer.parseInt(commandLine.getOptionValue(THREAD_NUM_ARGS)); } + + try { + isRemoteLoad = !NodeUrlUtils.containsLocalAddress(Collections.singletonList(host)); + } catch (UnknownHostException e) { + IOT_PRINTER.println( + "Unknown host: " + host + ". Exception: " + e.getMessage() + ". 
Will use remote load."); + } + if (commandLine.getOptionValue(TIMESTAMP_PRECISION_ARGS) != null) { + timestampPrecision = commandLine.getOptionValue(TIMESTAMP_PRECISION_ARGS); + } + + verify = + null != commandLine.getOptionValue(VERIFY_ARGS) + ? Boolean.parseBoolean(commandLine.getOptionValue(VERIFY_ARGS)) + : verify; } public static boolean isFileStoreEquals(String pathString, File dir) { @@ -269,7 +314,7 @@ public static boolean isFileStoreEquals(String pathString, File dir) { return Objects.equals( Files.getFileStore(Paths.get(pathString)), Files.getFileStore(dir.toPath())); } catch (IOException e) { - ioTPrinter.println("IOException when checking file store: " + e.getMessage()); + IOT_PRINTER.println("IOException when checking file store: " + e.getMessage()); return false; } } @@ -281,7 +326,7 @@ public static File createSuccessDir(CommandLine commandLine) { File file = new File(successDir); if (!file.isDirectory()) { if (!file.mkdirs()) { - ioTPrinter.println(String.format("Failed to create %s %s", SUCCESS_DIR_NAME, successDir)); + IOT_PRINTER.println(String.format("Failed to create %s %s", SUCCESS_DIR_NAME, successDir)); System.exit(CODE_ERROR); } } @@ -295,7 +340,7 @@ public static File createFailDir(CommandLine commandLine) { File file = new File(failDir); if (!file.isDirectory()) { if (!file.mkdirs()) { - ioTPrinter.println(String.format("Failed to create %s %s", FAIL_DIR_NAME, failDir)); + IOT_PRINTER.println(String.format("Failed to create %s %s", FAIL_DIR_NAME, failDir)); System.exit(CODE_ERROR); } } @@ -304,13 +349,6 @@ public static File createFailDir(CommandLine commandLine) { public static int importFromTargetPath() { try { - final File file = new File(source); - sourceFullPath = file.getAbsolutePath(); - if (!file.isFile() && !file.isDirectory()) { - ioTPrinter.println(String.format("source file or directory %s does not exist", source)); - return CODE_ERROR; - } - sessionPool = new SessionPool.Builder() .host(host) @@ -323,16 +361,22 @@ public static int importFromTargetPath() { .enableAutoFetch(false) .build(); sessionPool.setEnableQueryRedirection(false); + checkTimePrecision(); + // set params + processSetParams(); + + ImportTsFileScanTool.traverseAndCollectFiles(); + ImportTsFileScanTool.addNoResourceOrModsToQueue(); - traverseAndCollectFiles(file); - addNoResourceOrModsToQueue(); - ioTPrinter.println("Load file total number : " + tsfileQueue.size()); asyncImportTsFiles(); return CODE_OK; } catch (InterruptedException e) { - ioTPrinter.println(String.format("Import tsfile fail: %s", e.getMessage())); + IOT_PRINTER.println(String.format("Import tsfile fail: %s", e.getMessage())); Thread.currentThread().interrupt(); return CODE_ERROR; + } catch (Exception e) { + IOT_PRINTER.println(String.format("Import tsfile fail: %s", e.getMessage())); + return CODE_ERROR; } finally { if (sessionPool != null) { sessionPool.close(); @@ -340,40 +384,39 @@ public static int importFromTargetPath() { } } - public static void traverseAndCollectFiles(File file) throws InterruptedException { - if (file.isFile()) { - if (file.getName().endsWith(RESOURCE) || file.getName().endsWith(MODS)) { - resourceOrModsSet.add(file.getAbsolutePath()); - } else { - tsfileSet.add(file.getAbsolutePath()); - tsfileQueue.put(file.getAbsolutePath()); - } - } else if (file.isDirectory()) { - final File[] files = file.listFiles(); - if (files != null) { - for (File f : files) { - traverseAndCollectFiles(f); - } - } + // process other classes need param + private static void processSetParams() { + // 
ImportTsFileLocally + final File file = new File(source); + ImportTsFileScanTool.setSourceFullPath(file.getAbsolutePath()); + if (!file.isFile() && !file.isDirectory()) { + IOT_PRINTER.println(String.format("Source file or directory %s does not exist", source)); + System.exit(CODE_ERROR); } - } - public static void addNoResourceOrModsToQueue() throws InterruptedException { - for (final String filePath : resourceOrModsSet) { - final String tsfilePath = - filePath.endsWith(RESOURCE) - ? filePath.substring(0, filePath.length() - RESOURCE.length()) - : filePath.substring(0, filePath.length() - MODS.length()); - if (!tsfileSet.contains(tsfilePath)) { - tsfileQueue.put(filePath); - } - } + ImportTsFileLocally.setSessionPool(sessionPool); + ImportTsFileLocally.setVerify(verify); + + // ImportTsFileRemotely + ImportTsFileRemotely.setHost(host); + ImportTsFileRemotely.setPort(port); + ImportTsFileRemotely.setUsername(username); + ImportTsFileRemotely.setPassword(password); + ImportTsFileRemotely.setValidateTsFile(verify); + + // ImportTsFileBase + ImportTsFileBase.setSuccessAndFailDirAndOperation( + successDir, successOperation, failDir, failOperation); } public static void asyncImportTsFiles() { final List list = new ArrayList<>(threadNum); for (int i = 0; i < threadNum; i++) { - final Thread thread = new Thread(ImportTsFile::importTsFile); + final Thread thread = + new Thread( + isRemoteLoad + ? new ImportTsFileRemotely(timestampPrecision) + : new ImportTsFileLocally()); thread.start(); list.add(thread); } @@ -383,164 +426,11 @@ public static void asyncImportTsFiles() { thread.join(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - ioTPrinter.println("importTsFile thread join interrupted: " + e.getMessage()); + IOT_PRINTER.println("ImportTsFile thread join interrupted: " + e.getMessage()); } }); } - public static void importTsFile() { - String filePath; - try { - while ((filePath = tsfileQueue.poll()) != null) { - final String sql = "load '" + filePath + "' onSuccess=none "; - - try { - sessionPool.executeNonQueryStatement(sql); - - loadFileSuccessfulNum.increment(); - ioTPrinter.println("Imported [ " + filePath + " ] file successfully!"); - - try { - processingFile(filePath, successDir, successOperation); - processingLoadSuccessfulFileSuccessfulNum.increment(); - ioTPrinter.println("Processed success file [ " + filePath + " ] successfully!"); - } catch (Exception processSuccessException) { - ioTPrinter.println( - "Failed to process success file [ " - + filePath - + " ]: " - + processSuccessException.getMessage()); - } - } catch (Exception e) { - // Reject because of memory controls, do retry later - if (Objects.nonNull(e.getMessage()) && e.getMessage().contains("memory")) { - ioTPrinter.println( - "Rejecting file [ " + filePath + " ] due to memory constraints, will retry later."); - tsfileQueue.put(filePath); - continue; - } - - loadFileFailedNum.increment(); - ioTPrinter.println("Failed to import [ " + filePath + " ] file: " + e.getMessage()); - - try { - processingFile(filePath, failDir, failOperation); - processingLoadFailedFileSuccessfulNum.increment(); - ioTPrinter.println("Processed fail file [ " + filePath + " ] successfully!"); - } catch (Exception processFailException) { - ioTPrinter.println( - "Failed to process fail file [ " - + filePath - + " ]: " - + processFailException.getMessage()); - } - } - } - } catch (InterruptedException e) { - ioTPrinter.println("Unexpected error occurred: " + e.getMessage()); - Thread.currentThread().interrupt(); - } catch 
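/*
 * Thread fan-out above: each worker is one Runnable draining the shared scan
 * queue, and the Runnable class is chosen once per process from isRemoteLoad
 * (threadNum defaults to 8 in this class). Minimal shape of the pattern:
 *
 *   List<Thread> workers = new ArrayList<>(threadNum);
 *   for (int i = 0; i < threadNum; i++) {
 *     Thread t = new Thread(
 *         isRemoteLoad ? new ImportTsFileRemotely(timestampPrecision) : new ImportTsFileLocally());
 *     t.start();
 *     workers.add(t);
 *   }
 *   for (Thread t : workers) {
 *     t.join();                       // wait until the queue is drained
 *   }
 */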
(Exception e) { - ioTPrinter.println("Unexpected error occurred: " + e.getMessage()); - } - } - - public static void processingFile(String filePath, String dir, Operation operation) { - String relativePath = filePath.substring(sourceFullPath.length() + 1); - Path sourcePath = Paths.get(filePath); - - String target = dir + File.separator + relativePath.replace(File.separator, "_"); - Path targetPath = Paths.get(target); - - Path sourceResourcePath = Paths.get(sourcePath + RESOURCE); - sourceResourcePath = Files.exists(sourceResourcePath) ? sourceResourcePath : null; - Path targetResourcePath = Paths.get(target + RESOURCE); - - Path sourceModsPath = Paths.get(sourcePath + MODS); - sourceModsPath = Files.exists(sourceModsPath) ? sourceModsPath : null; - Path targetModsPath = Paths.get(target + MODS); - - switch (operation) { - case DELETE: - { - try { - Files.deleteIfExists(sourcePath); - if (null != sourceResourcePath) { - Files.deleteIfExists(sourceResourcePath); - } - if (null != sourceModsPath) { - Files.deleteIfExists(sourceModsPath); - } - } catch (Exception e) { - ioTPrinter.println(String.format("Failed to delete file: %s", e.getMessage())); - } - break; - } - case CP: - { - try { - Files.copy(sourcePath, targetPath, StandardCopyOption.REPLACE_EXISTING); - if (null != sourceResourcePath) { - Files.copy( - sourceResourcePath, targetResourcePath, StandardCopyOption.REPLACE_EXISTING); - } - if (null != sourceModsPath) { - Files.copy(sourceModsPath, targetModsPath, StandardCopyOption.REPLACE_EXISTING); - } - } catch (Exception e) { - ioTPrinter.println(String.format("Failed to copy file: %s", e.getMessage())); - } - break; - } - case HARDLINK: - { - try { - Files.createLink(targetPath, sourcePath); - } catch (FileAlreadyExistsException e) { - ioTPrinter.println("Hardlink already exists: " + e.getMessage()); - } catch (Exception e) { - try { - Files.copy(sourcePath, targetPath, StandardCopyOption.REPLACE_EXISTING); - } catch (Exception copyException) { - ioTPrinter.println( - String.format("Failed to copy file: %s", copyException.getMessage())); - } - } - - try { - if (null != sourceResourcePath) { - Files.copy( - sourceResourcePath, targetResourcePath, StandardCopyOption.REPLACE_EXISTING); - } - if (null != sourceModsPath) { - Files.copy(sourceModsPath, targetModsPath, StandardCopyOption.REPLACE_EXISTING); - } - } catch (Exception e) { - ioTPrinter.println( - String.format("Failed to copy resource or mods file: %s", e.getMessage())); - } - break; - } - case MV: - { - try { - Files.move(sourcePath, targetPath, StandardCopyOption.REPLACE_EXISTING); - if (null != sourceResourcePath) { - Files.move( - sourceResourcePath, targetResourcePath, StandardCopyOption.REPLACE_EXISTING); - } - if (null != sourceModsPath) { - Files.move(sourceModsPath, targetModsPath, StandardCopyOption.REPLACE_EXISTING); - } - } catch (Exception e) { - ioTPrinter.println(String.format("Failed to move file: %s", e.getMessage())); - } - break; - } - default: - break; - } - } - public enum Operation { NONE, MV, @@ -571,7 +461,7 @@ public static Operation getOperation(String operation, boolean isFileStoreEquals case "delete": return Operation.DELETE; default: - ioTPrinter.println("Args error: os/of must be one of none, mv, cp, delete"); + IOT_PRINTER.println("Args error: os/of must be one of none, mv, cp, delete"); System.exit(CODE_ERROR); return null; } diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFileBase.java 
b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFileBase.java new file mode 100644 index 0000000000000..6d6f95dcb827a --- /dev/null +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFileBase.java @@ -0,0 +1,251 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.tool.tsfile; + +import org.apache.iotdb.cli.utils.IoTPrinter; + +import java.io.File; +import java.nio.file.FileAlreadyExistsException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.util.Objects; +import java.util.concurrent.atomic.LongAdder; + +public abstract class ImportTsFileBase implements Runnable { + + private static final IoTPrinter IOT_PRINTER = new IoTPrinter(System.out); + + private static final LongAdder loadFileSuccessfulNum = new LongAdder(); + private static final LongAdder loadFileFailedNum = new LongAdder(); + private static final LongAdder processingLoadSuccessfulFileSuccessfulNum = new LongAdder(); + private static final LongAdder processingLoadFailedFileSuccessfulNum = new LongAdder(); + private static String timePrecision = "ms"; + private static String successDir; + private static ImportTsFile.Operation successOperation; + private static String failDir; + private static ImportTsFile.Operation failOperation; + + @Override + public void run() { + loadTsFile(); + } + + protected abstract void loadTsFile(); + + protected void processFailFile(final String filePath, final Exception e) { + // Reject because of memory controls, do retry later + try { + if (Objects.nonNull(e.getMessage()) && e.getMessage().contains("memory")) { + IOT_PRINTER.println( + "Rejecting file [ " + filePath + " ] due to memory constraints, will retry later."); + ImportTsFileScanTool.putToQueue(filePath); + return; + } + + loadFileFailedNum.increment(); + IOT_PRINTER.println("Failed to import [ " + filePath + " ] file: " + e.getMessage()); + + try { + processingFile(filePath, false); + processingLoadFailedFileSuccessfulNum.increment(); + IOT_PRINTER.println("Processed fail file [ " + filePath + " ] successfully!"); + } catch (final Exception processFailException) { + IOT_PRINTER.println( + "Failed to process fail file [ " + + filePath + + " ]: " + + processFailException.getMessage()); + } + } catch (final InterruptedException e1) { + IOT_PRINTER.println("Unexpected error occurred: " + e1.getMessage()); + Thread.currentThread().interrupt(); + } catch (final Exception e1) { + IOT_PRINTER.println("Unexpected error occurred: " + e1.getMessage()); + } + } + + protected static void processSuccessFile(final String filePath) { + loadFileSuccessfulNum.increment(); + IOT_PRINTER.println("Imported [ " + filePath + " ] file 
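/*
 * processFailFile above separates back-pressure from real failures: a load the
 * server rejected for memory reasons is put back on the queue and retried,
 * anything else is counted as failed and handed to the --on_fail operation. The
 * decision, reduced to its core:
 *
 *   if (e.getMessage() != null && e.getMessage().contains("memory")) {
 *     ImportTsFileScanTool.putToQueue(filePath);   // retry later
 *   } else {
 *     loadFileFailedNum.increment();
 *     processingFile(filePath, false);             // mv/cp/delete per --on_fail
 *   }
 */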
successfully!"); + + try { + processingFile(filePath, true); + processingLoadSuccessfulFileSuccessfulNum.increment(); + IOT_PRINTER.println("Processed success file [ " + filePath + " ] successfully!"); + } catch (final Exception processSuccessException) { + IOT_PRINTER.println( + "Failed to process success file [ " + + filePath + + " ]: " + + processSuccessException.getMessage()); + } + } + + public static void processingFile(final String filePath, final boolean isSuccess) { + final String relativePath = + filePath.substring(ImportTsFileScanTool.getSourceFullPathLength() + 1); + final Path sourcePath = Paths.get(filePath); + + final String target = + isSuccess + ? successDir + : failDir + File.separator + relativePath.replace(File.separator, "_"); + final Path targetPath = Paths.get(target); + + final String RESOURCE = ".resource"; + Path sourceResourcePath = Paths.get(sourcePath + RESOURCE); + sourceResourcePath = Files.exists(sourceResourcePath) ? sourceResourcePath : null; + final Path targetResourcePath = Paths.get(target + RESOURCE); + + final String MODS = ".mods"; + Path sourceModsPath = Paths.get(sourcePath + MODS); + sourceModsPath = Files.exists(sourceModsPath) ? sourceModsPath : null; + final Path targetModsPath = Paths.get(target + MODS); + + switch (isSuccess ? successOperation : failOperation) { + case DELETE: + { + try { + Files.deleteIfExists(sourcePath); + if (null != sourceResourcePath) { + Files.deleteIfExists(sourceResourcePath); + } + if (null != sourceModsPath) { + Files.deleteIfExists(sourceModsPath); + } + } catch (final Exception e) { + IOT_PRINTER.println(String.format("Failed to delete file: %s", e.getMessage())); + } + break; + } + case CP: + { + try { + Files.copy(sourcePath, targetPath, StandardCopyOption.REPLACE_EXISTING); + if (null != sourceResourcePath) { + Files.copy( + sourceResourcePath, targetResourcePath, StandardCopyOption.REPLACE_EXISTING); + } + if (null != sourceModsPath) { + Files.copy(sourceModsPath, targetModsPath, StandardCopyOption.REPLACE_EXISTING); + } + } catch (final Exception e) { + IOT_PRINTER.println(String.format("Failed to copy file: %s", e.getMessage())); + } + break; + } + case HARDLINK: + { + try { + Files.createLink(targetPath, sourcePath); + } catch (FileAlreadyExistsException e) { + IOT_PRINTER.println("Hardlink already exists: " + e.getMessage()); + } catch (final Exception e) { + try { + Files.copy(sourcePath, targetPath, StandardCopyOption.REPLACE_EXISTING); + } catch (final Exception copyException) { + IOT_PRINTER.println( + String.format("Failed to copy file: %s", copyException.getMessage())); + } + } + + try { + if (null != sourceResourcePath) { + Files.copy( + sourceResourcePath, targetResourcePath, StandardCopyOption.REPLACE_EXISTING); + } + if (null != sourceModsPath) { + Files.copy(sourceModsPath, targetModsPath, StandardCopyOption.REPLACE_EXISTING); + } + } catch (final Exception e) { + IOT_PRINTER.println( + String.format("Failed to copy resource or mods file: %s", e.getMessage())); + } + break; + } + case MV: + { + try { + Files.move(sourcePath, targetPath, StandardCopyOption.REPLACE_EXISTING); + if (null != sourceResourcePath) { + Files.move( + sourceResourcePath, targetResourcePath, StandardCopyOption.REPLACE_EXISTING); + } + if (null != sourceModsPath) { + Files.move(sourceModsPath, targetModsPath, StandardCopyOption.REPLACE_EXISTING); + } + } catch (final Exception e) { + IOT_PRINTER.println(String.format("Failed to move file: %s", e.getMessage())); + } + break; + } + default: + break; + } + } + + protected 
static void printResult(final long startTime) { + IOT_PRINTER.println( + "Successfully load " + + loadFileSuccessfulNum.sum() + + " tsfile(s) (--on_success operation(s): " + + processingLoadSuccessfulFileSuccessfulNum.sum() + + " succeed, " + + (loadFileSuccessfulNum.sum() - processingLoadSuccessfulFileSuccessfulNum.sum()) + + " failed)"); + IOT_PRINTER.println( + "Failed to load " + + loadFileFailedNum.sum() + + " file(s) (--on_fail operation(s): " + + processingLoadFailedFileSuccessfulNum.sum() + + " succeed, " + + (loadFileFailedNum.sum() - processingLoadFailedFileSuccessfulNum.sum()) + + " failed)"); + IOT_PRINTER.println( + "Unprocessed " + + ImportTsFileScanTool.getTsFileQueueSize() + + " file(s), due to unexpected exceptions"); + IOT_PRINTER.println("For more details, please check the log."); + IOT_PRINTER.println( + "Total operation time: " + (System.currentTimeMillis() - startTime) + " ms."); + IOT_PRINTER.println("Work has been completed!"); + } + + public static void setSuccessAndFailDirAndOperation( + final String successDir, + final ImportTsFile.Operation successOperation, + final String failDir, + final ImportTsFile.Operation failOperation) { + ImportTsFileBase.successDir = successDir; + ImportTsFileBase.successOperation = successOperation; + ImportTsFileBase.failDir = failDir; + ImportTsFileBase.failOperation = failOperation; + } + + protected void setTimePrecision(String timePrecision) { + this.timePrecision = timePrecision; + } + + protected String getTimePrecision() { + return timePrecision; + } +} diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFileLocally.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFileLocally.java new file mode 100644 index 0000000000000..f94050e98eb02 --- /dev/null +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFileLocally.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.tool.tsfile; + +import org.apache.iotdb.cli.utils.IoTPrinter; +import org.apache.iotdb.session.pool.SessionPool; + +public class ImportTsFileLocally extends ImportTsFileBase implements Runnable { + + private static final IoTPrinter ioTPrinter = new IoTPrinter(System.out); + + private static SessionPool sessionPool; + private static boolean verify; + + @Override + public void loadTsFile() { + String filePath; + try { + while ((filePath = ImportTsFileScanTool.pollFromQueue()) != null) { + final String sql = + "load '" + filePath + "' onSuccess=none " + (verify ? 
"" : "verify=false"); + try { + sessionPool.executeNonQueryStatement(sql); + + processSuccessFile(filePath); + } catch (final Exception e) { + processFailFile(filePath, e); + } + } + } catch (final Exception e) { + ioTPrinter.println("Unexpected error occurred: " + e.getMessage()); + } + } + + public static void setSessionPool(SessionPool sessionPool) { + ImportTsFileLocally.sessionPool = sessionPool; + } + + public static void setVerify(boolean verify) { + ImportTsFileLocally.verify = verify; + } +} diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFileRemotely.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFileRemotely.java new file mode 100644 index 0000000000000..d842b9803af29 --- /dev/null +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFileRemotely.java @@ -0,0 +1,353 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.tool.tsfile; + +import org.apache.iotdb.cli.utils.IoTPrinter; +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.client.property.ThriftClientProperty; +import org.apache.iotdb.commons.pipe.config.PipeConfig; +import org.apache.iotdb.commons.pipe.sink.client.IoTDBSyncClient; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.common.PipeTransferHandshakeConstant; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFilePieceReq; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.response.PipeTransferFilePieceResp; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferDataNodeHandshakeV1Req; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferDataNodeHandshakeV2Req; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFilePieceReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFilePieceWithModReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFileSealReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFileSealWithModReq; +import org.apache.iotdb.isession.SessionConfig; +import org.apache.iotdb.pipe.api.exception.PipeConnectionException; +import org.apache.iotdb.pipe.api.exception.PipeException; +import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq; +import org.apache.iotdb.service.rpc.thrift.TPipeTransferResp; + +import org.apache.thrift.transport.TTransportException; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.security.SecureRandom; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; 
+import java.util.Objects;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.LockSupport;
+
+public class ImportTsFileRemotely extends ImportTsFileBase {
+
+  private static final IoTPrinter IOT_PRINTER = new IoTPrinter(System.out);
+
+  private static final String MODS = ".mods";
+  private static final String LOAD_STRATEGY = "sync";
+  private static final Integer MAX_RETRY_COUNT = 3;
+
+  private IoTDBSyncClient client;
+
+  private static String host;
+  private static String port;
+
+  private static String username = SessionConfig.DEFAULT_USER;
+  private static String password = SessionConfig.DEFAULT_PASSWORD;
+  private static boolean validateTsFile;
+
+  public ImportTsFileRemotely(String timePrecision) {
+    setTimePrecision(timePrecision);
+    initClient();
+    sendHandshake();
+  }
+
+  @Override
+  public void loadTsFile() {
+    try {
+      String filePath;
+      while ((filePath = ImportTsFileScanTool.pollFromQueue()) != null) {
+        final File tsFile = new File(filePath);
+        try {
+          if (ImportTsFileScanTool.isContainModsFile(filePath + MODS)) {
+            doTransfer(tsFile, new File(filePath + MODS));
+          } else {
+            doTransfer(tsFile, null);
+          }
+
+          processSuccessFile(filePath);
+        } catch (final Exception e) {
+          IOT_PRINTER.println(
+              "Connection aborted, trying to reconnect, max retry count: " + MAX_RETRY_COUNT);
+
+          boolean isReconnectAndLoadSuccessful = false;
+
+          for (int i = 1; i <= MAX_RETRY_COUNT; i++) {
+            try {
+              IOT_PRINTER.println(
+                  String.format("The %sth retry will start after %s seconds.", i, i * 2));
+              LockSupport.parkNanos(TimeUnit.SECONDS.toNanos(i * 2L));
+
+              close();
+              initClient();
+              sendHandshake();
+
+              if (ImportTsFileScanTool.isContainModsFile(filePath + MODS)) {
+                doTransfer(tsFile, new File(filePath + MODS));
+              } else {
+                doTransfer(tsFile, null);
+              }
+
+              processSuccessFile(filePath);
+              isReconnectAndLoadSuccessful = true;
+
+              IOT_PRINTER.println("Reconnect successful.");
+              break;
+            } catch (final Exception e1) {
+              IOT_PRINTER.println(String.format("The %sth reconnect failed.", i));
+            }
+          }
+
+          if (!isReconnectAndLoadSuccessful) {
+            processFailFile(filePath, e);
+
+            close();
+            initClient();
+            sendHandshake();
+          }
+        }
+      }
+    } catch (final Exception e) {
+      IOT_PRINTER.println("Unexpected error occurred: " + e.getMessage());
+    } finally {
+      close();
+    }
+  }
+
+  public void sendHandshake() {
+    try {
+      final Map<String, String> params = constructParamsMap();
+      TPipeTransferResp resp =
+          client.pipeTransfer(PipeTransferDataNodeHandshakeV2Req.toTPipeTransferReq(params));
+
+      if (resp.getStatus().getCode() == TSStatusCode.PIPE_TYPE_ERROR.getStatusCode()) {
+        IOT_PRINTER.println(
+            String.format(
+                "Handshake error with target server ip: %s, port: %s, because: %s. "
+                    + "Retrying handshake with PipeTransferHandshakeV1Req.",
+                client.getIpAddress(), client.getPort(), resp.getStatus()));
+        resp =
+            client.pipeTransfer(
+                PipeTransferDataNodeHandshakeV1Req.toTPipeTransferReq(getTimePrecision()));
+      }
+
+      if (resp.getStatus().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+        throw new PipeConnectionException(
+            String.format(
+                "Handshake error with target server ip: %s, port: %s, because: %s.",
+                client.getIpAddress(), client.getPort(), resp.getStatus()));
+      } else {
+        client.setTimeout(PipeConfig.getInstance().getPipeConnectorTransferTimeoutMs());
+        IOT_PRINTER.println(
+            String.format(
+                "Handshake success. 
Target server ip: %s, port: %s", + client.getIpAddress(), client.getPort())); + } + } catch (final Exception e) { + throw new PipeException( + String.format( + "Handshake error with target server ip: %s, port: %s, because: %s.", + client.getIpAddress(), client.getPort(), e.getMessage())); + } + } + + private Map constructParamsMap() { + final Map params = new HashMap<>(); + params.put(PipeTransferHandshakeConstant.HANDSHAKE_KEY_TIME_PRECISION, getTimePrecision()); + params.put(PipeTransferHandshakeConstant.HANDSHAKE_KEY_CLUSTER_ID, getClusterId()); + params.put( + PipeTransferHandshakeConstant.HANDSHAKE_KEY_CONVERT_ON_TYPE_MISMATCH, + Boolean.toString(true)); + params.put(PipeTransferHandshakeConstant.HANDSHAKE_KEY_LOAD_TSFILE_STRATEGY, LOAD_STRATEGY); + params.put(PipeTransferHandshakeConstant.HANDSHAKE_KEY_USERNAME, username); + params.put(PipeTransferHandshakeConstant.HANDSHAKE_KEY_PASSWORD, password); + params.put( + PipeTransferHandshakeConstant.HANDSHAKE_KEY_VALIDATE_TSFILE, + Boolean.toString(validateTsFile)); + return params; + } + + public void doTransfer(final File tsFile, final File modFile) throws PipeException, IOException { + final TPipeTransferResp resp; + final TPipeTransferReq req; + + if (Objects.nonNull(modFile)) { + transferFilePieces(modFile, true); + transferFilePieces(tsFile, true); + + req = + PipeTransferTsFileSealWithModReq.toTPipeTransferReq( + modFile.getName(), modFile.length(), tsFile.getName(), tsFile.length()); + } else { + transferFilePieces(tsFile, false); + + req = PipeTransferTsFileSealReq.toTPipeTransferReq(tsFile.getName(), tsFile.length()); + } + + try { + resp = client.pipeTransfer(req); + } catch (final Exception e) { + throw new PipeConnectionException( + String.format("Network error when seal file %s, because %s.", tsFile, e.getMessage()), e); + } + + final TSStatus status = resp.getStatus(); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() + && status.getCode() != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) { + throw new PipeConnectionException( + String.format("Seal file %s error, result status %s.", tsFile, status)); + } + + IOT_PRINTER.println("Successfully transferred file " + tsFile); + } + + private void transferFilePieces(final File file, final boolean isMultiFile) + throws PipeException, IOException { + final int readFileBufferSize = PipeConfig.getInstance().getPipeConnectorReadFileBufferSize(); + final byte[] readBuffer = new byte[readFileBufferSize]; + long position = 0; + try (final RandomAccessFile reader = new RandomAccessFile(file, "r")) { + while (true) { + final int readLength = reader.read(readBuffer); + if (readLength == -1) { + break; + } + + final byte[] payLoad = + readLength == readFileBufferSize + ? readBuffer + : Arrays.copyOfRange(readBuffer, 0, readLength); + final PipeTransferFilePieceResp resp; + try { + final TPipeTransferReq req = + isMultiFile + ? 
getTransferMultiFilePieceReq(file.getName(), position, payLoad) + : getTransferSingleFilePieceReq(file.getName(), position, payLoad); + resp = PipeTransferFilePieceResp.fromTPipeTransferResp(client.pipeTransfer(req)); + } catch (final Exception e) { + throw new PipeConnectionException( + String.format( + "Network error when transfer file %s, because %s.", file, e.getMessage()), + e); + } + + position += readLength; + + final TSStatus status = resp.getStatus(); + if (status.getCode() == TSStatusCode.PIPE_TRANSFER_FILE_OFFSET_RESET.getStatusCode()) { + position = resp.getEndWritingOffset(); + reader.seek(position); + IOT_PRINTER.println(String.format("Redirect file position to %s.", position)); + continue; + } + + if (status.getCode() + == TSStatusCode.PIPE_CONFIG_RECEIVER_HANDSHAKE_NEEDED.getStatusCode()) { + sendHandshake(); + } + // Only handle the failed statuses to avoid string format performance overhead + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() + && status.getCode() != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) { + throw new PipeException( + String.format("Transfer file %s error, result status %s.", file, status)); + } + } + } + } + + private PipeTransferFilePieceReq getTransferMultiFilePieceReq( + final String fileName, final long position, final byte[] payLoad) throws IOException { + return PipeTransferTsFilePieceWithModReq.toTPipeTransferReq(fileName, position, payLoad); + } + + private PipeTransferFilePieceReq getTransferSingleFilePieceReq( + final String fileName, final long position, final byte[] payLoad) throws IOException { + return PipeTransferTsFilePieceReq.toTPipeTransferReq(fileName, position, payLoad); + } + + private void initClient() { + try { + this.client = + new IoTDBSyncClient( + new ThriftClientProperty.Builder() + .setConnectionTimeoutMs( + PipeConfig.getInstance().getPipeConnectorHandshakeTimeoutMs()) + .setRpcThriftCompressionEnabled( + PipeConfig.getInstance().isPipeConnectorRPCThriftCompressionEnabled()) + .build(), + getEndPoint().getIp(), + getEndPoint().getPort(), + false, + "", + ""); + } catch (final TTransportException e) { + throw new PipeException("Sync client init error because " + e.getMessage()); + } + } + + private TEndPoint getEndPoint() { + return new TEndPoint(host, Integer.parseInt(port)); + } + + private String getClusterId() { + final SecureRandom random = new SecureRandom(); + final byte[] bytes = new byte[32]; // 32 bytes = 256 bits + random.nextBytes(bytes); + return "TSFILE-IMPORTER-" + UUID.nameUUIDFromBytes(bytes); + } + + private void close() { + try { + if (this.client != null) { + this.client.close(); + } + } catch (final Exception e) { + IOT_PRINTER.println("Failed to close client because " + e.getMessage()); + } + } + + public static void setHost(final String host) { + ImportTsFileRemotely.host = host; + } + + public static void setPort(final String port) { + ImportTsFileRemotely.port = port; + } + + public static void setUsername(final String username) { + ImportTsFileRemotely.username = username; + } + + public static void setPassword(final String password) { + ImportTsFileRemotely.password = password; + } + + public static void setValidateTsFile(final boolean validateTsFile) { + ImportTsFileRemotely.validateTsFile = validateTsFile; + } +} diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFileScanTool.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFileScanTool.java new file mode 100644 index 0000000000000..4891f42548616 --- /dev/null 
+++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFileScanTool.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.tool.tsfile;
+
+import java.io.File;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.LinkedBlockingQueue;
+
+public class ImportTsFileScanTool {
+
+  private static final String RESOURCE = ".resource";
+  private static final String MODS = ".mods";
+
+  private static final LinkedBlockingQueue<String> tsfileQueue = new LinkedBlockingQueue<>();
+  private static final Set<String> tsfileSet = new HashSet<>();
+  private static final Set<String> resourceOrModsSet = new HashSet<>();
+  private static String sourceFullPath;
+
+  public static void traverseAndCollectFiles() throws InterruptedException {
+    traverseAndCollectFilesBySourceFullPath(new File(sourceFullPath));
+  }
+
+  private static void traverseAndCollectFilesBySourceFullPath(final File file)
+      throws InterruptedException {
+    if (file.isFile()) {
+      if (file.getName().endsWith(RESOURCE) || file.getName().endsWith(MODS)) {
+        resourceOrModsSet.add(file.getAbsolutePath());
+      } else {
+        tsfileSet.add(file.getAbsolutePath());
+        tsfileQueue.put(file.getAbsolutePath());
+      }
+    } else if (file.isDirectory()) {
+      final File[] files = file.listFiles();
+      if (files != null) {
+        for (File f : files) {
+          traverseAndCollectFilesBySourceFullPath(f);
+        }
+      }
+    }
+  }
+
+  public static void addNoResourceOrModsToQueue() throws InterruptedException {
+    for (final String filePath : resourceOrModsSet) {
+      final String tsfilePath =
+          filePath.endsWith(RESOURCE)
+              ? filePath.substring(0, filePath.length() - RESOURCE.length())
+              : filePath.substring(0, filePath.length() - MODS.length());
+      if (!tsfileSet.contains(tsfilePath)) {
+        tsfileQueue.put(filePath);
+      }
+    }
+  }
+
+  public static boolean isContainModsFile(final String modsFilePath) {
+    return ImportTsFileScanTool.resourceOrModsSet.contains(modsFilePath);
+  }
+
+  public static String pollFromQueue() {
+    return ImportTsFileScanTool.tsfileQueue.poll();
+  }
+
+  public static void putToQueue(final String filePath) throws InterruptedException {
+    ImportTsFileScanTool.tsfileQueue.put(filePath);
+  }
+
+  public static void setSourceFullPath(final String sourceFullPath) {
+    ImportTsFileScanTool.sourceFullPath = sourceFullPath;
+  }
+
+  public static int getSourceFullPathLength() {
+    return new File(sourceFullPath).isDirectory() ? 
ImportTsFileScanTool.sourceFullPath.length() + : new File(ImportTsFileScanTool.sourceFullPath).getParent().length(); + } + + public static int getTsFileQueueSize() { + return ImportTsFileScanTool.tsfileQueue.size(); + } +} diff --git a/iotdb-client/cli/src/test/java/org/apache/iotdb/tool/WriteDataFileTest.java b/iotdb-client/cli/src/test/java/org/apache/iotdb/tool/WriteDataFileTest.java index eb9e01d293700..f9b43b8a88079 100644 --- a/iotdb-client/cli/src/test/java/org/apache/iotdb/tool/WriteDataFileTest.java +++ b/iotdb-client/cli/src/test/java/org/apache/iotdb/tool/WriteDataFileTest.java @@ -19,6 +19,8 @@ package org.apache.iotdb.tool; +import org.apache.iotdb.tool.data.AbstractDataTool; + import org.junit.Before; import org.junit.Test; diff --git a/iotdb-client/client-cpp/pom.xml b/iotdb-client/client-cpp/pom.xml index 6aa597170f3a5..040925614e9a6 100644 --- a/iotdb-client/client-cpp/pom.xml +++ b/iotdb-client/client-cpp/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-client - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT client-cpp pom @@ -43,7 +43,7 @@ org.apache.iotdb iotdb-thrift-commons - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT provided @@ -72,6 +72,32 @@ + + org.apache.maven.plugins + maven-resources-plugin + + + copy-cpp-files + + copy-resources + + validate + + ${project.build.directory}/build/main/generated-sources-cpp + + + ${project.basedir}/src/main + + **/*.h + **/*.cpp + **/*.cc + + + + + + + org.apache.maven.plugins maven-dependency-plugin @@ -155,14 +181,6 @@ ${project.basedir}/src/main/CMakeLists.txt ${project.build.directory}/build/main/CMakeLists.txt - - ${project.basedir}/src/main/Session.h - ${project.build.directory}/build/main/generated-sources-cpp/Session.h - - - ${project.basedir}/src/main/Session.cpp - ${project.build.directory}/build/main/generated-sources-cpp/Session.cpp - diff --git a/iotdb-client/client-cpp/src/main/AbstractSessionBuilder.h b/iotdb-client/client-cpp/src/main/AbstractSessionBuilder.h new file mode 100644 index 0000000000000..441e3a41f4fac --- /dev/null +++ b/iotdb-client/client-cpp/src/main/AbstractSessionBuilder.h @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +#ifndef IOTDB_ABSTRACTSESSIONBUILDER_H +#define IOTDB_ABSTRACTSESSIONBUILDER_H + +#include + +class AbstractSessionBuilder { +public: + std::string host = "localhost"; + int rpcPort = 6667; + std::string username = "root"; + std::string password = "root"; + std::string zoneId = ""; + int fetchSize = 10000; + std::string sqlDialect = "tree"; + std::string database = ""; + bool enableAutoFetch = true; + bool enableRedirections = true; + bool enableRPCCompression = false; +}; + +#endif // IOTDB_ABSTRACTSESSIONBUILDER_H \ No newline at end of file diff --git a/iotdb-client/client-cpp/src/main/Column.cpp b/iotdb-client/client-cpp/src/main/Column.cpp new file mode 100644 index 0000000000000..4c533676d075a --- /dev/null +++ b/iotdb-client/client-cpp/src/main/Column.cpp @@ -0,0 +1,359 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#include "Column.h" +#include "ColumnDecoder.h" + +TimeColumn::TimeColumn(int32_t arrayOffset, int32_t positionCount, const std::vector& values) + : arrayOffset_(arrayOffset), positionCount_(positionCount), values_(values) { + if (arrayOffset < 0) throw IoTDBException("arrayOffset is negative"); + if (positionCount < 0) throw IoTDBException("positionCount is negative"); + if (static_cast(values.size()) - arrayOffset < positionCount) { + throw IoTDBException("values length is less than positionCount"); + } +} + +TSDataType::TSDataType TimeColumn::getDataType() const { return TSDataType::INT64; } +ColumnEncoding TimeColumn::getEncoding() const { return ColumnEncoding::Int64Array; } + +int64_t TimeColumn::getLong(int32_t position) const { + return values_[position + arrayOffset_]; +} + +bool TimeColumn::mayHaveNull() const { return false; } +bool TimeColumn::isNull(int32_t position) const { return false; } +std::vector TimeColumn::isNulls() const { return {}; } + +int32_t TimeColumn::getPositionCount() const { return positionCount_; } + +int64_t TimeColumn::getStartTime() const { return values_[arrayOffset_]; } +int64_t TimeColumn::getEndTime() const { return values_[positionCount_ + arrayOffset_ - 1]; } + +const std::vector& TimeColumn::getTimes() const { return values_; } +std::vector TimeColumn::getLongs() const { return getTimes(); } + +BinaryColumn::BinaryColumn(int32_t arrayOffset, int32_t positionCount, + const std::vector& valueIsNull, const std::vector>& values) + : arrayOffset_(arrayOffset), positionCount_(positionCount), + valueIsNull_(valueIsNull), values_(values) { + if (arrayOffset < 0) throw IoTDBException("arrayOffset is negative"); + if (positionCount < 0) throw IoTDBException("positionCount is negative"); + if (static_cast(values.size()) - arrayOffset < positionCount) { + throw IoTDBException("values length is less than positionCount"); + } + if (!valueIsNull.empty() && 
static_cast(valueIsNull.size()) - arrayOffset < positionCount) { + throw IoTDBException("isNull length is less than positionCount"); + } +} + +TSDataType::TSDataType BinaryColumn::getDataType() const { return TSDataType::TSDataType::TEXT; } +ColumnEncoding BinaryColumn::getEncoding() const { return ColumnEncoding::BinaryArray; } + +std::shared_ptr BinaryColumn::getBinary(int32_t position) const { + return values_[position + arrayOffset_]; +} + +std::vector> BinaryColumn::getBinaries() const { return values_; } + + +bool BinaryColumn::mayHaveNull() const { return !valueIsNull_.empty(); } + +bool BinaryColumn::isNull(int32_t position) const { + return !valueIsNull_.empty() && valueIsNull_[position + arrayOffset_]; +} + +std::vector BinaryColumn::isNulls() const { + if (!valueIsNull_.empty()) return valueIsNull_; + + std::vector result(positionCount_, false); + return result; +} + +int32_t BinaryColumn::getPositionCount() const { return positionCount_; } + +IntColumn::IntColumn(int32_t arrayOffset, int32_t positionCount, + const std::vector& valueIsNull, const std::vector& values) + : arrayOffset_(arrayOffset), positionCount_(positionCount), + valueNull_(valueIsNull), values_(values) { + if (arrayOffset < 0) throw IoTDBException("arrayOffset is negative"); + if (positionCount < 0) throw IoTDBException("positionCount is negative"); + if (static_cast(values.size()) - arrayOffset < positionCount) { + throw IoTDBException("values length is less than positionCount"); + } + if (!valueIsNull.empty() && static_cast(valueIsNull.size()) - arrayOffset < positionCount) { + throw IoTDBException("isNull length is less than positionCount"); + } +} + +TSDataType::TSDataType IntColumn::getDataType() const { return TSDataType::INT32; } +ColumnEncoding IntColumn::getEncoding() const { return ColumnEncoding::Int32Array; } + +int32_t IntColumn::getInt(int32_t position) const { + return values_[position + arrayOffset_]; +} + +std::vector IntColumn::getInts() const { return values_; } + +bool IntColumn::mayHaveNull() const { return !valueNull_.empty(); } + +bool IntColumn::isNull(int32_t position) const { + return !valueNull_.empty() && valueNull_[position + arrayOffset_]; +} + +std::vector IntColumn::isNulls() const { + if (!valueNull_.empty()) return valueNull_; + + std::vector result(positionCount_, false); + return result; +} + +int32_t IntColumn::getPositionCount() const { return positionCount_; } + +FloatColumn::FloatColumn(int32_t arrayOffset, int32_t positionCount, + const std::vector& valueIsNull, const std::vector& values) + : arrayOffset_(arrayOffset), positionCount_(positionCount), + valueIsNull_(valueIsNull), values_(values) { + if (arrayOffset < 0) throw IoTDBException("arrayOffset is negative"); + if (positionCount < 0) throw IoTDBException("positionCount is negative"); + if (static_cast(values.size()) - arrayOffset < positionCount) { + throw IoTDBException("values length is less than positionCount"); + } + if (!valueIsNull.empty() && static_cast(valueIsNull.size()) - arrayOffset < positionCount) { + throw IoTDBException("isNull length is less than positionCount"); + } +} + +TSDataType::TSDataType FloatColumn::getDataType() const { return TSDataType::TSDataType::FLOAT; } +ColumnEncoding FloatColumn::getEncoding() const { return ColumnEncoding::Int32Array; } + +float FloatColumn::getFloat(int32_t position) const { + return values_[position + arrayOffset_]; +} + +std::vector FloatColumn::getFloats() const { return values_; } + +bool FloatColumn::mayHaveNull() const { return !valueIsNull_.empty(); } + 
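+// Null handling shared by the value columns in this file: an empty valueIsNull_ vector means
+// "no nulls", so mayHaveNull() and isNull() never touch a bitmap for dense columns, and
+// isNulls() only materializes an all-false vector on demand.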
+bool FloatColumn::isNull(int32_t position) const { + return !valueIsNull_.empty() && valueIsNull_[position + arrayOffset_]; +} + +std::vector FloatColumn::isNulls() const { + if (!valueIsNull_.empty()) return valueIsNull_; + + std::vector result(positionCount_, false); + return result; +} + +int32_t FloatColumn::getPositionCount() const { return positionCount_; } + +LongColumn::LongColumn(int32_t arrayOffset, int32_t positionCount, + const std::vector& valueIsNull, const std::vector& values) + : arrayOffset_(arrayOffset), positionCount_(positionCount), + valueIsNull_(valueIsNull), values_(values) { + if (arrayOffset < 0) throw IoTDBException("arrayOffset is negative"); + if (positionCount < 0) throw IoTDBException("positionCount is negative"); + if (static_cast(values.size()) - arrayOffset < positionCount) { + throw IoTDBException("values length is less than positionCount"); + } + if (!valueIsNull.empty() && static_cast(valueIsNull.size()) - arrayOffset < positionCount) { + throw IoTDBException("isNull length is less than positionCount"); + } +} + +TSDataType::TSDataType LongColumn::getDataType() const { return TSDataType::TSDataType::INT64; } +ColumnEncoding LongColumn::getEncoding() const { return ColumnEncoding::Int64Array; } + +int64_t LongColumn::getLong(int32_t position) const { + return values_[position + arrayOffset_]; +} + +std::vector LongColumn::getLongs() const { return values_; } + +bool LongColumn::mayHaveNull() const { return !valueIsNull_.empty(); } + +bool LongColumn::isNull(int32_t position) const { + return !valueIsNull_.empty() && valueIsNull_[position + arrayOffset_]; +} + +std::vector LongColumn::isNulls() const { + if (!valueIsNull_.empty()) return valueIsNull_; + + std::vector result(positionCount_, false); + return result; +} + +int32_t LongColumn::getPositionCount() const { return positionCount_; } + +DoubleColumn::DoubleColumn(int32_t arrayOffset, int32_t positionCount, + const std::vector& valueIsNull, const std::vector& values) + : arrayOffset_(arrayOffset), positionCount_(positionCount), + valueIsNull_(valueIsNull), values_(values) { + if (arrayOffset < 0) throw IoTDBException("arrayOffset is negative"); + if (positionCount < 0) throw IoTDBException("positionCount is negative"); + if (static_cast(values.size()) - arrayOffset < positionCount) { + throw IoTDBException("values length is less than positionCount"); + } + if (!valueIsNull.empty() && static_cast(valueIsNull.size()) - arrayOffset < positionCount) { + throw IoTDBException("isNull length is less than positionCount"); + } +} + +TSDataType::TSDataType DoubleColumn::getDataType() const { return TSDataType::TSDataType::DOUBLE; } +ColumnEncoding DoubleColumn::getEncoding() const { return ColumnEncoding::Int64Array; } + +double DoubleColumn::getDouble(int32_t position) const { + return values_[position + arrayOffset_]; +} + +std::vector DoubleColumn::getDoubles() const { return values_; } + +bool DoubleColumn::mayHaveNull() const { return !valueIsNull_.empty(); } + +bool DoubleColumn::isNull(int32_t position) const { + return !valueIsNull_.empty() && valueIsNull_[position + arrayOffset_]; +} + +std::vector DoubleColumn::isNulls() const { + if (!valueIsNull_.empty()) return valueIsNull_; + + std::vector result(positionCount_, false); + return result; +} + +int32_t DoubleColumn::getPositionCount() const { return positionCount_; } + +BooleanColumn::BooleanColumn(int32_t arrayOffset, int32_t positionCount, + const std::vector& valueIsNull, const std::vector& values) + : arrayOffset_(arrayOffset), 
positionCount_(positionCount), + valueIsNull_(valueIsNull), values_(values) { + if (arrayOffset < 0) throw IoTDBException("arrayOffset is negative"); + if (positionCount < 0) throw IoTDBException("positionCount is negative"); + if (static_cast(values.size()) - arrayOffset < positionCount) { + throw IoTDBException("values length is less than positionCount"); + } + if (!valueIsNull.empty() && static_cast(valueIsNull.size()) - arrayOffset < positionCount) { + throw IoTDBException("isNull length is less than positionCount"); + } +} + +TSDataType::TSDataType BooleanColumn::getDataType() const { return TSDataType::TSDataType::BOOLEAN; } +ColumnEncoding BooleanColumn::getEncoding() const { return ColumnEncoding::ByteArray; } + +bool BooleanColumn::getBoolean(int32_t position) const { + return values_[position + arrayOffset_]; +} + +std::vector BooleanColumn::getBooleans() const { return values_; } + +bool BooleanColumn::mayHaveNull() const { return !valueIsNull_.empty(); } + +bool BooleanColumn::isNull(int32_t position) const { + return !valueIsNull_.empty() && valueIsNull_[position + arrayOffset_]; +} + +std::vector BooleanColumn::isNulls() const { + if (!valueIsNull_.empty()) return valueIsNull_; + + std::vector result(positionCount_, false); + return result; +} + +int32_t BooleanColumn::getPositionCount() const { return positionCount_; } + +RunLengthEncodedColumn::RunLengthEncodedColumn(std::shared_ptr value, int32_t positionCount) + : value_(value), positionCount_(positionCount) { + if (!value) throw IoTDBException("value is null"); + if (value->getPositionCount() != 1) { + throw IoTDBException("Expected value to contain a single position"); + } + if (positionCount < 0) throw IoTDBException("positionCount is negative"); +} + +std::shared_ptr RunLengthEncodedColumn::getValue() const { return value_; } + +TSDataType::TSDataType RunLengthEncodedColumn::getDataType() const { return value_->getDataType(); } +ColumnEncoding RunLengthEncodedColumn::getEncoding() const { return ColumnEncoding::Rle; } + +bool RunLengthEncodedColumn::getBoolean(int32_t position) const { + return value_->getBoolean(0); +} + +int32_t RunLengthEncodedColumn::getInt(int32_t position) const { + return value_->getInt(0); +} + +int64_t RunLengthEncodedColumn::getLong(int32_t position) const { + return value_->getLong(0); +} + +float RunLengthEncodedColumn::getFloat(int32_t position) const { + return value_->getFloat(0); +} + +double RunLengthEncodedColumn::getDouble(int32_t position) const { + return value_->getDouble(0); +} + +std::shared_ptr RunLengthEncodedColumn::getBinary(int32_t position) const { + return value_->getBinary(0); +} + +std::vector RunLengthEncodedColumn::getBooleans() const { + bool v = value_->getBoolean(0); + return std::vector(positionCount_, v); +} + +std::vector RunLengthEncodedColumn::getInts() const { + int32_t v = value_->getInt(0); + return std::vector(positionCount_, v); +} + +std::vector RunLengthEncodedColumn::getLongs() const { + int64_t v = value_->getLong(0); + return std::vector(positionCount_, v); +} + +std::vector RunLengthEncodedColumn::getFloats() const { + float v = value_->getFloat(0); + return std::vector(positionCount_, v); +} + +std::vector RunLengthEncodedColumn::getDoubles() const { + double v = value_->getDouble(0); + return std::vector(positionCount_, v); +} + +std::vector> RunLengthEncodedColumn::getBinaries() const { + auto v = value_->getBinary(0); + return std::vector>(positionCount_, v); +} + +bool RunLengthEncodedColumn::mayHaveNull() const { return value_->mayHaveNull(); 
} + +bool RunLengthEncodedColumn::isNull(int32_t position) const { + return value_->isNull(0); +} + +std::vector RunLengthEncodedColumn::isNulls() const { + bool v = value_->isNull(0); + return std::vector(positionCount_, v); +} + +int32_t RunLengthEncodedColumn::getPositionCount() const { return positionCount_; } diff --git a/iotdb-client/client-cpp/src/main/Column.h b/iotdb-client/client-cpp/src/main/Column.h new file mode 100644 index 0000000000000..04f611f41a874 --- /dev/null +++ b/iotdb-client/client-cpp/src/main/Column.h @@ -0,0 +1,353 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +#ifndef IOTDB_COLUMN_H +#define IOTDB_COLUMN_H + +#include +#include +#include +#include + +#include "Common.h" +#include "ColumnDecoder.h" + +enum class ColumnEncoding : uint8_t { + ByteArray, + Int32Array, + Int64Array, + BinaryArray, + Rle +}; + +class Binary { +public: + explicit Binary(std::vector data) : data_(std::move(data)) { + } + + const std::vector& getData() const { return data_; } + + std::string getStringValue() const { + return {data_.begin(), data_.end()}; + } + +private: + std::vector data_; +}; + + +const std::map> kEncodingToDecoder = { + {ColumnEncoding::Int32Array, std::make_shared()}, + {ColumnEncoding::Int64Array, std::make_shared()}, + {ColumnEncoding::ByteArray, std::make_shared()}, + {ColumnEncoding::BinaryArray, std::make_shared()}, + {ColumnEncoding::Rle, std::make_shared()} +}; + +const std::map kByteToEncoding = { + {0, ColumnEncoding::ByteArray}, + {1, ColumnEncoding::Int32Array}, + {2, ColumnEncoding::Int64Array}, + {3, ColumnEncoding::BinaryArray}, + {4, ColumnEncoding::Rle} +}; + +inline std::shared_ptr getColumnDecoder(ColumnEncoding encoding) { + auto it = kEncodingToDecoder.find(encoding); + if (it == kEncodingToDecoder.end()) { + throw IoTDBException("Unsupported column encoding"); + } + return it->second; +} + +inline ColumnEncoding getColumnEncodingByByte(uint8_t b) { + auto it = kByteToEncoding.find(b); + if (it == kByteToEncoding.end()) { + throw IoTDBException("Invalid encoding value: " + std::to_string(b)); + } + return it->second; +} + +class Column { +public: + virtual ~Column() = default; + + virtual TSDataType::TSDataType getDataType() const = 0; + virtual ColumnEncoding getEncoding() const = 0; + + virtual bool getBoolean(int32_t position) const { + throw IoTDBException("Unsupported operation: getBoolean"); + } + + virtual int32_t getInt(int32_t position) const { + throw IoTDBException("Unsupported operation: getInt"); + } + + virtual int64_t getLong(int32_t position) const { + throw IoTDBException("Unsupported operation: getLong"); + } + + virtual float getFloat(int32_t position) const { + throw IoTDBException("Unsupported operation: getFloat"); + } + + virtual double getDouble(int32_t 
position) const { + throw IoTDBException("Unsupported operation: getDouble"); + } + + virtual std::shared_ptr getBinary(int32_t position) const { + throw IoTDBException("Unsupported operation: getBinary"); + } + + virtual std::vector getBooleans() const { + throw IoTDBException("Unsupported operation: getBooleans"); + } + + virtual std::vector getInts() const { + throw IoTDBException("Unsupported operation: getInts"); + } + + virtual std::vector getLongs() const { + throw IoTDBException("Unsupported operation: getLongs"); + } + + virtual std::vector getFloats() const { + throw IoTDBException("Unsupported operation: getFloats"); + } + + virtual std::vector getDoubles() const { + throw IoTDBException("Unsupported operation: getDoubles"); + } + + virtual std::vector> getBinaries() const { + throw IoTDBException("Unsupported operation: getBinaries"); + } + + virtual bool mayHaveNull() const = 0; + virtual bool isNull(int32_t position) const = 0; + virtual std::vector isNulls() const = 0; + + virtual int32_t getPositionCount() const = 0; +}; + +class TimeColumn : public Column { +public: + TimeColumn(int32_t arrayOffset, int32_t positionCount, const std::vector& values); + + TSDataType::TSDataType getDataType() const override; + ColumnEncoding getEncoding() const override; + + int64_t getLong(int32_t position) const override; + + bool mayHaveNull() const override; + bool isNull(int32_t position) const override; + std::vector isNulls() const override; + + int32_t getPositionCount() const override; + + int64_t getStartTime() const; + int64_t getEndTime() const; + + const std::vector& getTimes() const; + std::vector getLongs() const override; + +private: + int32_t arrayOffset_; + int32_t positionCount_; + std::vector values_; +}; + +class BinaryColumn : public Column { +public: + BinaryColumn(int32_t arrayOffset, int32_t positionCount, + const std::vector& valueIsNull, const std::vector>& values); + + TSDataType::TSDataType getDataType() const override; + ColumnEncoding getEncoding() const override; + + std::shared_ptr getBinary(int32_t position) const override; + std::vector> getBinaries() const override; + + bool mayHaveNull() const override; + bool isNull(int32_t position) const override; + std::vector isNulls() const override; + + int32_t getPositionCount() const override; + +private: + int32_t arrayOffset_; + int32_t positionCount_; + std::vector valueIsNull_; + std::vector> values_; +}; + +class IntColumn : public Column { +public: + IntColumn(int32_t arrayOffset, int32_t positionCount, + const std::vector& valueIsNull, const std::vector& values); + + TSDataType::TSDataType getDataType() const override; + ColumnEncoding getEncoding() const override; + + int32_t getInt(int32_t position) const override; + std::vector getInts() const override; + + bool mayHaveNull() const override; + bool isNull(int32_t position) const override; + std::vector isNulls() const override; + + int32_t getPositionCount() const override; + +private: + int32_t arrayOffset_; + int32_t positionCount_; + std::vector valueNull_; + std::vector values_; +}; + +class FloatColumn : public Column { +public: + FloatColumn(int32_t arrayOffset, int32_t positionCount, + const std::vector& valueIsNull, const std::vector& values); + + TSDataType::TSDataType getDataType() const override; + ColumnEncoding getEncoding() const override; + + float getFloat(int32_t position) const override; + std::vector getFloats() const override; + + bool mayHaveNull() const override; + bool isNull(int32_t position) const override; + std::vector 
isNulls() const override; + + int32_t getPositionCount() const override; + +private: + int32_t arrayOffset_; + int32_t positionCount_; + std::vector valueIsNull_; + std::vector values_; +}; + +class LongColumn : public Column { +public: + LongColumn(int32_t arrayOffset, int32_t positionCount, + const std::vector& valueIsNull, const std::vector& values); + + TSDataType::TSDataType getDataType() const override; + ColumnEncoding getEncoding() const override; + + int64_t getLong(int32_t position) const override; + std::vector getLongs() const override; + + bool mayHaveNull() const override; + bool isNull(int32_t position) const override; + std::vector isNulls() const override; + + int32_t getPositionCount() const override; + +private: + int32_t arrayOffset_; + int32_t positionCount_; + std::vector valueIsNull_; + std::vector values_; +}; + +class DoubleColumn : public Column { +public: + DoubleColumn(int32_t arrayOffset, int32_t positionCount, + const std::vector& valueIsNull, const std::vector& values); + + TSDataType::TSDataType getDataType() const override; + ColumnEncoding getEncoding() const override; + + double getDouble(int32_t position) const override; + std::vector getDoubles() const override; + + bool mayHaveNull() const override; + bool isNull(int32_t position) const override; + std::vector isNulls() const override; + + int32_t getPositionCount() const override; + +private: + int32_t arrayOffset_; + int32_t positionCount_; + std::vector valueIsNull_; + std::vector values_; +}; + +class BooleanColumn : public Column { +public: + BooleanColumn(int32_t arrayOffset, int32_t positionCount, + const std::vector& valueIsNull, const std::vector& values); + + TSDataType::TSDataType getDataType() const override; + ColumnEncoding getEncoding() const override; + + bool getBoolean(int32_t position) const override; + std::vector getBooleans() const override; + + bool mayHaveNull() const override; + bool isNull(int32_t position) const override; + std::vector isNulls() const override; + + int32_t getPositionCount() const override; + +private: + int32_t arrayOffset_; + int32_t positionCount_; + std::vector valueIsNull_; + std::vector values_; +}; + +class RunLengthEncodedColumn : public Column { +public: + RunLengthEncodedColumn(std::shared_ptr value, int32_t positionCount); + + std::shared_ptr getValue() const; + + TSDataType::TSDataType getDataType() const override; + ColumnEncoding getEncoding() const override; + + bool getBoolean(int32_t position) const override; + int32_t getInt(int32_t position) const override; + int64_t getLong(int32_t position) const override; + float getFloat(int32_t position) const override; + double getDouble(int32_t position) const override; + std::shared_ptr getBinary(int32_t position) const override; + + std::vector getBooleans() const override; + std::vector getInts() const override; + std::vector getLongs() const override; + std::vector getFloats() const override; + std::vector getDoubles() const override; + std::vector> getBinaries() const override; + + bool mayHaveNull() const override; + bool isNull(int32_t position) const override; + std::vector isNulls() const override; + + int32_t getPositionCount() const override; + +private: + std::shared_ptr value_; + int32_t positionCount_; +}; + +#endif diff --git a/iotdb-client/client-cpp/src/main/ColumnDecoder.cpp b/iotdb-client/client-cpp/src/main/ColumnDecoder.cpp new file mode 100644 index 0000000000000..e45cb49409a0d --- /dev/null +++ b/iotdb-client/client-cpp/src/main/ColumnDecoder.cpp @@ -0,0 +1,178 @@ +/** +* 
Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#include "ColumnDecoder.h" + +#include "Column.h" + +std::vector deserializeNullIndicators(MyStringBuffer& buffer, int32_t positionCount) { + uint8_t mayHaveNullByte = buffer.getChar(); + + bool mayHaveNull = mayHaveNullByte != 0; + if (!mayHaveNull) { + return {}; + } + + return deserializeBooleanArray(buffer, positionCount); +} + +std::vector deserializeBooleanArray(MyStringBuffer& buffer, int32_t size) { + const int32_t packedSize = (size + 7) / 8; + std::vector packedBytes(packedSize); + for (int i = 0; i < packedSize; i++) { + packedBytes[i] = buffer.getChar(); + } + + std::vector output(size); + int currentByte = 0; + const int fullGroups = size & ~0b111; + + for (int pos = 0; pos < fullGroups; pos += 8) { + const uint8_t b = packedBytes[currentByte++]; + output[pos + 0] = (b & 0b10000000) != 0; + output[pos + 1] = (b & 0b01000000) != 0; + output[pos + 2] = (b & 0b00100000) != 0; + output[pos + 3] = (b & 0b00010000) != 0; + output[pos + 4] = (b & 0b00001000) != 0; + output[pos + 5] = (b & 0b00000100) != 0; + output[pos + 6] = (b & 0b00000010) != 0; + output[pos + 7] = (b & 0b00000001) != 0; + } + + if ((size & 0b111) > 0) { + const uint8_t b = packedBytes.back(); + uint8_t mask = 0b10000000; + + for (int pos = fullGroups; pos < size; pos++) { + output[pos] = (b & mask) != 0; + mask >>= 1; + } + } + + return output; +} + +std::unique_ptr BaseColumnDecoder::readColumn( + MyStringBuffer& buffer, TSDataType::TSDataType dataType, int32_t positionCount) { + return nullptr; +} + +std::unique_ptr Int32ArrayColumnDecoder::readColumn( + MyStringBuffer& buffer, TSDataType::TSDataType dataType, int32_t positionCount) { + auto nullIndicators = deserializeNullIndicators(buffer, positionCount); + + switch (dataType) { + case TSDataType::INT32: + case TSDataType::DATE: { + std::vector intValues(positionCount); + for (int32_t i = 0; i < positionCount; i++) { + if (!nullIndicators.empty() && nullIndicators[i]) continue; + intValues[i] = buffer.getInt(); + } + return std::unique_ptr(new IntColumn(0, positionCount, nullIndicators, intValues)); + } + case TSDataType::FLOAT: { + std::vector floatValues(positionCount); + for (int32_t i = 0; i < positionCount; i++) { + if (!nullIndicators.empty() && nullIndicators[i]) continue; + floatValues[i] = buffer.getFloat(); + } + return std::unique_ptr(new FloatColumn(0, positionCount, nullIndicators, floatValues)); + } + default: + throw IoTDBException("Invalid data type for Int32ArrayColumnDecoder"); + } +} + +std::unique_ptr Int64ArrayColumnDecoder::readColumn( + MyStringBuffer& buffer, TSDataType::TSDataType dataType, int32_t positionCount) { + auto nullIndicators = deserializeNullIndicators(buffer, positionCount); + + switch (dataType) { + case 
TSDataType::INT64:
+        case TSDataType::TIMESTAMP: {
+            std::vector<int64_t> values(positionCount);
+            for (int32_t i = 0; i < positionCount; i++) {
+                if (!nullIndicators.empty() && nullIndicators[i]) continue;
+                values[i] = buffer.getInt64();
+            }
+            return std::unique_ptr<Column>(new LongColumn(0, positionCount, nullIndicators, values));
+        }
+        case TSDataType::DOUBLE: {
+            std::vector<double> values(positionCount);
+            for (int32_t i = 0; i < positionCount; i++) {
+                if (!nullIndicators.empty() && nullIndicators[i]) continue;
+                values[i] = buffer.getDouble();
+            }
+            return std::unique_ptr<Column>(new DoubleColumn(0, positionCount, nullIndicators, values));
+        }
+        default:
+            throw IoTDBException("Invalid data type for Int64ArrayColumnDecoder");
+    }
+}
+
+std::unique_ptr<Column> ByteArrayColumnDecoder::readColumn(
+    MyStringBuffer& buffer, TSDataType::TSDataType dataType, int32_t positionCount) {
+    if (dataType != TSDataType::BOOLEAN) {
+        throw IoTDBException("Invalid data type for ByteArrayColumnDecoder");
+    }
+
+    auto nullIndicators = deserializeNullIndicators(buffer, positionCount);
+    auto values = deserializeBooleanArray(buffer, positionCount);
+    return std::unique_ptr<Column>(new BooleanColumn(0, positionCount, nullIndicators, values));
+}
+
+std::unique_ptr<Column> BinaryArrayColumnDecoder::readColumn(
+    MyStringBuffer& buffer, TSDataType::TSDataType dataType, int32_t positionCount) {
+    if (dataType != TSDataType::TEXT) {
+        throw IoTDBException("Invalid data type for BinaryArrayColumnDecoder");
+    }
+
+    auto nullIndicators = deserializeNullIndicators(buffer, positionCount);
+    std::vector<std::shared_ptr<Binary>> values(positionCount);
+
+    for (int32_t i = 0; i < positionCount; i++) {
+        if (!nullIndicators.empty() && nullIndicators[i]) continue;
+
+        int32_t length = buffer.getInt();
+
+        std::vector<uint8_t> value(length);
+        for (int32_t j = 0; j < length; j++) {
+            value[j] = buffer.getChar();
+        }
+
+        values[i] = std::make_shared<Binary>(value);
+    }
+
+    return std::unique_ptr<Column>(new BinaryColumn(0, positionCount, nullIndicators, values));
+}
+
+std::unique_ptr<Column> RunLengthColumnDecoder::readColumn(
+    MyStringBuffer& buffer, TSDataType::TSDataType dataType, int32_t positionCount) {
+    uint8_t encodingByte = buffer.getChar();
+
+    auto columnEncoding = static_cast<ColumnEncoding>(encodingByte);
+    auto decoder = getColumnDecoder(columnEncoding);
+
+    // The run-length payload is a single-position inner column repeated positionCount times.
+    auto column = decoder->readColumn(buffer, dataType, 1);
+    if (!column) {
+        throw IoTDBException("Failed to read inner column");
+    }
+    return std::unique_ptr<Column>(new RunLengthEncodedColumn(std::move(column), positionCount));
+}
diff --git a/iotdb-client/client-cpp/src/main/ColumnDecoder.h b/iotdb-client/client-cpp/src/main/ColumnDecoder.h
new file mode 100644
index 0000000000000..f5340d1e400ac
--- /dev/null
+++ b/iotdb-client/client-cpp/src/main/ColumnDecoder.h
@@ -0,0 +1,75 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ +#ifndef IOTDB_COLUMN_DECODER_H +#define IOTDB_COLUMN_DECODER_H + +#include +#include + +#include "Common.h" + +class Column; + +class ColumnDecoder { +public: + virtual ~ColumnDecoder() = default; + virtual std::unique_ptr readColumn( + MyStringBuffer& buffer, TSDataType::TSDataType dataType, int32_t positionCount) = 0; +}; + +std::vector deserializeNullIndicators(MyStringBuffer& buffer, int32_t positionCount); +std::vector deserializeBooleanArray(MyStringBuffer& buffer, int32_t size); + +class BaseColumnDecoder : public ColumnDecoder { +public: + std::unique_ptr readColumn( + MyStringBuffer& buffer, TSDataType::TSDataType dataType, int32_t positionCount) override; +}; + +class Int32ArrayColumnDecoder : public BaseColumnDecoder { +public: + std::unique_ptr readColumn( + MyStringBuffer& buffer, TSDataType::TSDataType dataType, int32_t positionCount) override; +}; + +class Int64ArrayColumnDecoder : public BaseColumnDecoder { +public: + std::unique_ptr readColumn( + MyStringBuffer& buffer, TSDataType::TSDataType dataType, int32_t positionCount) override; +}; + +class ByteArrayColumnDecoder : public BaseColumnDecoder { +public: + std::unique_ptr readColumn( + MyStringBuffer& buffer, TSDataType::TSDataType dataType, int32_t positionCount) override; +}; + +class BinaryArrayColumnDecoder : public BaseColumnDecoder { +public: + std::unique_ptr readColumn( + MyStringBuffer& buffer, TSDataType::TSDataType dataType, int32_t positionCount) override; +}; + +class RunLengthColumnDecoder : public BaseColumnDecoder { +public: + std::unique_ptr readColumn( + MyStringBuffer& buffer, TSDataType::TSDataType dataType, int32_t positionCount) override; +}; + +#endif diff --git a/iotdb-client/client-cpp/src/main/Common.cpp b/iotdb-client/client-cpp/src/main/Common.cpp new file mode 100644 index 0000000000000..38e8a31d2ff37 --- /dev/null +++ b/iotdb-client/client-cpp/src/main/Common.cpp @@ -0,0 +1,456 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#include "Common.h" +#include + +int32_t parseDateExpressionToInt(const boost::gregorian::date& date) { + if (date.is_not_a_date()) { + throw IoTDBException("Date expression is null or empty."); + } + + const int year = date.year(); + if (year < 1000 || year > 9999) { + throw DateTimeParseException( + "Year must be between 1000 and 9999.", + boost::gregorian::to_iso_extended_string(date), + 0 + ); + } + + const int64_t result = static_cast(year) * 10000 + + date.month() * 100 + + date.day(); + if (result > INT32_MAX || result < INT32_MIN) { + throw DateTimeParseException( + "Date value overflow. 
", + boost::gregorian::to_iso_extended_string(date), + 0 + ); + } + return static_cast(result); +} + +boost::gregorian::date parseIntToDate(int32_t dateInt) { + if (dateInt == EMPTY_DATE_INT) { + return boost::gregorian::date(boost::date_time::not_a_date_time); + } + int year = dateInt / 10000; + int month = (dateInt % 10000) / 100; + int day = dateInt % 100; + return boost::gregorian::date(year, month, day); +} + +std::string getTimePrecision(int32_t timeFactor) { + if (timeFactor >= 1000000) return "us"; + if (timeFactor >= 1000) return "ms"; + return "s"; +} + +std::string formatDatetime(const std::string& format, const std::string& precision, + int64_t timestamp, const std::string& zoneId) { + // Simplified implementation - in real code you'd use proper timezone handling + std::time_t time = static_cast(timestamp); + std::tm* tm = std::localtime(&time); + char buffer[80]; + strftime(buffer, sizeof(buffer), format.c_str(), tm); + return std::string(buffer); +} + +std::tm convertToTimestamp(int64_t value, int32_t timeFactor) { + std::time_t time = static_cast(value / timeFactor); + return *std::localtime(&time); +} + +TSDataType::TSDataType getDataTypeByStr(const std::string& typeStr) { + if (typeStr == "BOOLEAN") return TSDataType::BOOLEAN; + if (typeStr == "INT32") return TSDataType::INT32; + if (typeStr == "INT64") return TSDataType::INT64; + if (typeStr == "FLOAT") return TSDataType::FLOAT; + if (typeStr == "DOUBLE") return TSDataType::DOUBLE; + if (typeStr == "TEXT") return TSDataType::TEXT; + if (typeStr == "TIMESTAMP") return TSDataType::TIMESTAMP; + if (typeStr == "DATE") return TSDataType::DATE; + if (typeStr == "BLOB") return TSDataType::BLOB; + if (typeStr == "STRING") return TSDataType::STRING; + return TSDataType::UNKNOWN; +} + +std::tm int32ToDate(int32_t value) { + // Convert days since epoch (1970-01-01) to tm struct + std::time_t time = static_cast(value) * 86400; // seconds per day + return *std::localtime(&time); +} + +void RpcUtils::verifySuccess(const TSStatus& status) { + if (status.code == TSStatusCode::MULTIPLE_ERROR) { + verifySuccess(status.subStatus); + return; + } + if (status.code != TSStatusCode::SUCCESS_STATUS + && status.code != TSStatusCode::REDIRECTION_RECOMMEND) { + throw ExecutionException(to_string(status.code) + ": " + status.message, status); + } +} + +void RpcUtils::verifySuccessWithRedirection(const TSStatus& status) { + verifySuccess(status); + if (status.__isset.redirectNode) { + throw RedirectException(to_string(status.code) + ": " + status.message, status.redirectNode); + } + if (status.__isset.subStatus) { + auto statusSubStatus = status.subStatus; + vector endPointList(statusSubStatus.size()); + int count = 0; + for (TSStatus subStatus : statusSubStatus) { + if (subStatus.__isset.redirectNode) { + endPointList[count++] = subStatus.redirectNode; + } + else { + TEndPoint endPoint; + endPointList[count++] = endPoint; + } + } + if (!endPointList.empty()) { + throw RedirectException(to_string(status.code) + ": " + status.message, endPointList); + } + } +} + +void RpcUtils::verifySuccessWithRedirectionForMultiDevices(const TSStatus& status, vector devices) { + verifySuccess(status); + + if (status.code == TSStatusCode::MULTIPLE_ERROR + || status.code == TSStatusCode::REDIRECTION_RECOMMEND) { + map deviceEndPointMap; + vector statusSubStatus; + for (int i = 0; i < statusSubStatus.size(); i++) { + TSStatus subStatus = statusSubStatus[i]; + if (subStatus.__isset.redirectNode) { + deviceEndPointMap.insert(make_pair(devices[i], 
subStatus.redirectNode)); + } + } + throw RedirectException(to_string(status.code) + ": " + status.message, deviceEndPointMap); + } + + if (status.__isset.redirectNode) { + throw RedirectException(to_string(status.code) + ": " + status.message, status.redirectNode); + } + if (status.__isset.subStatus) { + auto statusSubStatus = status.subStatus; + vector endPointList(statusSubStatus.size()); + int count = 0; + for (TSStatus subStatus : statusSubStatus) { + if (subStatus.__isset.redirectNode) { + endPointList[count++] = subStatus.redirectNode; + } + else { + TEndPoint endPoint; + endPointList[count++] = endPoint; + } + } + if (!endPointList.empty()) { + throw RedirectException(to_string(status.code) + ": " + status.message, endPointList); + } + } +} + +void RpcUtils::verifySuccess(const vector& statuses) { + for (const TSStatus& status : statuses) { + if (status.code != TSStatusCode::SUCCESS_STATUS) { + throw BatchExecutionException(status.message, statuses); + } + } +} + +TSStatus RpcUtils::getStatus(TSStatusCode::TSStatusCode tsStatusCode) { + TSStatus status; + status.__set_code(tsStatusCode); + return status; +} + +TSStatus RpcUtils::getStatus(int code, const string& message) { + TSStatus status; + status.__set_code(code); + status.__set_message(message); + return status; +} + +shared_ptr RpcUtils::getTSExecuteStatementResp(TSStatusCode::TSStatusCode tsStatusCode) { + TSStatus status = getStatus(tsStatusCode); + return getTSExecuteStatementResp(status); +} + +shared_ptr +RpcUtils::getTSExecuteStatementResp(TSStatusCode::TSStatusCode tsStatusCode, const string& message) { + TSStatus status = getStatus(tsStatusCode, message); + return getTSExecuteStatementResp(status); +} + +shared_ptr RpcUtils::getTSExecuteStatementResp(const TSStatus& status) { + shared_ptr resp(new TSExecuteStatementResp()); + TSStatus tsStatus(status); + resp->__set_status(status); + return resp; +} + +shared_ptr RpcUtils::getTSFetchResultsResp(TSStatusCode::TSStatusCode tsStatusCode) { + TSStatus status = getStatus(tsStatusCode); + return getTSFetchResultsResp(status); +} + +shared_ptr +RpcUtils::getTSFetchResultsResp(TSStatusCode::TSStatusCode tsStatusCode, const string& appendMessage) { + TSStatus status = getStatus(tsStatusCode, appendMessage); + return getTSFetchResultsResp(status); +} + +shared_ptr RpcUtils::getTSFetchResultsResp(const TSStatus& status) { + shared_ptr resp(new TSFetchResultsResp()); + TSStatus tsStatus(status); + resp->__set_status(tsStatus); + return resp; +} + +MyStringBuffer::MyStringBuffer() : pos(0) { + checkBigEndian(); +} + +MyStringBuffer::MyStringBuffer(const std::string& str) : str(str), pos(0) { + checkBigEndian(); +} + +void MyStringBuffer::reserve(size_t n) { + str.reserve(n); +} + +void MyStringBuffer::clear() { + str.clear(); + pos = 0; +} + +bool MyStringBuffer::hasRemaining() { + return pos < str.size(); +} + +int MyStringBuffer::getInt() { + return *(int*)getOrderedByte(4); +} + +boost::gregorian::date MyStringBuffer::getDate() { + return parseIntToDate(getInt()); +} + +int64_t MyStringBuffer::getInt64() { +#ifdef ARCH32 + const char *buf_addr = getOrderedByte(8); + if (reinterpret_cast(buf_addr) % 4 == 0) { + return *(int64_t *)buf_addr; + } else { + char tmp_buf[8]; + memcpy(tmp_buf, buf_addr, 8); + return *(int64_t*)tmp_buf; + } +#else + return *(int64_t*)getOrderedByte(8); +#endif +} + +float MyStringBuffer::getFloat() { + return *(float*)getOrderedByte(4); +} + +double MyStringBuffer::getDouble() { +#ifdef ARCH32 + const char *buf_addr = getOrderedByte(8); + if 
(reinterpret_cast(buf_addr) % 4 == 0) { + return *(double*)buf_addr; + } else { + char tmp_buf[8]; + memcpy(tmp_buf, buf_addr, 8); + return *(double*)tmp_buf; + } +#else + return *(double*)getOrderedByte(8); +#endif +} + +char MyStringBuffer::getChar() { + return str[pos++]; +} + +bool MyStringBuffer::getBool() { + return getChar() == 1; +} + +std::string MyStringBuffer::getString() { + size_t len = getInt(); + size_t tmpPos = pos; + pos += len; + return str.substr(tmpPos, len); +} + +void MyStringBuffer::putInt(int ins) { + putOrderedByte((char*)&ins, 4); +} + +void MyStringBuffer::putDate(boost::gregorian::date date) { + putInt(parseDateExpressionToInt(date)); +} + +void MyStringBuffer::putInt64(int64_t ins) { + putOrderedByte((char*)&ins, 8); +} + +void MyStringBuffer::putFloat(float ins) { + putOrderedByte((char*)&ins, 4); +} + +void MyStringBuffer::putDouble(double ins) { + putOrderedByte((char*)&ins, 8); +} + +void MyStringBuffer::putChar(char ins) { + str += ins; +} + +void MyStringBuffer::putBool(bool ins) { + char tmp = ins ? 1 : 0; + str += tmp; +} + +void MyStringBuffer::putString(const std::string& ins) { + putInt((int)(ins.size())); + str += ins; +} + +void MyStringBuffer::concat(const std::string& ins) { + str.append(ins); +} + +void MyStringBuffer::checkBigEndian() { + static int chk = 0x0201; //used to distinguish CPU's type (BigEndian or LittleEndian) + isBigEndian = (0x01 != *(char*)(&chk)); +} + +const char* MyStringBuffer::getOrderedByte(size_t len) { + const char* p = nullptr; + if (isBigEndian) { + p = str.c_str() + pos; + } + else { + const char* tmp = str.c_str(); + for (size_t i = pos; i < pos + len; i++) { + numericBuf[pos + len - 1 - i] = tmp[i]; + } + p = numericBuf; + } + pos += len; + return p; +} + +void MyStringBuffer::putOrderedByte(char* buf, int len) { + if (isBigEndian) { + str.assign(buf, len); + } + else { + for (int i = len - 1; i > -1; i--) { + str += buf[i]; + } + } +} + +BitMap::BitMap(size_t size) { + resize(size); +} + +void BitMap::resize(size_t size) { + this->size = size; + this->bits.resize((size >> 3) + 1); // equal to "size/8 + 1" + reset(); +} + +bool BitMap::mark(size_t position) { + if (position >= size) + return false; + + bits[position >> 3] |= (char)1 << (position % 8); + return true; +} + +bool BitMap::unmark(size_t position) { + if (position >= size) + return false; + + bits[position >> 3] &= ~((char)1 << (position % 8)); + return true; +} + +void BitMap::markAll() { + std::fill(bits.begin(), bits.end(), (char)0XFF); +} + +void BitMap::reset() { + std::fill(bits.begin(), bits.end(), (char)0); +} + +bool BitMap::isMarked(size_t position) const { + if (position >= size) + return false; + + return (bits[position >> 3] & ((char)1 << (position % 8))) != 0; +} + +bool BitMap::isAllUnmarked() const { + size_t j; + for (j = 0; j < size >> 3; j++) { + if (bits[j] != (char)0) { + return false; + } + } + for (j = 0; j < size % 8; j++) { + if ((bits[size >> 3] & ((char)1 << j)) != 0) { + return false; + } + } + return true; +} + +bool BitMap::isAllMarked() const { + size_t j; + for (j = 0; j < size >> 3; j++) { + if (bits[j] != (char)0XFF) { + return false; + } + } + for (j = 0; j < size % 8; j++) { + if ((bits[size >> 3] & ((char)1 << j)) == 0) { + return false; + } + } + return true; +} + +const std::vector& BitMap::getByteArray() const { + return this->bits; +} + +size_t BitMap::getSize() const { + return this->size; +} diff --git a/iotdb-client/client-cpp/src/main/Common.h b/iotdb-client/client-cpp/src/main/Common.h new file mode 100644 
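
MyStringBuffer keeps every multi-byte numeric big-endian in str, reversing bytes on little-endian hosts in getOrderedByte/putOrderedByte. A round-trip sketch, assuming a little-endian host for the byte-order comment (note in passing that the big-endian branch of putOrderedByte uses str.assign, which replaces the accumulated buffer where str.append(buf, len) appears intended):

```cpp
#include <cassert>

// Write then read an int32 through MyStringBuffer. On a little-endian host
// putOrderedByte() appends the bytes most-significant first, so the wire
// form of 0x01020304 is the byte sequence 01 02 03 04.
void bufferRoundTripExample() {
    MyStringBuffer buf;
    buf.putInt(0x01020304);
    assert(buf.str.size() == 4);
    assert(static_cast<unsigned char>(buf.str[0]) == 0x01);  // MSB first
    assert(buf.getInt() == 0x01020304);                      // reversed back on read
}
```
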
index 0000000000000..a9f4552ecc5fd --- /dev/null +++ b/iotdb-client/client-cpp/src/main/Common.h @@ -0,0 +1,484 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +#ifndef IOTDB_COMMON_H +#define IOTDB_COMMON_H + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "client_types.h" +#include "common_types.h" + +using namespace std; + +using ::apache::thrift::protocol::TBinaryProtocol; +using ::apache::thrift::protocol::TCompactProtocol; +using ::apache::thrift::transport::TSocket; +using ::apache::thrift::transport::TTransport; +using ::apache::thrift::transport::TTransportException; +using ::apache::thrift::transport::TBufferedTransport; +using ::apache::thrift::transport::TFramedTransport; +using ::apache::thrift::TException; + +using namespace std; + +constexpr int32_t EMPTY_DATE_INT = 10000101; + +int32_t parseDateExpressionToInt(const boost::gregorian::date& date); +boost::gregorian::date parseIntToDate(int32_t dateInt); + +std::string getTimePrecision(int32_t timeFactor); + +std::string formatDatetime(const std::string& format, const std::string& precision, + int64_t timestamp, const std::string& zoneId); + +std::tm convertToTimestamp(int64_t value, int32_t timeFactor); +std::tm int32ToDate(int32_t value); + +namespace Version { +enum Version { + V_0_12, V_0_13, V_1_0 +}; +} + +namespace CompressionType { +enum CompressionType { + UNCOMPRESSED = (char)0, + SNAPPY = (char)1, + GZIP = (char)2, + LZO = (char)3, + SDT = (char)4, + PAA = (char)5, + PLA = (char)6, + LZ4 = (char)7, + ZSTD = (char)8, + LZMA2 = (char)9, +}; +} + +namespace TSDataType { +enum TSDataType { + BOOLEAN = (char)0, + INT32 = (char)1, + INT64 = (char)2, + FLOAT = (char)3, + DOUBLE = (char)4, + TEXT = (char)5, + VECTOR = (char)6, + UNKNOWN = (char)7, + TIMESTAMP = (char)8, + DATE = (char)9, + BLOB = (char)10, + STRING = (char)11 +}; +} + +TSDataType::TSDataType getDataTypeByStr(const std::string& typeStr); + +namespace TSEncoding { +enum TSEncoding { + PLAIN = (char)0, + DICTIONARY = (char)1, + RLE = (char)2, + DIFF = (char)3, + TS_2DIFF = (char)4, + BITMAP = (char)5, + GORILLA_V1 = (char)6, + REGULAR = (char)7, + GORILLA = (char)8, + ZIGZAG = (char)9, + FREQ = (char)10, + INVALID_ENCODING = (char)255 +}; +} + +namespace TSStatusCode { +enum TSStatusCode { + SUCCESS_STATUS = 200, + + // System level + INCOMPATIBLE_VERSION = 201, + CONFIGURATION_ERROR = 202, + START_UP_ERROR = 203, + SHUT_DOWN_ERROR = 204, + + // General Error + UNSUPPORTED_OPERATION = 300, + EXECUTE_STATEMENT_ERROR = 301, + MULTIPLE_ERROR = 302, + ILLEGAL_PARAMETER = 303, + OVERLAP_WITH_EXISTING_TASK = 304, + INTERNAL_SERVER_ERROR = 305, + + // Client, + REDIRECTION_RECOMMEND = 400, + + // Schema Engine + DATABASE_NOT_EXIST = 500, + 
DATABASE_ALREADY_EXISTS = 501, + SERIES_OVERFLOW = 502, + TIMESERIES_ALREADY_EXIST = 503, + TIMESERIES_IN_BLACK_LIST = 504, + ALIAS_ALREADY_EXIST = 505, + PATH_ALREADY_EXIST = 506, + METADATA_ERROR = 507, + PATH_NOT_EXIST = 508, + ILLEGAL_PATH = 509, + CREATE_TEMPLATE_ERROR = 510, + DUPLICATED_TEMPLATE = 511, + UNDEFINED_TEMPLATE = 512, + TEMPLATE_NOT_SET = 513, + DIFFERENT_TEMPLATE = 514, + TEMPLATE_IS_IN_USE = 515, + TEMPLATE_INCOMPATIBLE = 516, + SEGMENT_NOT_FOUND = 517, + PAGE_OUT_OF_SPACE = 518, + RECORD_DUPLICATED = 519, + SEGMENT_OUT_OF_SPACE = 520, + PBTREE_FILE_NOT_EXISTS = 521, + OVERSIZE_RECORD = 522, + PBTREE_FILE_REDO_LOG_BROKEN = 523, + TEMPLATE_NOT_ACTIVATED = 524, + + // Storage Engine + SYSTEM_READ_ONLY = 600, + STORAGE_ENGINE_ERROR = 601, + STORAGE_ENGINE_NOT_READY = 602, + + // Query Engine + PLAN_FAILED_NETWORK_PARTITION = 721 +}; +} + +class Field { +public: + TSDataType::TSDataType dataType = TSDataType::UNKNOWN; + bool boolV{}; + int intV{}; + boost::gregorian::date dateV; + int64_t longV{}; + float floatV{}; + double doubleV{}; + std::string stringV; + + explicit Field(TSDataType::TSDataType a) { + dataType = a; + } + + Field() = default; +}; + +enum class ColumnCategory { + TAG, + FIELD, + ATTRIBUTE +}; + +class MyStringBuffer { +public: + MyStringBuffer(); + explicit MyStringBuffer(const std::string& str); + + void reserve(size_t n); + void clear(); + bool hasRemaining(); + int getInt(); + boost::gregorian::date getDate(); + int64_t getInt64(); + float getFloat(); + double getDouble(); + char getChar(); + bool getBool(); + std::string getString(); + + void putInt(int ins); + void putDate(boost::gregorian::date date); + void putInt64(int64_t ins); + void putFloat(float ins); + void putDouble(double ins); + void putChar(char ins); + void putBool(bool ins); + void putString(const std::string& ins); + void concat(const std::string& ins); + +public: + std::string str; + size_t pos; + +private: + void checkBigEndian(); + const char* getOrderedByte(size_t len); + void putOrderedByte(char* buf, int len); + +private: + bool isBigEndian{}; + char numericBuf[8]{}; //only be used by int, long, float, double etc. 
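
The BitMap implemented in Common.cpp above packs eight positions per byte: position p lives in byte p >> 3, bit p % 8. A small usage sketch of the documented API:

```cpp
#include <cassert>

void bitMapExample() {
    BitMap bm(10);                 // 2 backing bytes for 10 positions
    assert(bm.isAllUnmarked());
    bm.mark(9);                    // byte 1, bit 1
    assert(bm.isMarked(9));
    assert(!bm.isMarked(8));
    assert(!bm.mark(10));          // out of range: returns false, no effect
    bm.reset();
    assert(bm.isAllUnmarked());
}
```
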
+}; + +class BitMap { +public: + explicit BitMap(size_t size = 0); + void resize(size_t size); + bool mark(size_t position); + bool unmark(size_t position); + void markAll(); + void reset(); + bool isMarked(size_t position) const; + bool isAllUnmarked() const; + bool isAllMarked() const; + const std::vector& getByteArray() const; + size_t getSize() const; + +private: + size_t size; + std::vector bits; +}; + +class IoTDBException : public std::exception { +public: + IoTDBException() = default; + + explicit IoTDBException(const std::string& m) : message(m) { + } + + explicit IoTDBException(const char* m) : message(m) { + } + + virtual const char* what() const noexcept override { + return message.c_str(); + } + +private: + std::string message; +}; + +class DateTimeParseException : public IoTDBException { +private: + std::string parsedString; + int errorIndex; + +public: + explicit DateTimeParseException(const std::string& message, + std::string parsedData, + int errorIndex) + : IoTDBException(message), + parsedString(std::move(parsedData)), + errorIndex(errorIndex) { + } + + explicit DateTimeParseException(const std::string& message, + std::string parsedData, + int errorIndex, + const std::exception& cause) + : IoTDBException(message + " [Caused by: " + cause.what() + "]"), + parsedString(std::move(parsedData)), + errorIndex(errorIndex) { + } + + const std::string& getParsedString() const noexcept { + return parsedString; + } + + int getErrorIndex() const noexcept { + return errorIndex; + } + + const char* what() const noexcept override { + static std::string fullMsg; + fullMsg = std::string(IoTDBException::what()) + + "\nParsed data: " + parsedString + + "\nError index: " + std::to_string(errorIndex); + return fullMsg.c_str(); + } +}; + +class IoTDBConnectionException : public IoTDBException { +public: + IoTDBConnectionException() { + } + + explicit IoTDBConnectionException(const char* m) : IoTDBException(m) { + } + + explicit IoTDBConnectionException(const std::string& m) : IoTDBException(m) { + } +}; + +class ExecutionException : public IoTDBException { +public: + ExecutionException() { + } + + explicit ExecutionException(const char* m) : IoTDBException(m) { + } + + explicit ExecutionException(const std::string& m) : IoTDBException(m) { + } + + explicit ExecutionException(const std::string& m, const TSStatus& tsStatus) : IoTDBException(m), status(tsStatus) { + } + + TSStatus status; +}; + +class BatchExecutionException : public IoTDBException { +public: + BatchExecutionException() { + } + + explicit BatchExecutionException(const char* m) : IoTDBException(m) { + } + + explicit BatchExecutionException(const std::string& m) : IoTDBException(m) { + } + + explicit BatchExecutionException(const std::vector& statusList) : statusList(statusList) { + } + + BatchExecutionException(const std::string& m, const std::vector& statusList) : IoTDBException(m), + statusList(statusList) { + } + + std::vector statusList; +}; + +class RedirectException : public IoTDBException { +public: + RedirectException() { + } + + explicit RedirectException(const char* m) : IoTDBException(m) { + } + + explicit RedirectException(const std::string& m) : IoTDBException(m) { + } + + RedirectException(const std::string& m, const TEndPoint& endPoint) : IoTDBException(m), endPoint(endPoint) { + } + + RedirectException(const std::string& m, const map& deviceEndPointMap) : IoTDBException(m), + deviceEndPointMap(deviceEndPointMap) { + } + + RedirectException(const std::string& m, const vector& endPointList) : IoTDBException(m), + 
endPointList(endPointList) { + } + + TEndPoint endPoint; + map deviceEndPointMap; + vector endPointList; +}; + +class UnSupportedDataTypeException : public IoTDBException { +public: + UnSupportedDataTypeException() { + } + + explicit UnSupportedDataTypeException(const char* m) : IoTDBException(m) { + } + + explicit UnSupportedDataTypeException(const std::string& m) : IoTDBException("UnSupported dataType: " + m) { + } +}; + +class SchemaNotFoundException : public IoTDBException { +public: + SchemaNotFoundException() { + } + + explicit SchemaNotFoundException(const char* m) : IoTDBException(m) { + } + + explicit SchemaNotFoundException(const std::string& m) : IoTDBException(m) { + } +}; + +class StatementExecutionException : public IoTDBException { +public: + StatementExecutionException() { + } + + explicit StatementExecutionException(const char* m) : IoTDBException(m) { + } + + explicit StatementExecutionException(const std::string& m) : IoTDBException(m) { + } +}; + +enum LogLevelType { + LEVEL_DEBUG = 0, + LEVEL_INFO, + LEVEL_WARN, + LEVEL_ERROR +}; + +extern LogLevelType LOG_LEVEL; + +#define log_debug(fmt,...) do {if(LOG_LEVEL <= LEVEL_DEBUG) {string s=string("[DEBUG] %s:%d (%s) - ") + fmt + "\n"; printf(s.c_str(), __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__);}} while(0) +#define log_info(fmt,...) do {if(LOG_LEVEL <= LEVEL_INFO) {string s=string("[INFO] %s:%d (%s) - ") + fmt + "\n"; printf(s.c_str(), __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__);}} while(0) +#define log_warn(fmt,...) do {if(LOG_LEVEL <= LEVEL_WARN) {string s=string("[WARN] %s:%d (%s) - ") + fmt + "\n"; printf(s.c_str(), __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__);}} while(0) +#define log_error(fmt,...) do {if(LOG_LEVEL <= LEVEL_ERROR) {string s=string("[ERROR] %s:%d (%s) - ") + fmt + "\n"; printf(s.c_str(), __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__);}} while(0) + +class RpcUtils { +public: + std::shared_ptr SUCCESS_STATUS; + + RpcUtils() { + SUCCESS_STATUS = std::make_shared(); + SUCCESS_STATUS->__set_code(TSStatusCode::SUCCESS_STATUS); + } + + static void verifySuccess(const TSStatus& status); + + static void verifySuccessWithRedirection(const TSStatus& status); + + static void verifySuccessWithRedirectionForMultiDevices(const TSStatus& status, vector devices); + + static void verifySuccess(const std::vector& statuses); + + static TSStatus getStatus(TSStatusCode::TSStatusCode tsStatusCode); + + static TSStatus getStatus(int code, const std::string& message); + + static std::shared_ptr getTSExecuteStatementResp(TSStatusCode::TSStatusCode tsStatusCode); + + static std::shared_ptr + getTSExecuteStatementResp(TSStatusCode::TSStatusCode tsStatusCode, const std::string& message); + + static std::shared_ptr getTSExecuteStatementResp(const TSStatus& status); + + static std::shared_ptr getTSFetchResultsResp(TSStatusCode::TSStatusCode tsStatusCode); + + static std::shared_ptr + getTSFetchResultsResp(TSStatusCode::TSStatusCode tsStatusCode, const std::string& appendMessage); + + static std::shared_ptr getTSFetchResultsResp(const TSStatus& status); +}; + + +#endif diff --git a/iotdb-client/client-cpp/src/main/DeviceID.h b/iotdb-client/client-cpp/src/main/DeviceID.h new file mode 100644 index 0000000000000..df2682cd5199e --- /dev/null +++ b/iotdb-client/client-cpp/src/main/DeviceID.h @@ -0,0 +1,161 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
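
RpcUtils::verifySuccess converts a non-success TSStatus into a typed exception carrying the original status. A sketch of the calling pattern; the status code and message are fabricated for illustration:

```cpp
// Turn a failed server status into an ExecutionException and recover its code.
void statusHandlingExample() {
    TSStatus failed = RpcUtils::getStatus(TSStatusCode::EXECUTE_STATEMENT_ERROR, "bad query");
    try {
        RpcUtils::verifySuccess(failed);
    } catch (const ExecutionException& e) {
        // e.status carries the full TSStatus alongside the what() message.
        log_error("statement failed with code %d", e.status.code);
    }
}
```
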
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +#ifndef IOTDB_DEVICEID_H +#define IOTDB_DEVICEID_H + +#include +#include +#include +#include +#include + +namespace storage { + +static const int DEFAULT_SEGMENT_NUM_FOR_TABLE_NAME = 3; +static const std::string PATH_SEPARATOR = "."; + +class IDeviceID { +public: + virtual ~IDeviceID() = default; + virtual std::string get_table_name() { return ""; } + virtual int segment_num() { return 0; } + virtual const std::vector& get_segments() const { + return empty_segments_; + } + virtual std::string get_device_name() const { return ""; }; + virtual bool operator<(const IDeviceID& other) { return 0; } + virtual bool operator==(const IDeviceID& other) { return false; } + virtual bool operator!=(const IDeviceID& other) { return false; } + +protected: + IDeviceID() : empty_segments_() {} + +private: + const std::vector empty_segments_; +}; + +struct IDeviceIDComparator { + bool operator()(const std::shared_ptr& lhs, + const std::shared_ptr& rhs) const { + return *lhs < *rhs; + } +}; + +class StringArrayDeviceID : public IDeviceID { +public: + explicit StringArrayDeviceID(const std::vector& segments) + : segments_(formalize(segments)) {} + + explicit StringArrayDeviceID() : segments_() {} + + ~StringArrayDeviceID() override = default; + + std::string get_device_name() const override { + return segments_.empty() ? "" : std::accumulate(std::next(segments_.begin()), segments_.end(), + segments_.front(), + [](std::string a, const std::string& b) { + return std::move(a) + "." + b; + }); + }; + + std::string get_table_name() override { + return segments_.empty() ? 
"" : segments_[0]; + } + + int segment_num() override { return static_cast(segments_.size()); } + + const std::vector& get_segments() const override { + return segments_; + } + + bool operator<(const IDeviceID& other) override { + auto other_segments = other.get_segments(); + return std::lexicographical_compare(segments_.begin(), segments_.end(), + other_segments.begin(), + other_segments.end()); + } + + bool operator==(const IDeviceID& other) override { + auto other_segments = other.get_segments(); + return (segments_.size() == other_segments.size()) && + std::equal(segments_.begin(), segments_.end(), + other_segments.begin()); + } + + bool operator!=(const IDeviceID& other) override { + return !(*this == other); + } + +private: + std::vector segments_; + + std::vector formalize( + const std::vector& segments) { + auto it = + std::find_if(segments.rbegin(), segments.rend(), + [](const std::string& seg) { return !seg.empty(); }); + return std::vector(segments.begin(), it.base()); + } + + std::vector split_device_id_string( + const std::vector& splits) { + size_t segment_cnt = splits.size(); + std::vector final_segments; + + if (segment_cnt == 0) { + return final_segments; + } + + if (segment_cnt == 1) { + // "root" -> {"root"} + final_segments.push_back(splits[0]); + } else if (segment_cnt < static_cast( + DEFAULT_SEGMENT_NUM_FOR_TABLE_NAME + 1)) { + // "root.a" -> {"root", "a"} + // "root.a.b" -> {"root.a", "b"} + std::string table_name = std::accumulate( + splits.begin(), splits.end() - 1, std::string(), + [](const std::string& a, const std::string& b) { + return a.empty() ? b : a + PATH_SEPARATOR + b; + }); + final_segments.push_back(table_name); + final_segments.push_back(splits.back()); + } else { + // "root.a.b.c" -> {"root.a.b", "c"} + // "root.a.b.c.d" -> {"root.a.b", "c", "d"} + std::string table_name = std::accumulate( + splits.begin(), + splits.begin() + DEFAULT_SEGMENT_NUM_FOR_TABLE_NAME, + std::string(), [](const std::string& a, const std::string& b) { + return a.empty() ? b : a + PATH_SEPARATOR + b; + }); + + final_segments.emplace_back(std::move(table_name)); + final_segments.insert( + final_segments.end(), + splits.begin() + DEFAULT_SEGMENT_NUM_FOR_TABLE_NAME, + splits.end()); + } + + return final_segments; + } +}; +} + +#endif \ No newline at end of file diff --git a/iotdb-client/client-cpp/src/main/IoTDBRpcDataSet.cpp b/iotdb-client/client-cpp/src/main/IoTDBRpcDataSet.cpp new file mode 100644 index 0000000000000..607f812a88a20 --- /dev/null +++ b/iotdb-client/client-cpp/src/main/IoTDBRpcDataSet.cpp @@ -0,0 +1,557 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +#include +#include +#include + +#include "IoTDBRpcDataSet.h" +#include "Column.h" + +const int32_t IoTDBRpcDataSet::START_INDEX = 2; +const std::string IoTDBRpcDataSet::TIMESTAMP_STR = "Time"; +const std::string IoTDBRpcDataSet::DEFAULT_TIME_FORMAT = "default"; + +IoTDBRpcDataSet::IoTDBRpcDataSet(const std::string& sql, + const std::vector& columnNameList, + const std::vector& columnTypeList, + const std::map& columnNameIndex, + bool ignoreTimestamp, + bool moreData, + int64_t queryId, + int64_t statementId, + std::shared_ptr client, + int64_t sessionId, + const std::vector& queryResult, + int32_t fetchSize, + int64_t timeout, + const std::string& zoneId, + const std::string& timeFormat) + : sql_(sql), + isClosed_(false), + client_(client), + fetchSize_(fetchSize), + timeout_(timeout), + hasCachedRecord_(false), + lastReadWasNull_(false), + columnSize_(static_cast(columnNameList.size())), + sessionId_(sessionId), + queryId_(queryId), + statementId_(statementId), + time_(0), + ignoreTimestamp_(ignoreTimestamp), + moreData_(moreData), + queryResult_(queryResult), + curTsBlock_(nullptr), + queryResultSize_(static_cast(queryResult.size())), + queryResultIndex_(0), + tsBlockSize_(0), + tsBlockIndex_(-1), + timeZoneId_(zoneId), + timeFormat_(timeFormat) { + if (!ignoreTimestamp) { + columnNameList_.push_back(TIMESTAMP_STR); + columnTypeList_.emplace_back("INT64"); + columnOrdinalMap_[TIMESTAMP_STR] = 1; + } + + // Process column names and types + if (!columnNameIndex.empty()) { + // Deduplicate column types + std::set uniqueValues; + for (const auto& entry : columnNameIndex) { + uniqueValues.insert(entry.second); + } + int deduplicatedColumnSize = static_cast(uniqueValues.size()); + columnTypeDeduplicatedList_.resize(deduplicatedColumnSize); + for (size_t i = 0; i < columnNameList.size(); ++i) { + const std::string& name = columnNameList[i]; + columnNameList_.push_back(name); + columnTypeList_.push_back(columnTypeList[i]); + // Update ordinal map and deduplicated types + if (!columnOrdinalMap_.count(name)) { + int index = columnNameIndex.at(name); + if (std::none_of(columnOrdinalMap_.begin(), columnOrdinalMap_.end(), + [index](const std::pair& entry) { + return entry.second == (index + START_INDEX); + })) { + columnTypeDeduplicatedList_[index] = getDataTypeByStr(columnTypeList[i]); + } + columnOrdinalMap_[name] = index + START_INDEX; + } + } + } + else { + // Handle case without column name index + int32_t currentIndex = START_INDEX; + for (size_t i = 0; i < columnNameList.size(); ++i) { + std::string name = columnNameList[i]; + columnNameList_.push_back(name); + columnTypeList_.push_back(columnTypeList[i]); + if (!columnOrdinalMap_.count(name)) { + columnOrdinalMap_[name] = currentIndex++; + columnTypeDeduplicatedList_.push_back(getDataTypeByStr(columnTypeList[i])); + } + } + } + + columnSize_ = static_cast(columnNameList_.size()); +} + +IoTDBRpcDataSet::~IoTDBRpcDataSet() { + if (!isClosed_) { + close(); + } +} + +bool IoTDBRpcDataSet::next() { + if (hasCachedBlock()) { + lastReadWasNull_ = false; + constructOneRow(); + return true; + } + + if (hasCachedByteBuffer()) { + constructOneTsBlock(); + constructOneRow(); + return true; + } + + if (moreData_) { + bool hasResultSet = fetchResults(); + if (hasResultSet && hasCachedByteBuffer()) { + constructOneTsBlock(); + constructOneRow(); + return true; + } + } + + close(); + return false; +} + +void IoTDBRpcDataSet::close(bool forceClose) { + if ((!forceClose) && isClosed_) { + return; + } + TSCloseOperationReq closeReq; + 
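
Taken together, next(), close(), and the typed getters that follow support the usual consumption loop. A hedged sketch; the column name "s1" and the assumption of an already constructed dataset are illustrative:

```cpp
// Drain an IoTDBRpcDataSet row by row; next() pulls further TsBlocks
// from the server as the cached ones are exhausted.
void drainDataSet(IoTDBRpcDataSet& dataSet) {
    while (dataSet.next()) {
        int64_t ts = dataSet.getCurrentRowTime();        // time column of the current row
        if (!dataSet.isNullByColumnName("s1")) {
            int64_t v = dataSet.getLong("s1");
            log_info("time=%lld value=%lld", (long long)ts, (long long)v);
        }
    }
    dataSet.close();  // no-op if next() already closed it on exhaustion
}
```
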
closeReq.__set_sessionId(sessionId_); + closeReq.__set_statementId(statementId_); + closeReq.__set_queryId(queryId_); + TSStatus tsStatus; + try { + client_->closeOperation(tsStatus, closeReq); + RpcUtils::verifySuccess(tsStatus); + } + catch (const TTransportException& e) { + log_debug(e.what()); + throw IoTDBConnectionException(e.what()); + } catch (const IoTDBException& e) { + log_debug(e.what()); + throw; + } catch (exception& e) { + log_debug(e.what()); + throw IoTDBException(e.what()); + } + isClosed_ = true; + client_ = nullptr; +} + +bool IoTDBRpcDataSet::fetchResults() { + if (isClosed_) { + throw IoTDBException("This data set is already closed"); + } + + TSFetchResultsReq req; + req.__set_sessionId(sessionId_); + req.__set_statement(sql_); + req.__set_fetchSize(fetchSize_); + req.__set_queryId(queryId_); + req.__set_isAlign(true); + req.__set_timeout(timeout_); + TSFetchResultsResp resp; + client_->fetchResultsV2(resp, req); + RpcUtils::verifySuccess(resp.status); + if (!resp.hasResultSet) { + close(); + } + else { + queryResult_ = resp.queryResult; + queryResultIndex_ = 0; + if (!queryResult_.empty()) { + queryResultSize_ = queryResult_.size(); + } + else { + queryResultSize_ = 0; + } + tsBlockIndex_ = -1; + tsBlockSize_ = 0; + } + return resp.hasResultSet; +} + +void IoTDBRpcDataSet::constructOneRow() { + tsBlockIndex_++; + hasCachedRecord_ = true; + time_ = curTsBlock_->getTimeColumn()->getLong(tsBlockIndex_); +} + +void IoTDBRpcDataSet::constructOneTsBlock() { + lastReadWasNull_ = false; + const auto& curTsBlockBytes = queryResult_[queryResultIndex_]; + queryResultIndex_++; + curTsBlock_ = TsBlock::deserialize(curTsBlockBytes); + tsBlockIndex_ = -1; + tsBlockSize_ = curTsBlock_->getPositionCount(); +} + +bool IoTDBRpcDataSet::isNullByIndex(int32_t columnIndex) { + int index = columnOrdinalMap_[findColumnNameByIndex(columnIndex)] - START_INDEX; + // time column will never be null + if (index < 0) { + return false; + } + return isNull(index, tsBlockIndex_); +} + +bool IoTDBRpcDataSet::isNullByColumnName(const std::string& columnName) { + int index = columnOrdinalMap_[columnName] - START_INDEX; + // time column will never be null + if (index < 0) { + return false; + } + return isNull(index, tsBlockIndex_); +} + +bool IoTDBRpcDataSet::isNull(int32_t index, int32_t rowNum) { + return index >= 0 && curTsBlock_->getColumn(index)->isNull(rowNum); +} + +bool IoTDBRpcDataSet::getBooleanByIndex(int32_t columnIndex) { + return getBoolean(findColumnNameByIndex(columnIndex)); +} + +bool IoTDBRpcDataSet::getBoolean(const std::string& columnName) { + int index = columnOrdinalMap_[columnName] - START_INDEX; + return getBooleanByTsBlockColumnIndex(index); +} + +bool IoTDBRpcDataSet::getBooleanByTsBlockColumnIndex(int32_t tsBlockColumnIndex) { + checkRecord(); + if (!isNull(tsBlockColumnIndex, tsBlockIndex_)) { + lastReadWasNull_ = false; + return curTsBlock_->getColumn(tsBlockColumnIndex)->getBoolean(tsBlockIndex_); + } + else { + lastReadWasNull_ = true; + return false; + } +} + +double IoTDBRpcDataSet::getDoubleByIndex(int32_t columnIndex) { + return getDouble(findColumnNameByIndex(columnIndex)); +} + +double IoTDBRpcDataSet::getDouble(const std::string& columnName) { + int index = columnOrdinalMap_[columnName] - START_INDEX; + return getDoubleByTsBlockColumnIndex(index); +} + +double IoTDBRpcDataSet::getDoubleByTsBlockColumnIndex(int32_t tsBlockColumnIndex) { + checkRecord(); + if (!isNull(tsBlockColumnIndex, tsBlockIndex_)) { + lastReadWasNull_ = false; + return 
curTsBlock_->getColumn(tsBlockColumnIndex)->getDouble(tsBlockIndex_); + } + else { + lastReadWasNull_ = true; + return 0.0; + } +} + +float IoTDBRpcDataSet::getFloatByIndex(int32_t columnIndex) { + return getFloat(findColumnNameByIndex(columnIndex)); +} + +float IoTDBRpcDataSet::getFloat(const std::string& columnName) { + int index = columnOrdinalMap_[columnName] - START_INDEX; + return getFloatByTsBlockColumnIndex(index); +} + +float IoTDBRpcDataSet::getFloatByTsBlockColumnIndex(int32_t tsBlockColumnIndex) { + checkRecord(); + if (!isNull(tsBlockColumnIndex, tsBlockIndex_)) { + lastReadWasNull_ = false; + return curTsBlock_->getColumn(tsBlockColumnIndex)->getFloat(tsBlockIndex_); + } + else { + lastReadWasNull_ = true; + return 0.0f; + } +} + +int32_t IoTDBRpcDataSet::getIntByIndex(int32_t columnIndex) { + return getInt(findColumnNameByIndex(columnIndex)); +} + +int32_t IoTDBRpcDataSet::getInt(const std::string& columnName) { + int index = columnOrdinalMap_[columnName] - START_INDEX; + return getIntByTsBlockColumnIndex(index); +} + +int32_t IoTDBRpcDataSet::getIntByTsBlockColumnIndex(int32_t tsBlockColumnIndex) { + checkRecord(); + if (!isNull(tsBlockColumnIndex, tsBlockIndex_)) { + lastReadWasNull_ = false; + TSDataType::TSDataType dataType = curTsBlock_->getColumn(tsBlockColumnIndex)->getDataType(); + if (dataType == TSDataType::INT64) { + return static_cast(curTsBlock_->getColumn(tsBlockColumnIndex)->getLong(tsBlockIndex_)); + } + return curTsBlock_->getColumn(tsBlockColumnIndex)->getInt(tsBlockIndex_); + } + else { + lastReadWasNull_ = true; + return 0; + } +} + +int64_t IoTDBRpcDataSet::getLongByIndex(int32_t columnIndex) { + return getLong(findColumnNameByIndex(columnIndex)); +} + +int64_t IoTDBRpcDataSet::getLong(const std::string& columnName) { + int index = columnOrdinalMap_[columnName] - START_INDEX; + return getLongByTsBlockColumnIndex(index); +} + +int64_t IoTDBRpcDataSet::getLongByTsBlockColumnIndex(int32_t tsBlockColumnIndex) { + checkRecord(); + if (tsBlockColumnIndex < 0) { + lastReadWasNull_ = false; + return curTsBlock_->getTimeByIndex(tsBlockIndex_); + } + if (!isNull(tsBlockColumnIndex, tsBlockIndex_)) { + lastReadWasNull_ = false; + TSDataType::TSDataType dataType = curTsBlock_->getColumn(tsBlockColumnIndex)->getDataType(); + if (dataType == TSDataType::INT32) { + return static_cast(curTsBlock_->getColumn(tsBlockColumnIndex)->getInt(tsBlockIndex_)); + } + return curTsBlock_->getColumn(tsBlockColumnIndex)->getLong(tsBlockIndex_); + } + else { + lastReadWasNull_ = true; + return 0; + } +} + +std::shared_ptr IoTDBRpcDataSet::getBinaryByIndex(int32_t columnIndex) { + return getBinary(findColumnNameByIndex(columnIndex)); +} + +std::shared_ptr IoTDBRpcDataSet::getBinary(const std::string& columnName) { + int index = columnOrdinalMap_[columnName] - START_INDEX; + return getBinaryByTsBlockColumnIndex(index); +} + +std::shared_ptr IoTDBRpcDataSet::getBinaryByTsBlockColumnIndex(int32_t tsBlockColumnIndex) { + checkRecord(); + if (!isNull(tsBlockColumnIndex, tsBlockIndex_)) { + lastReadWasNull_ = false; + return curTsBlock_->getColumn(tsBlockColumnIndex)->getBinary(tsBlockIndex_); + } + else { + lastReadWasNull_ = true; + return nullptr; + } +} + +std::string IoTDBRpcDataSet::getStringByIndex(int32_t columnIndex) { + return getString(findColumnNameByIndex(columnIndex)); +} + +std::string IoTDBRpcDataSet::getString(const std::string& columnName) { + int index = columnOrdinalMap_[columnName] - START_INDEX; + return getStringByTsBlockColumnIndex(index); +} + +std::string 
IoTDBRpcDataSet::getStringByTsBlockColumnIndex(int32_t tsBlockColumnIndex) { + checkRecord(); + if (tsBlockColumnIndex == -1) { + int64_t timestamp = curTsBlock_->getTimeByIndex(tsBlockIndex_); + return std::to_string(timestamp); + } + if (isNull(tsBlockColumnIndex, tsBlockIndex_)) { + lastReadWasNull_ = true; + return ""; + } + lastReadWasNull_ = false; + return getStringByTsBlockColumnIndexAndDataType(tsBlockColumnIndex, + getDataTypeByIndex(tsBlockColumnIndex)); +} + +std::string IoTDBRpcDataSet::getStringByTsBlockColumnIndexAndDataType(int32_t index, + TSDataType::TSDataType tsDataType) { + switch (tsDataType) { + case TSDataType::BOOLEAN: + return std::to_string(curTsBlock_->getColumn(index)->getBoolean(tsBlockIndex_)); + case TSDataType::INT32: + return std::to_string(curTsBlock_->getColumn(index)->getInt(tsBlockIndex_)); + case TSDataType::INT64: + return std::to_string(curTsBlock_->getColumn(index)->getLong(tsBlockIndex_)); + case TSDataType::TIMESTAMP: { + int64_t value = curTsBlock_->getColumn(index)->getLong(tsBlockIndex_); + return formatDatetime(timeFormat_, timePrecision_, value, timeZoneId_); + } + case TSDataType::FLOAT: + return std::to_string(curTsBlock_->getColumn(index)->getFloat(tsBlockIndex_)); + case TSDataType::DOUBLE: + return std::to_string(curTsBlock_->getColumn(index)->getDouble(tsBlockIndex_)); + case TSDataType::TEXT: + case TSDataType::STRING: + case TSDataType::BLOB: { + auto binary = curTsBlock_->getColumn(index)->getBinary(tsBlockIndex_); + return binary->getStringValue(); + } + case TSDataType::DATE: { + int32_t value = curTsBlock_->getColumn(index)->getInt(tsBlockIndex_); + auto date = parseIntToDate(value); + return boost::gregorian::to_iso_extended_string(date); + } + default: + return ""; + } +} + +int64_t IoTDBRpcDataSet::getTimestampByIndex(int32_t columnIndex) { + return getTimestamp(findColumnNameByIndex(columnIndex)); +} + +int64_t IoTDBRpcDataSet::getTimestamp(const std::string& columnName) { + return getLong(columnName); +} + +boost::gregorian::date IoTDBRpcDataSet::getDateByIndex(int32_t columnIndex) { + return getDate(findColumnNameByIndex(columnIndex)); +} + +boost::gregorian::date IoTDBRpcDataSet::getDate(const std::string& columnName) { + int32_t value = getInt(columnName); + return parseIntToDate(value); +} + +TSDataType::TSDataType IoTDBRpcDataSet::getDataTypeByIndex(int32_t columnIndex) { + return getDataType(findColumnNameByIndex(columnIndex)); +} + +TSDataType::TSDataType IoTDBRpcDataSet::getDataType(const std::string& columnName) { + if (columnName == TIMESTAMP_STR) { + return TSDataType::INT64; + } + int index = columnOrdinalMap_[columnName] - START_INDEX; + return index < 0 || index >= columnTypeDeduplicatedList_.size() + ? TSDataType::UNKNOWN + : columnTypeDeduplicatedList_[index]; +} + +int32_t IoTDBRpcDataSet::findColumn(const std::string& columnName) { + auto it = columnOrdinalMap_.find(columnName); + if (it != columnOrdinalMap_.end()) { + return it->second; + } + return -1; +} + +std::string IoTDBRpcDataSet::findColumnNameByIndex(int32_t columnIndex) { + if (columnIndex <= 0) { + throw IoTDBException("column index should start from 1"); + } + if (columnIndex > static_cast(columnNameList_.size())) { + throw IoTDBException( + "Column index " + std::to_string(columnIndex) + + " is out of range. 
Valid range is 0 to " + + std::to_string(columnNameList_.size() - 1) + ); + } + return columnNameList_[columnIndex - 1]; +} + +void IoTDBRpcDataSet::checkRecord() { + if (queryResultIndex_ > queryResultSize_ || + tsBlockIndex_ >= tsBlockSize_ || + queryResult_.empty() || + !curTsBlock_) { + throw IoTDBException("no record remains"); + } +} + +int32_t IoTDBRpcDataSet::getValueColumnStartIndex() const { + return ignoreTimestamp_ ? 0 : 1; +} + +int32_t IoTDBRpcDataSet::getColumnSize() const { + return static_cast(columnNameList_.size()); +} + +const std::vector& IoTDBRpcDataSet::getColumnTypeList() const { + return columnTypeList_; +} + +const std::vector& IoTDBRpcDataSet::getColumnNameList() const { + return columnNameList_; +} + +bool IoTDBRpcDataSet::isClosed() const { + return isClosed_; +} + +int32_t IoTDBRpcDataSet::getFetchSize() const { + return fetchSize_; +} + +void IoTDBRpcDataSet::setFetchSize(int32_t fetchSize) { + fetchSize_ = fetchSize; +} + +bool IoTDBRpcDataSet::hasCachedRecord() const { + return hasCachedRecord_; +} + +void IoTDBRpcDataSet::setHasCachedRecord(bool hasCachedRecord) { + hasCachedRecord_ = hasCachedRecord; +} + +bool IoTDBRpcDataSet::isLastReadWasNull() const { + return lastReadWasNull_; +} + +int64_t IoTDBRpcDataSet::getCurrentRowTime() const { + return time_; +} + +bool IoTDBRpcDataSet::isIgnoreTimestamp() const { + return ignoreTimestamp_; +} + +bool IoTDBRpcDataSet::hasCachedBlock() const { + return curTsBlock_ && tsBlockIndex_ < tsBlockSize_ - 1; +} + +bool IoTDBRpcDataSet::hasCachedByteBuffer() const { + return !queryResult_.empty() && queryResultIndex_ < queryResultSize_; +} diff --git a/iotdb-client/client-cpp/src/main/IoTDBRpcDataSet.h b/iotdb-client/client-cpp/src/main/IoTDBRpcDataSet.h new file mode 100644 index 0000000000000..8e3f0b3d628e4 --- /dev/null +++ b/iotdb-client/client-cpp/src/main/IoTDBRpcDataSet.h @@ -0,0 +1,150 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +#ifndef IOTDB_RPC_DATA_SET_H +#define IOTDB_RPC_DATA_SET_H + +#include +#include +#include +#include +#include +#include "IClientRPCService.h" +#include +#include "TsBlock.h" + +class IoTDBRpcDataSet { +public: + static const int32_t START_INDEX; + static const std::string TIMESTAMP_STR; + + static const std::string DEFAULT_TIME_FORMAT; + static const std::string TIME_PRECISION; + static const std::string MILLISECOND; + static const std::string MICROSECOND; + static const std::string NANOSECOND; + + IoTDBRpcDataSet(const std::string& sql, + const std::vector& columnNameList, + const std::vector& columnTypeList, + const std::map& columnNameIndex, + bool ignoreTimestamp, + bool moreData, + int64_t queryId, + int64_t statementId, + std::shared_ptr client, + int64_t sessionId, + const std::vector& queryResult, + int32_t fetchSize, + int64_t timeout, + const std::string& zoneId, + const std::string& timeFormat); + + ~IoTDBRpcDataSet(); + + bool next(); + void close(bool forceClose = false); + + bool hasCachedBlock() const; + bool hasCachedByteBuffer() const; + + bool isNull(int32_t index, int32_t rowNum); + bool isNullByIndex(int32_t columnIndex); + bool isNullByColumnName(const std::string& columnName); + bool getBooleanByIndex(int32_t columnIndex); + bool getBoolean(const std::string& columnName); + double getDoubleByIndex(int32_t columnIndex); + double getDouble(const std::string& columnName); + float getFloatByIndex(int32_t columnIndex); + float getFloat(const std::string& columnName); + int32_t getIntByIndex(int32_t columnIndex); + int32_t getInt(const std::string& columnName); + int64_t getLongByIndex(int32_t columnIndex); + int64_t getLong(const std::string& columnName); + std::shared_ptr getBinaryByIndex(int32_t columnIndex); + std::shared_ptr getBinary(const std::string& columnName); + std::string getStringByIndex(int32_t columnIndex); + std::string getString(const std::string& columnName); + int64_t getTimestampByIndex(int32_t columnIndex); + int64_t getTimestamp(const std::string& columnName); + boost::gregorian::date getDateByIndex(int32_t columnIndex); + boost::gregorian::date getDate(const std::string& columnName); + + TSDataType::TSDataType getDataTypeByIndex(int32_t columnIndex); + TSDataType::TSDataType getDataType(const std::string& columnName); + int32_t findColumn(const std::string& columnName); + std::string findColumnNameByIndex(int32_t columnIndex); + int32_t getValueColumnStartIndex() const; + int32_t getColumnSize() const; + const std::vector& getColumnTypeList() const; + const std::vector& getColumnNameList() const; + bool isClosed() const; + int32_t getFetchSize() const; + void setFetchSize(int32_t fetchSize); + bool hasCachedRecord() const; + void setHasCachedRecord(bool hasCachedRecord); + bool isLastReadWasNull() const; + int64_t getCurrentRowTime() const; + bool isIgnoreTimestamp() const; + +private: + bool fetchResults(); + void constructOneRow(); + void constructOneTsBlock(); + void checkRecord(); + bool getBooleanByTsBlockColumnIndex(int32_t tsBlockColumnIndex); + std::string getStringByTsBlockColumnIndexAndDataType(int32_t index, TSDataType::TSDataType tsDataType); + double getDoubleByTsBlockColumnIndex(int32_t tsBlockColumnIndex); + float getFloatByTsBlockColumnIndex(int32_t tsBlockColumnIndex); + int32_t getIntByTsBlockColumnIndex(int32_t tsBlockColumnIndex); + int64_t getLongByTsBlockColumnIndex(int32_t tsBlockColumnIndex); + std::shared_ptr getBinaryByTsBlockColumnIndex(int32_t tsBlockColumnIndex); + std::string 
getStringByTsBlockColumnIndex(int32_t tsBlockColumnIndex); + + std::string sql_; + bool isClosed_; + std::shared_ptr client_; + std::vector columnNameList_; + std::vector columnTypeList_; + std::map columnOrdinalMap_; + std::vector columnTypeDeduplicatedList_; + int32_t fetchSize_; + int64_t timeout_; + bool hasCachedRecord_; + bool lastReadWasNull_; + int32_t columnSize_; + int64_t sessionId_; + int64_t queryId_; + int64_t statementId_; + int64_t time_; + bool ignoreTimestamp_; + bool moreData_; + std::vector queryResult_; + std::shared_ptr curTsBlock_; + int32_t queryResultSize_; + int32_t queryResultIndex_; + int32_t tsBlockSize_; + int32_t tsBlockIndex_; + std::string timeZoneId_; + std::string timeFormat_; + int32_t timeFactor_; + std::string timePrecision_; +}; + +#endif // IOTDB_RPC_DATA_SET_H diff --git a/iotdb-client/client-cpp/src/main/NodesSupplier.cpp b/iotdb-client/client-cpp/src/main/NodesSupplier.cpp new file mode 100644 index 0000000000000..5f268f075a108 --- /dev/null +++ b/iotdb-client/client-cpp/src/main/NodesSupplier.cpp @@ -0,0 +1,222 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +#include "NodesSupplier.h" +#include "Session.h" +#include "SessionDataSet.h" +#include +#include +#include + +const std::string NodesSupplier::SHOW_DATA_NODES_COMMAND = "SHOW DATANODES"; +const std::string NodesSupplier::STATUS_COLUMN_NAME = "Status"; +const std::string NodesSupplier::IP_COLUMN_NAME = "RpcAddress"; +const std::string NodesSupplier::PORT_COLUMN_NAME = "RpcPort"; +const std::string NodesSupplier::REMOVING_STATUS = "Removing"; + +const int64_t NodesSupplier::TIMEOUT_IN_MS = 60000; +const int NodesSupplier::FETCH_SIZE = 10000; +const int NodesSupplier::THRIFT_DEFAULT_BUFFER_SIZE = 4096; +const int NodesSupplier::THRIFT_MAX_FRAME_SIZE = 1048576; +const int NodesSupplier::CONNECTION_TIMEOUT_IN_MS = 1000; + +TEndPoint RoundRobinPolicy::select(const std::vector& nodes) { + static std::atomic_uint index{0}; + + if (nodes.empty()) { + throw IoTDBException("No available nodes"); + } + + return nodes[index++ % nodes.size()]; +} + +StaticNodesSupplier::StaticNodesSupplier(const std::vector& nodes, + NodeSelectionPolicy policy) + : availableNodes_(nodes), policy_(std::move(policy)) {} + +boost::optional StaticNodesSupplier::getQueryEndPoint() { + try { + if (availableNodes_.empty()) { + return boost::none; + } + return policy_(availableNodes_); + } catch (const IoTDBException& e) { + return boost::none; + } +} + +std::vector StaticNodesSupplier::getEndPointList() { + return availableNodes_; +} + +StaticNodesSupplier::~StaticNodesSupplier() = default; + +std::shared_ptr NodesSupplier::create( + std::vector endpoints, + std::string userName, std::string password, std::string zoneId, + int32_t thriftDefaultBufferSize, int32_t thriftMaxFrameSize, + int32_t connectionTimeoutInMs, bool useSSL, bool enableRPCCompression, + std::string version, std::chrono::milliseconds refreshInterval, + NodeSelectionPolicy policy) { + if (endpoints.empty()) { + return nullptr; + } + auto supplier = std::make_shared( + userName, password, zoneId, thriftDefaultBufferSize, + thriftMaxFrameSize, connectionTimeoutInMs, useSSL, + enableRPCCompression, version, std::move(endpoints), std::move(policy) + ); + supplier->startBackgroundRefresh(refreshInterval); + return supplier; +} + +NodesSupplier::NodesSupplier( + std::string userName, std::string password, const std::string& zoneId, + int32_t thriftDefaultBufferSize, int32_t thriftMaxFrameSize, + int32_t connectionTimeoutInMs, bool useSSL, bool enableRPCCompression, + std::string version, std::vector endpoints, NodeSelectionPolicy policy) : userName_(std::move(userName)), password_(std::move(password)), zoneId_(zoneId), + thriftDefaultBufferSize_(thriftDefaultBufferSize), thriftMaxFrameSize_(thriftMaxFrameSize), + connectionTimeoutInMs_(connectionTimeoutInMs), useSSL_(useSSL), enableRPCCompression_(enableRPCCompression), version(version), endpoints_(std::move(endpoints)), + selectionPolicy_(std::move(policy)) { + deduplicateEndpoints(); +} + +std::vector NodesSupplier::getEndPointList() { + std::lock_guard lock(mutex_); + return endpoints_; +} + +TEndPoint NodesSupplier::selectQueryEndpoint() { + std::lock_guard lock(mutex_); + try { + return selectionPolicy_(endpoints_); + } catch (const std::exception& e) { + log_error("NodesSupplier::selectQueryEndpoint exception: %s", e.what()); + throw IoTDBException("NodesSupplier::selectQueryEndpoint exception, " + std::string(e.what())); + } +} + +boost::optional NodesSupplier::getQueryEndPoint() { + try { + return selectQueryEndpoint(); + } catch (const IoTDBException& e) { + return boost::none; + } +} + 
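
RoundRobinPolicy::select rotates through the node list with a shared atomic counter, so consecutive picks cycle across the cluster. An illustrative use with fabricated addresses:

```cpp
#include <cassert>
#include <vector>

void roundRobinExample() {
    TEndPoint a; a.ip = "10.0.0.1"; a.port = 6667;
    TEndPoint b; b.ip = "10.0.0.2"; b.port = 6667;
    std::vector<TEndPoint> nodes{a, b};

    // With two nodes, consecutive single-threaded picks always differ,
    // whatever value the shared counter starts from.
    TEndPoint first = RoundRobinPolicy::select(nodes);
    TEndPoint second = RoundRobinPolicy::select(nodes);
    assert(first.ip != second.ip);
}
```
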
+NodesSupplier::~NodesSupplier() { + stopBackgroundRefresh(); + client_->close(); +} + +void NodesSupplier::deduplicateEndpoints() { + std::vector uniqueEndpoints; + uniqueEndpoints.reserve(endpoints_.size()); + for (const auto& endpoint : endpoints_) { + if (std::find(uniqueEndpoints.begin(), uniqueEndpoints.end(), endpoint) == uniqueEndpoints.end()) { + uniqueEndpoints.push_back(endpoint); + } + } + endpoints_ = std::move(uniqueEndpoints); +} + +void NodesSupplier::startBackgroundRefresh(std::chrono::milliseconds interval) { + isRunning_ = true; + refreshThread_ = std::thread([this, interval] { + while (isRunning_) { + refreshEndpointList(); + std::unique_lock cvLock(this->mutex_); + refreshCondition_.wait_for(cvLock, interval, [this]() { + return !isRunning_.load(); + }); + } + }); +} + +std::vector NodesSupplier::fetchLatestEndpoints() { + try { + if (client_ == nullptr) { + client_ = std::make_shared(selectionPolicy_(endpoints_)); + client_->init(userName_, password_, enableRPCCompression_, zoneId_, version); + } + + auto sessionDataSet = client_->executeQueryStatement(SHOW_DATA_NODES_COMMAND); + + uint32_t columnAddrIdx = -1, columnPortIdx = -1, columnStatusIdx = -1; + auto columnNames = sessionDataSet->getColumnNames(); + for (uint32_t i = 0; i < columnNames.size(); i++) { + if (columnNames[i] == IP_COLUMN_NAME) { + columnAddrIdx = i; + } else if (columnNames[i] == PORT_COLUMN_NAME) { + columnPortIdx = i; + } else if (columnNames[i] == STATUS_COLUMN_NAME) { + columnStatusIdx = i; + } + } + + if (columnAddrIdx == -1 || columnPortIdx == -1 || columnStatusIdx == -1) { + throw IoTDBException("Required columns not found in query result."); + } + + std::vector ret; + while (sessionDataSet->hasNext()) { + auto record = sessionDataSet->next(); + std::string ip = record->fields.at(columnAddrIdx).stringV; + int32_t port = record->fields.at(columnPortIdx).intV; + std::string status = record->fields.at(columnStatusIdx).stringV; + + if (ip == "0.0.0.0" || status == REMOVING_STATUS) { + log_warn("Skipping invalid node: " + ip + ":" + to_string(port)); + continue; + } + TEndPoint endpoint; + endpoint.ip = ip; + endpoint.port = port; + ret.emplace_back(endpoint); + } + + return ret; + } catch (const IoTDBException& e) { + client_.reset(); + throw IoTDBException(std::string("NodesSupplier::fetchLatestEndpoints failed: ") + e.what()); + } +} + +void NodesSupplier::refreshEndpointList() { + try { + auto newEndpoints = fetchLatestEndpoints(); + if (newEndpoints.empty()) { + return; + } + + std::lock_guard lock(mutex_); + endpoints_.swap(newEndpoints); + deduplicateEndpoints(); + } catch (const IoTDBException& e) { + log_error(std::string("NodesSupplier::refreshEndpointList failed: ") + e.what()); + } +} + +void NodesSupplier::stopBackgroundRefresh() noexcept { + if (isRunning_.exchange(false)) { + refreshCondition_.notify_all(); + if (refreshThread_.joinable()) { + refreshThread_.join(); + } + } +} \ No newline at end of file diff --git a/iotdb-client/client-cpp/src/main/NodesSupplier.h b/iotdb-client/client-cpp/src/main/NodesSupplier.h new file mode 100644 index 0000000000000..a3cda24deac69 --- /dev/null +++ b/iotdb-client/client-cpp/src/main/NodesSupplier.h @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
diff --git a/iotdb-client/client-cpp/src/main/NodesSupplier.h b/iotdb-client/client-cpp/src/main/NodesSupplier.h
new file mode 100644
index 0000000000000..a3cda24deac69
--- /dev/null
+++ b/iotdb-client/client-cpp/src/main/NodesSupplier.h
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#ifndef IOTDB_NODES_SUPPLIER_H
+#define IOTDB_NODES_SUPPLIER_H
+
+#include <atomic>
+#include <chrono>
+#include <condition_variable>
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <thread>
+#include <vector>
+#include <boost/optional.hpp>
+
+#include "ThriftConnection.h"
+
+class TEndPoint;
+
+class RoundRobinPolicy {
+public:
+    static TEndPoint select(const std::vector<TEndPoint>& nodes);
+};
+
+class INodesSupplier {
+public:
+    virtual ~INodesSupplier() = default;
+    virtual boost::optional<TEndPoint> getQueryEndPoint() = 0;
+    virtual std::vector<TEndPoint> getEndPointList() = 0;
+    using NodeSelectionPolicy = std::function<TEndPoint(const std::vector<TEndPoint>&)>;
+};
+
+class StaticNodesSupplier : public INodesSupplier {
+public:
+    explicit StaticNodesSupplier(const std::vector<TEndPoint>& nodes,
+                                 NodeSelectionPolicy policy = RoundRobinPolicy::select);
+
+    boost::optional<TEndPoint> getQueryEndPoint() override;
+
+    std::vector<TEndPoint> getEndPointList() override;
+
+    ~StaticNodesSupplier() override;
+
+private:
+    const std::vector<TEndPoint> availableNodes_;
+    NodeSelectionPolicy policy_;
+};
+
+class NodesSupplier : public INodesSupplier {
+public:
+    static const std::string SHOW_DATA_NODES_COMMAND;
+    static const std::string STATUS_COLUMN_NAME;
+    static const std::string IP_COLUMN_NAME;
+    static const std::string PORT_COLUMN_NAME;
+    static const std::string REMOVING_STATUS;
+
+    static const int64_t TIMEOUT_IN_MS;
+    static const int FETCH_SIZE;
+    static const int THRIFT_DEFAULT_BUFFER_SIZE;
+    static const int THRIFT_MAX_FRAME_SIZE;
+    static const int CONNECTION_TIMEOUT_IN_MS;
+
+    static std::shared_ptr<NodesSupplier> create(
+        std::vector<TEndPoint> endpoints,
+        std::string userName, std::string password, std::string zoneId = "",
+        int32_t thriftDefaultBufferSize = ThriftConnection::THRIFT_DEFAULT_BUFFER_SIZE,
+        int32_t thriftMaxFrameSize = ThriftConnection::THRIFT_MAX_FRAME_SIZE,
+        int32_t connectionTimeoutInMs = ThriftConnection::CONNECTION_TIMEOUT_IN_MS,
+        bool useSSL = false, bool enableRPCCompression = false,
+        std::string version = "V_1_0",
+        std::chrono::milliseconds refreshInterval = std::chrono::milliseconds(TIMEOUT_IN_MS),
+        NodeSelectionPolicy policy = RoundRobinPolicy::select
+    );
+
+    NodesSupplier(
+        std::string userName, std::string password, const std::string& zoneId,
+        int32_t thriftDefaultBufferSize, int32_t thriftMaxFrameSize,
+        int32_t connectionTimeoutInMs, bool useSSL, bool enableRPCCompression,
+        std::string version, std::vector<TEndPoint> endpoints, NodeSelectionPolicy policy
+    );
+    std::vector<TEndPoint> getEndPointList() override;
+
+    boost::optional<TEndPoint> getQueryEndPoint() override;
+
+    ~NodesSupplier() override;
+
+private:
+    std::string userName_;
+    std::string password_;
+    int32_t thriftDefaultBufferSize_;
+    int32_t thriftMaxFrameSize_;
+    int32_t connectionTimeoutInMs_;
+    bool useSSL_;
+    bool enableRPCCompression_;
+    std::string version;
+    std::string zoneId_;
+
+    std::mutex mutex_;
+    std::vector<TEndPoint> endpoints_;
+    NodeSelectionPolicy selectionPolicy_;
+
+    std::atomic<bool> isRunning_{false};
+    std::thread refreshThread_;
+    std::condition_variable refreshCondition_;
+
+    std::shared_ptr<ThriftConnection> client_;
+
+    void deduplicateEndpoints();
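+
+    // Background-refresh machinery: startBackgroundRefresh() spawns
+    // refreshThread_, which calls refreshEndpointList() once per interval;
+    // refreshCondition_ lets stopBackgroundRefresh() wake the thread early
+    // after clearing isRunning_.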
+ + void startBackgroundRefresh(std::chrono::milliseconds interval); + + std::vector fetchLatestEndpoints(); + + void refreshEndpointList(); + + TEndPoint selectQueryEndpoint(); + + void stopBackgroundRefresh() noexcept; +}; + +#endif \ No newline at end of file diff --git a/iotdb-client/client-cpp/src/main/Session.cpp b/iotdb-client/client-cpp/src/main/Session.cpp index 9468f20dc6887..6e8d37e6199ea 100644 --- a/iotdb-client/client-cpp/src/main/Session.cpp +++ b/iotdb-client/client-cpp/src/main/Session.cpp @@ -21,6 +21,10 @@ #include #include #include +#include +#include +#include "NodesSupplier.h" +#include "SessionDataSet.h" using namespace std; @@ -33,201 +37,162 @@ static const int64_t QUERY_TIMEOUT_MS = -1; LogLevelType LOG_LEVEL = LEVEL_DEBUG; -TSDataType::TSDataType getTSDataTypeFromString(const string &str) { - // BOOLEAN, INT32, INT64, FLOAT, DOUBLE, TEXT, NULLTYPE - if (str == "BOOLEAN") return TSDataType::BOOLEAN; - else if (str == "INT32") return TSDataType::INT32; - else if (str == "INT64") return TSDataType::INT64; - else if (str == "FLOAT") return TSDataType::FLOAT; - else if (str == "DOUBLE") return TSDataType::DOUBLE; - else if (str == "TEXT") return TSDataType::TEXT; - else if (str == "NULLTYPE") return TSDataType::NULLTYPE; - return TSDataType::TEXT; -} - -void RpcUtils::verifySuccess(const TSStatus &status) { - if (status.code == TSStatusCode::MULTIPLE_ERROR) { - verifySuccess(status.subStatus); - return; - } - if (status.code != TSStatusCode::SUCCESS_STATUS - && status.code != TSStatusCode::REDIRECTION_RECOMMEND) { - throw ExecutionException(to_string(status.code) + ": " + status.message, status); - } -} - -void RpcUtils::verifySuccess(const vector &statuses) { - for (const TSStatus &status: statuses) { - if (status.code != TSStatusCode::SUCCESS_STATUS) { - throw BatchExecutionException(status.message, statuses); - } - } -} - -TSStatus RpcUtils::getStatus(TSStatusCode::TSStatusCode tsStatusCode) { - TSStatus status; - status.__set_code(tsStatusCode); - return status; -} - -TSStatus RpcUtils::getStatus(int code, const string &message) { - TSStatus status; - status.__set_code(code); - status.__set_message(message); - return status; -} - -shared_ptr RpcUtils::getTSExecuteStatementResp(TSStatusCode::TSStatusCode tsStatusCode) { - TSStatus status = getStatus(tsStatusCode); - return getTSExecuteStatementResp(status); -} - -shared_ptr -RpcUtils::getTSExecuteStatementResp(TSStatusCode::TSStatusCode tsStatusCode, const string &message) { - TSStatus status = getStatus(tsStatusCode, message); - return getTSExecuteStatementResp(status); -} - -shared_ptr RpcUtils::getTSExecuteStatementResp(const TSStatus &status) { - shared_ptr resp(new TSExecuteStatementResp()); - TSStatus tsStatus(status); - resp->status = tsStatus; - return resp; -} - -shared_ptr RpcUtils::getTSFetchResultsResp(TSStatusCode::TSStatusCode tsStatusCode) { - TSStatus status = getStatus(tsStatusCode); - return getTSFetchResultsResp(status); -} - -shared_ptr -RpcUtils::getTSFetchResultsResp(TSStatusCode::TSStatusCode tsStatusCode, const string &appendMessage) { - TSStatus status = getStatus(tsStatusCode, appendMessage); - return getTSFetchResultsResp(status); -} - -shared_ptr RpcUtils::getTSFetchResultsResp(const TSStatus &status) { - shared_ptr resp(new TSFetchResultsResp()); - TSStatus tsStatus(status); - resp->__set_status(tsStatus); - return resp; +TSDataType::TSDataType getTSDataTypeFromString(const string& str) { + // BOOLEAN, INT32, INT64, FLOAT, DOUBLE, TEXT, STRING, BLOB, TIMESTAMP, DATE, NULLTYPE + if (str 
== "BOOLEAN") { + return TSDataType::BOOLEAN; + } else if (str == "INT32") { + return TSDataType::INT32; + } else if (str == "INT64") { + return TSDataType::INT64; + } else if (str == "FLOAT") { + return TSDataType::FLOAT; + } else if (str == "DOUBLE") { + return TSDataType::DOUBLE; + } else if (str == "TEXT") { + return TSDataType::TEXT; + } else if (str == "TIMESTAMP") { + return TSDataType::TIMESTAMP; + } else if (str == "DATE") { + return TSDataType::DATE; + } else if (str == "BLOB") { + return TSDataType::BLOB; + } else if (str == "STRING") { + return TSDataType::STRING; + } + return TSDataType::UNKNOWN; } void Tablet::createColumns() { for (size_t i = 0; i < schemas.size(); i++) { TSDataType::TSDataType dataType = schemas[i].second; switch (dataType) { - case TSDataType::BOOLEAN: - values[i] = new bool[maxRowNumber]; - break; - case TSDataType::INT32: - values[i] = new int[maxRowNumber]; - break; - case TSDataType::INT64: - values[i] = new int64_t[maxRowNumber]; - break; - case TSDataType::FLOAT: - values[i] = new float[maxRowNumber]; - break; - case TSDataType::DOUBLE: - values[i] = new double[maxRowNumber]; - break; - case TSDataType::TEXT: - values[i] = new string[maxRowNumber]; - break; - default: - throw UnSupportedDataTypeException(string("Data type ") + to_string(dataType) + " is not supported."); + case TSDataType::BOOLEAN: + values[i] = new bool[maxRowNumber]; + break; + case TSDataType::DATE: + values[i] = new boost::gregorian::date[maxRowNumber]; + break; + case TSDataType::INT32: + values[i] = new int[maxRowNumber]; + break; + case TSDataType::TIMESTAMP: + case TSDataType::INT64: + values[i] = new int64_t[maxRowNumber]; + break; + case TSDataType::FLOAT: + values[i] = new float[maxRowNumber]; + break; + case TSDataType::DOUBLE: + values[i] = new double[maxRowNumber]; + break; + case TSDataType::STRING: + case TSDataType::BLOB: + case TSDataType::TEXT: + values[i] = new string[maxRowNumber]; + break; + default: + throw UnSupportedDataTypeException(string("Data type ") + to_string(dataType) + " is not supported."); } } } void Tablet::deleteColumns() { for (size_t i = 0; i < schemas.size(); i++) { + if (!values[i]) continue; TSDataType::TSDataType dataType = schemas[i].second; switch (dataType) { - case TSDataType::BOOLEAN: { - bool* valueBuf = (bool*)(values[i]); - delete[] valueBuf; - break; - } - case TSDataType::INT32: { - int* valueBuf = (int*)(values[i]); - delete[] valueBuf; - break; - } - case TSDataType::INT64: { - int64_t* valueBuf = (int64_t*)(values[i]); - delete[] valueBuf; - break; - } - case TSDataType::FLOAT: { - float* valueBuf = (float*)(values[i]); - delete[] valueBuf; - break; - } - case TSDataType::DOUBLE: { - double* valueBuf = (double*)(values[i]); - delete[] valueBuf; - break; - } - case TSDataType::TEXT: { - string* valueBuf = (string*)(values[i]); - delete[] valueBuf; - break; - } - default: - throw UnSupportedDataTypeException(string("Data type ") + to_string(dataType) + " is not supported."); - } - } -} - -void Tablet::addValue(size_t schemaId, size_t rowIndex, void* value) { - if (schemaId >= schemas.size()) { - char tmpStr[100]; - sprintf(tmpStr, "Tablet::addValue(), schemaId >= schemas.size(). schemaId=%ld, schemas.size()=%ld.", schemaId, schemas.size()); - throw std::out_of_range(tmpStr); - } - - if (rowIndex >= rowSize) { - char tmpStr[100]; - sprintf(tmpStr, "Tablet::addValue(), rowIndex >= rowSize. 
rowIndex=%ld, rowSize.size()=%ld.", rowIndex, rowSize); - throw std::out_of_range(tmpStr); - } - - TSDataType::TSDataType dataType = schemas[schemaId].second; - switch (dataType) { case TSDataType::BOOLEAN: { - bool* valueBuf = (bool*)(values[schemaId]); - valueBuf[rowIndex] = *((bool*)value); + bool* valueBuf = (bool*)(values[i]); + delete[] valueBuf; break; } case TSDataType::INT32: { - int* valueBuf = (int*)(values[schemaId]); - valueBuf[rowIndex] = *((int*)value); + int* valueBuf = (int*)(values[i]); + delete[] valueBuf; break; } + case TSDataType::DATE: { + boost::gregorian::date* valueBuf = (boost::gregorian::date*)(values[i]); + delete[] valueBuf; + break; + } + case TSDataType::TIMESTAMP: case TSDataType::INT64: { - int64_t* valueBuf = (int64_t*)(values[schemaId]); - valueBuf[rowIndex] = *((int64_t*)value); + int64_t* valueBuf = (int64_t*)(values[i]); + delete[] valueBuf; break; } case TSDataType::FLOAT: { - float* valueBuf = (float*)(values[schemaId]); - valueBuf[rowIndex] = *((float*)value); + float* valueBuf = (float*)(values[i]); + delete[] valueBuf; break; } case TSDataType::DOUBLE: { - double* valueBuf = (double*)(values[schemaId]); - valueBuf[rowIndex] = *((double*)value); + double* valueBuf = (double*)(values[i]); + delete[] valueBuf; break; } + case TSDataType::STRING: + case TSDataType::BLOB: case TSDataType::TEXT: { - string* valueBuf = (string*)(values[schemaId]); - valueBuf[rowIndex] = *(string*)value; + string* valueBuf = (string*)(values[i]); + delete[] valueBuf; break; } default: throw UnSupportedDataTypeException(string("Data type ") + to_string(dataType) + " is not supported."); + } + values[i] = nullptr; + } +} + +void Tablet::deepCopyTabletColValue(void* const* srcPtr, void** destPtr, TSDataType::TSDataType type, int maxRowNumber) { + void *src = *srcPtr; + switch (type) { + case TSDataType::BOOLEAN: + *destPtr = new bool[maxRowNumber]; + memcpy(*destPtr, src, maxRowNumber * sizeof(bool)); + break; + case TSDataType::INT32: + *destPtr = new int32_t[maxRowNumber]; + memcpy(*destPtr, src, maxRowNumber * sizeof(int32_t)); + break; + case TSDataType::INT64: + case TSDataType::TIMESTAMP: + *destPtr = new int64_t[maxRowNumber]; + memcpy(*destPtr, src, maxRowNumber * sizeof(int64_t)); + break; + case TSDataType::FLOAT: + *destPtr = new float[maxRowNumber]; + memcpy(*destPtr, src, maxRowNumber * sizeof(float)); + break; + case TSDataType::DOUBLE: + *destPtr = new double[maxRowNumber]; + memcpy(*destPtr, src, maxRowNumber * sizeof(double)); + break; + case TSDataType::DATE: { + *destPtr = new boost::gregorian::date[maxRowNumber]; + boost::gregorian::date* srcDate = static_cast(src); + boost::gregorian::date* destDate = static_cast(*destPtr); + for (size_t j = 0; j < maxRowNumber; ++j) { + destDate[j] = srcDate[j]; + } + break; + } + case TSDataType::STRING: + case TSDataType::TEXT: + case TSDataType::BLOB: { + *destPtr = new std::string[maxRowNumber]; + std::string* srcStr = static_cast(src); + std::string* destStr = static_cast(*destPtr); + for (size_t j = 0; j < maxRowNumber; ++j) { + destStr[j] = srcStr[j]; + } + break; + } + default: + break; } } @@ -246,32 +211,38 @@ size_t Tablet::getValueByteSize() { size_t valueOccupation = 0; for (size_t i = 0; i < schemas.size(); i++) { switch (schemas[i].second) { - case TSDataType::BOOLEAN: - valueOccupation += rowSize; - break; - case TSDataType::INT32: - valueOccupation += rowSize * 4; - break; - case TSDataType::INT64: - valueOccupation += rowSize * 8; - break; - case TSDataType::FLOAT: - valueOccupation += rowSize * 4; - 
break; - case TSDataType::DOUBLE: - valueOccupation += rowSize * 8; - break; - case TSDataType::TEXT: { - valueOccupation += rowSize * 4; - string* valueBuf = (string*)(values[i]); - for (size_t j = 0; j < rowSize; j++) { - valueOccupation += valueBuf[j].size(); - } - break; + case TSDataType::BOOLEAN: + valueOccupation += rowSize; + break; + case TSDataType::INT32: + valueOccupation += rowSize * 4; + break; + case TSDataType::DATE: + valueOccupation += rowSize * 4; + break; + case TSDataType::TIMESTAMP: + case TSDataType::INT64: + valueOccupation += rowSize * 8; + break; + case TSDataType::FLOAT: + valueOccupation += rowSize * 4; + break; + case TSDataType::DOUBLE: + valueOccupation += rowSize * 8; + break; + case TSDataType::STRING: + case TSDataType::BLOB: + case TSDataType::TEXT: { + valueOccupation += rowSize * 4; + string* valueBuf = (string*)(values[i]); + for (size_t j = 0; j < rowSize; j++) { + valueOccupation += valueBuf[j].size(); } - default: - throw UnSupportedDataTypeException( - string("Data type ") + to_string(schemas[i].second) + " is not supported."); + break; + } + default: + throw UnSupportedDataTypeException( + string("Data type ") + to_string(schemas[i].second) + " is not supported."); } } return valueOccupation; @@ -281,7 +252,18 @@ void Tablet::setAligned(bool isAligned) { this->isAligned = isAligned; } -string SessionUtils::getTime(const Tablet &tablet) { +std::shared_ptr Tablet::getDeviceID(int row) { + std::vector id_array(idColumnIndexes.size() + 1); + size_t idArrayIdx = 0; + id_array[idArrayIdx++] = this->deviceId; + for (auto idColumnIndex : idColumnIndexes) { + void* strPtr = getValue(idColumnIndex, row, TSDataType::TEXT); + id_array[idArrayIdx++] = *static_cast(strPtr); + } + return std::make_shared(id_array); +} + +string SessionUtils::getTime(const Tablet& tablet) { MyStringBuffer timeBuffer; unsigned int n = 8u * tablet.rowSize; if (n > timeBuffer.str.capacity()) { @@ -294,282 +276,134 @@ string SessionUtils::getTime(const Tablet &tablet) { return timeBuffer.str; } -string SessionUtils::getValue(const Tablet &tablet) { +string SessionUtils::getValue(const Tablet& tablet) { MyStringBuffer valueBuffer; unsigned int n = 8u * tablet.schemas.size() * tablet.rowSize; if (n > valueBuffer.str.capacity()) { valueBuffer.reserve(n); } - for (size_t i = 0; i < tablet.schemas.size(); i++) { TSDataType::TSDataType dataType = tablet.schemas[i].second; const BitMap& bitMap = tablet.bitMaps[i]; - switch (dataType) { - case TSDataType::BOOLEAN: { - bool* valueBuf = (bool*)(tablet.values[i]); - for (size_t index = 0; index < tablet.rowSize; index++) { - if (!bitMap.isMarked(index)) { - valueBuffer.putBool(valueBuf[index]); - } - else { - valueBuffer.putBool(false); - } + switch (dataType) { + case TSDataType::BOOLEAN: { + bool* valueBuf = (bool*)(tablet.values[i]); + for (size_t index = 0; index < tablet.rowSize; index++) { + if (!bitMap.isMarked(index)) { + valueBuffer.putBool(valueBuf[index]); + } + else { + valueBuffer.putBool(false); + } + } + break; + } + case TSDataType::INT32: { + int* valueBuf = (int*)(tablet.values[i]); + for (size_t index = 0; index < tablet.rowSize; index++) { + if (!bitMap.isMarked(index)) { + valueBuffer.putInt(valueBuf[index]); + } + else { + valueBuffer.putInt((numeric_limits::min)()); } - break; } - case TSDataType::INT32: { - int* valueBuf = (int*)(tablet.values[i]); - for (size_t index = 0; index < tablet.rowSize; index++) { - if (!bitMap.isMarked(index)) { - valueBuffer.putInt(valueBuf[index]); - } - else { - 
valueBuffer.putInt((numeric_limits::min)()); - } + break; + } + case TSDataType::DATE: { + boost::gregorian::date* valueBuf = (boost::gregorian::date*)(tablet.values[i]); + for (size_t index = 0; index < tablet.rowSize; index++) { + if (!bitMap.isMarked(index)) { + valueBuffer.putDate(valueBuf[index]); + } + else { + valueBuffer.putInt(EMPTY_DATE_INT); } - break; } - case TSDataType::INT64: { - int64_t* valueBuf = (int64_t*)(tablet.values[i]); - for (size_t index = 0; index < tablet.rowSize; index++) { - if (!bitMap.isMarked(index)) { - valueBuffer.putInt64(valueBuf[index]); - } - else { - valueBuffer.putInt64((numeric_limits::min)()); - } + break; + } + case TSDataType::TIMESTAMP: + case TSDataType::INT64: { + int64_t* valueBuf = (int64_t*)(tablet.values[i]); + for (size_t index = 0; index < tablet.rowSize; index++) { + if (!bitMap.isMarked(index)) { + valueBuffer.putInt64(valueBuf[index]); + } + else { + valueBuffer.putInt64((numeric_limits::min)()); } - break; } - case TSDataType::FLOAT: { - float* valueBuf = (float*)(tablet.values[i]); - for (size_t index = 0; index < tablet.rowSize; index++) { - if (!bitMap.isMarked(index)) { - valueBuffer.putFloat(valueBuf[index]); - } - else { - valueBuffer.putFloat((numeric_limits::min)()); - } + break; + } + case TSDataType::FLOAT: { + float* valueBuf = (float*)(tablet.values[i]); + for (size_t index = 0; index < tablet.rowSize; index++) { + if (!bitMap.isMarked(index)) { + valueBuffer.putFloat(valueBuf[index]); + } + else { + valueBuffer.putFloat((numeric_limits::min)()); } - break; } - case TSDataType::DOUBLE: { - double* valueBuf = (double*)(tablet.values[i]); - for (size_t index = 0; index < tablet.rowSize; index++) { - if (!bitMap.isMarked(index)) { - valueBuffer.putDouble(valueBuf[index]); - } - else { - valueBuffer.putDouble((numeric_limits::min)()); - } + break; + } + case TSDataType::DOUBLE: { + double* valueBuf = (double*)(tablet.values[i]); + for (size_t index = 0; index < tablet.rowSize; index++) { + if (!bitMap.isMarked(index)) { + valueBuffer.putDouble(valueBuf[index]); + } + else { + valueBuffer.putDouble((numeric_limits::min)()); } - break; } - case TSDataType::TEXT: { - string* valueBuf = (string*)(tablet.values[i]); - for (size_t index = 0; index < tablet.rowSize; index++) { + break; + } + case TSDataType::STRING: + case TSDataType::BLOB: + case TSDataType::TEXT: { + string* valueBuf = (string*)(tablet.values[i]); + for (size_t index = 0; index < tablet.rowSize; index++) { + if (!bitMap.isMarked(index)) { valueBuffer.putString(valueBuf[index]); } - break; + else { + valueBuffer.putString(""); + } } - default: - throw UnSupportedDataTypeException(string("Data type ") + to_string(dataType) + " is not supported."); + break; + } + default: + throw UnSupportedDataTypeException(string("Data type ") + to_string(dataType) + " is not supported."); } } for (size_t i = 0; i < tablet.schemas.size(); i++) { const BitMap& bitMap = tablet.bitMaps[i]; bool columnHasNull = !bitMap.isAllUnmarked(); - valueBuffer.putChar(columnHasNull ? (char) 1 : (char) 0); + valueBuffer.putChar(columnHasNull ? 
(char)1 : (char)0); if (columnHasNull) { const vector& bytes = bitMap.getByteArray(); - for (const char byte: bytes) { - valueBuffer.putChar(byte); + for (size_t index = 0; index < tablet.rowSize / 8 + 1; index++) { + valueBuffer.putChar(bytes[index]); } } } return valueBuffer.str; } -int SessionDataSet::getFetchSize() { - return fetchSize; -} - -void SessionDataSet::setFetchSize(int fetchSize) { - this->fetchSize = fetchSize; -} - -vector SessionDataSet::getColumnNames() { return this->columnNameList; } - -vector SessionDataSet::getColumnTypeList() { return this->columnTypeList; } - -bool SessionDataSet::hasNext() { - if (hasCachedRecord) { +bool SessionUtils::isTabletContainsSingleDevice(Tablet tablet) { + if (tablet.rowSize == 1) { return true; } - if (!tsQueryDataSetTimeBuffer.hasRemaining()) { - TSFetchResultsReq req; - req.__set_sessionId(sessionId); - req.__set_statement(sql); - req.__set_fetchSize(fetchSize); - req.__set_queryId(queryId); - req.__set_isAlign(true); - req.__set_timeout(-1); - try { - TSFetchResultsResp resp; - client->fetchResults(resp, req); - RpcUtils::verifySuccess(resp.status); - - if (!resp.hasResultSet) { - return false; - } else { - TSQueryDataSet *tsQueryDataSet = &(resp.queryDataSet); - tsQueryDataSetTimeBuffer.str = tsQueryDataSet->time; - tsQueryDataSetTimeBuffer.pos = 0; - - valueBuffers.clear(); - bitmapBuffers.clear(); - - for (size_t i = columnFieldStartIndex; i < columnNameList.size(); i++) { - if (duplicateLocation.find(i) != duplicateLocation.end()) { - continue; - } - std::string name = columnNameList[i]; - int valueIndex = columnMap[name]; - valueBuffers.emplace_back(new MyStringBuffer(tsQueryDataSet->valueList[valueIndex])); - bitmapBuffers.emplace_back(new MyStringBuffer(tsQueryDataSet->bitmapList[valueIndex])); - } - rowsIndex = 0; - } - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (exception &e) { - throw IoTDBException(string("Cannot fetch result from server: ") + e.what()); + auto firstDeviceId = tablet.getDeviceID(0); + for (int i = 1; i < tablet.rowSize; ++i) { + if (*firstDeviceId != *tablet.getDeviceID(i)) { + return false; } } - - constructOneRow(); - hasCachedRecord = true; return true; } -void SessionDataSet::constructOneRow() { - vector outFields; - int loc = 0; - for (size_t i = columnFieldStartIndex; i < columnNameList.size(); i++) { - Field field; - if (duplicateLocation.find(i) != duplicateLocation.end()) { - field = outFields[duplicateLocation[i]]; - } else { - MyStringBuffer *bitmapBuffer = bitmapBuffers[loc].get(); - // another new 8 row, should move the bitmap buffer position to next byte - if (rowsIndex % 8 == 0) { - currentBitmap[loc] = bitmapBuffer->getChar(); - } - - if (!isNull(loc, rowsIndex)) { - MyStringBuffer *valueBuffer = valueBuffers[loc].get(); - TSDataType::TSDataType dataType = getTSDataTypeFromString(columnTypeList[i]); - field.dataType = dataType; - switch (dataType) { - case TSDataType::BOOLEAN: { - bool booleanValue = valueBuffer->getBool(); - field.boolV = booleanValue; - break; - } - case TSDataType::INT32: { - int intValue = valueBuffer->getInt(); - field.intV = intValue; - break; - } - case TSDataType::INT64: { - int64_t longValue = valueBuffer->getInt64(); - field.longV = longValue; - break; - } - case TSDataType::FLOAT: { - float floatValue = valueBuffer->getFloat(); - field.floatV = floatValue; - break; - } - case TSDataType::DOUBLE: { - double doubleValue = 
valueBuffer->getDouble(); - field.doubleV = doubleValue; - break; - } - case TSDataType::TEXT: { - string stringValue = valueBuffer->getString(); - field.stringV = stringValue; - break; - } - default: { - throw UnSupportedDataTypeException( - string("Data type ") + columnTypeList[i] + " is not supported."); - } - } - } else { - field.dataType = TSDataType::NULLTYPE; - } - loc++; - } - outFields.push_back(field); - } - - if (!this->isIgnoreTimeStamp) { - rowRecord = RowRecord(tsQueryDataSetTimeBuffer.getInt64(), outFields); - } else { - tsQueryDataSetTimeBuffer.getInt64(); - rowRecord = RowRecord(outFields); - } - rowsIndex++; -} - -bool SessionDataSet::isNull(int index, int rowNum) { - char bitmap = currentBitmap[index]; - int shift = rowNum % 8; - return ((flag >> shift) & bitmap) == 0; -} - -RowRecord *SessionDataSet::next() { - if (!hasCachedRecord) { - if (!hasNext()) { - return nullptr; - } - } - - hasCachedRecord = false; - return &rowRecord; -} - -void SessionDataSet::closeOperationHandle(bool forceClose) { - if ((!forceClose) && (!operationIsOpen)) { - return; - } - operationIsOpen = false; - - TSCloseOperationReq closeReq; - closeReq.__set_sessionId(sessionId); - closeReq.__set_statementId(statementId); - closeReq.__set_queryId(queryId); - TSStatus tsStatus; - try { - client->closeOperation(tsStatus, closeReq); - RpcUtils::verifySuccess(tsStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); - } -} - string MeasurementNode::serialize() const { MyStringBuffer buffer; buffer.putString(getName()); @@ -589,7 +423,7 @@ string Template::serialize() const { alignedPrefix.emplace(""); } - for (const auto &child: children_) { + for (const auto& child : children_) { stack.push(make_pair("", child.second)); } @@ -609,10 +443,11 @@ string Template::serialize() const { if (cur_node_ptr->isAligned()) { alignedPrefix.emplace(fullPath); } - for (const auto &child: cur_node_ptr->getChildren()) { + for (const auto& child : cur_node_ptr->getChildren()) { stack.push(make_pair(fullPath, child.second)); } - } else { + } + else { buffer.putString(prefix); buffer.putBool(alignedPrefix.find(prefix) != alignedPrefix.end()); buffer.concat(cur_node_ptr->serialize()); @@ -628,17 +463,44 @@ string Template::serialize() const { Session::~Session() { try { close(); - } catch (const exception &e) { + } + catch (const exception& e) { log_debug(e.what()); } } +void Session::removeBrokenSessionConnection(shared_ptr sessionConnection) { + if (enableRedirection_) { + this->endPointToSessionConnection.erase(sessionConnection->getEndPoint()); + } + + auto it1 = deviceIdToEndpoint.begin(); + while (it1 != deviceIdToEndpoint.end()) { + if (it1->second == sessionConnection->getEndPoint()) { + it1 = deviceIdToEndpoint.erase(it1); + } + else { + ++it1; + } + } + + auto it2 = tableModelDeviceIdToEndpoint.begin(); + while (it2 != tableModelDeviceIdToEndpoint.end()) { + if (it2->second == sessionConnection->getEndPoint()) { + it2 = tableModelDeviceIdToEndpoint.erase(it2); + } + else { + ++it2; + } + } +} + /** * check whether the batch has been sorted * * @return whether the batch has been sorted */ -bool Session::checkSorted(const Tablet &tablet) { +bool Session::checkSorted(const Tablet& tablet) { for (size_t i = 1; i < tablet.rowSize; i++) { if (tablet.timestamps[i] < tablet.timestamps[i - 1]) { return false; 
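
A note on the sort path touched below: when a tablet arrives unsorted, the client builds an index array, sorts the index by timestamp (sortIndexByTimestamp), and then permutes every value column through that index (sortValuesList). A minimal self-contained sketch of this index co-sort pattern, with illustrative names that are not code from this patch:

    #include <algorithm>
    #include <cstdint>
    #include <numeric>
    #include <vector>

    int main() {
        std::vector<int64_t> ts{30, 10, 20};
        std::vector<double> col{3.0, 1.0, 2.0};

        // Sort a permutation of row indexes by timestamp.
        std::vector<int> idx(ts.size());
        std::iota(idx.begin(), idx.end(), 0);
        std::sort(idx.begin(), idx.end(),
                  [&](int a, int b) { return ts[a] < ts[b]; });

        // Gather each column through the permutation.
        std::vector<double> sorted(col.size());
        for (size_t i = 0; i < idx.size(); ++i) {
            sorted[i] = col[idx[i]];
        }
        // ts is gathered the same way; rows are now in timestamp order.
        return 0;
    }
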
@@ -647,7 +509,7 @@ bool Session::checkSorted(const Tablet &tablet) { return true; } -bool Session::checkSorted(const vector ×) { +bool Session::checkSorted(const vector& times) { for (size_t i = 1; i < times.size(); i++) { if (times[i] < times[i - 1]) { return false; @@ -656,7 +518,7 @@ bool Session::checkSorted(const vector ×) { return true; } -template +template std::vector sortList(const std::vector& valueList, const int* index, int indexLength) { std::vector sortedValues(valueList.size()); for (int i = 0; i < indexLength; i++) { @@ -665,7 +527,7 @@ std::vector sortList(const std::vector& valueList, const int* index, int i return sortedValues; } -template +template void sortValuesList(T* valueList, const int* index, size_t indexLength) { T* sortedValues = new T[indexLength]; for (int i = 0; i < indexLength; i++) { @@ -683,7 +545,7 @@ void Session::sortTablet(Tablet& tablet) { * so we can insert continuous data in value list to get a better performance */ // sort to get index, and use index to sort value list - int *index = new int[tablet.rowSize]; + int* index = new int[tablet.rowSize]; for (size_t i = 0; i < tablet.rowSize; i++) { index[i] = i; } @@ -693,39 +555,46 @@ void Session::sortTablet(Tablet& tablet) { for (size_t i = 0; i < tablet.schemas.size(); i++) { TSDataType::TSDataType dataType = tablet.schemas[i].second; switch (dataType) { - case TSDataType::BOOLEAN: { - sortValuesList((bool*)(tablet.values[i]), index, tablet.rowSize); - break; - } - case TSDataType::INT32: { - sortValuesList((int*)(tablet.values[i]), index, tablet.rowSize); - break; - } - case TSDataType::INT64: { - sortValuesList((int64_t*)(tablet.values[i]), index, tablet.rowSize); - break; - } - case TSDataType::FLOAT: { - sortValuesList((float*)(tablet.values[i]), index, tablet.rowSize); - break; - } - case TSDataType::DOUBLE: { - sortValuesList((double*)(tablet.values[i]), index, tablet.rowSize); - break; - } - case TSDataType::TEXT: { - sortValuesList((string*)(tablet.values[i]), index, tablet.rowSize); - break; - } - default: - throw UnSupportedDataTypeException(string("Data type ") + to_string(dataType) + " is not supported."); + case TSDataType::BOOLEAN: { + sortValuesList((bool*)(tablet.values[i]), index, tablet.rowSize); + break; + } + case TSDataType::INT32: { + sortValuesList((int*)(tablet.values[i]), index, tablet.rowSize); + break; + } + case TSDataType::DATE: { + sortValuesList((boost::gregorian::date*)(tablet.values[i]), index, tablet.rowSize); + break; + } + case TSDataType::TIMESTAMP: + case TSDataType::INT64: { + sortValuesList((int64_t*)(tablet.values[i]), index, tablet.rowSize); + break; + } + case TSDataType::FLOAT: { + sortValuesList((float*)(tablet.values[i]), index, tablet.rowSize); + break; + } + case TSDataType::DOUBLE: { + sortValuesList((double*)(tablet.values[i]), index, tablet.rowSize); + break; + } + case TSDataType::STRING: + case TSDataType::BLOB: + case TSDataType::TEXT: { + sortValuesList((string*)(tablet.values[i]), index, tablet.rowSize); + break; + } + default: + throw UnSupportedDataTypeException(string("Data type ") + to_string(dataType) + " is not supported."); } } delete[] index; } -void Session::sortIndexByTimestamp(int *index, std::vector ×tamps, int length) { +void Session::sortIndexByTimestamp(int* index, std::vector& timestamps, int length) { if (length <= 1) { return; } @@ -737,18 +606,19 @@ void Session::sortIndexByTimestamp(int *index, std::vector ×tamps, /** * Append value into buffer in Big Endian order to comply with IoTDB server */ -void 
Session::appendValues(string &buffer, const char *value, int size) { +void Session::appendValues(string& buffer, const char* value, int size) { static bool hasCheckedEndianFlag = false; static bool localCpuIsBigEndian = false; if (!hasCheckedEndianFlag) { hasCheckedEndianFlag = true; - int chk = 0x0201; //used to distinguish CPU's type (BigEndian or LittleEndian) - localCpuIsBigEndian = (0x01 != *(char *) (&chk)); + int chk = 0x0201; //used to distinguish CPU's type (BigEndian or LittleEndian) + localCpuIsBigEndian = (0x01 != *(char*)(&chk)); } if (localCpuIsBigEndian) { buffer.append(value, size); - } else { + } + else { for (int i = size - 1; i >= 0; i--) { buffer.append(value + i, 1); } @@ -756,75 +626,89 @@ void Session::appendValues(string &buffer, const char *value, int size) { } void -Session::putValuesIntoBuffer(const vector &types, const vector &values, string &buf) { +Session::putValuesIntoBuffer(const vector& types, const vector& values, string& buf) { + int32_t date; for (size_t i = 0; i < values.size(); i++) { int8_t typeNum = getDataTypeNumber(types[i]); - buf.append((char *) (&typeNum), sizeof(int8_t)); + buf.append((char*)(&typeNum), sizeof(int8_t)); switch (types[i]) { - case TSDataType::BOOLEAN: - buf.append(values[i], 1); - break; - case TSDataType::INT32: - appendValues(buf, values[i], sizeof(int32_t)); - break; - case TSDataType::INT64: - appendValues(buf, values[i], sizeof(int64_t)); - break; - case TSDataType::FLOAT: - appendValues(buf, values[i], sizeof(float)); - break; - case TSDataType::DOUBLE: - appendValues(buf, values[i], sizeof(double)); - break; - case TSDataType::TEXT: { - int32_t len = (uint32_t) strlen(values[i]); - appendValues(buf, (char *) (&len), sizeof(uint32_t)); - // no need to change the byte order of string value - buf.append(values[i], len); - break; - } - case TSDataType::NULLTYPE: - break; - default: - break; - } - } -} - -int8_t Session::getDataTypeNumber(TSDataType::TSDataType type) { - switch (type) { case TSDataType::BOOLEAN: - return 0; + buf.append(values[i], 1); + break; case TSDataType::INT32: - return 1; + appendValues(buf, values[i], sizeof(int32_t)); + break; + case TSDataType::DATE: + date = parseDateExpressionToInt(*(boost::gregorian::date*)values[i]); + appendValues(buf, (char*)&date, sizeof(int32_t)); + break; + case TSDataType::TIMESTAMP: case TSDataType::INT64: - return 2; + appendValues(buf, values[i], sizeof(int64_t)); + break; case TSDataType::FLOAT: - return 3; + appendValues(buf, values[i], sizeof(float)); + break; case TSDataType::DOUBLE: - return 4; - case TSDataType::TEXT: - return 5; + appendValues(buf, values[i], sizeof(double)); + break; + case TSDataType::STRING: + case TSDataType::BLOB: + case TSDataType::TEXT: { + int32_t len = (uint32_t)strlen(values[i]); + appendValues(buf, (char*)(&len), sizeof(uint32_t)); + // no need to change the byte order of string value + buf.append(values[i], len); + break; + } default: - return -1; + break; + } + } +} + +int8_t Session::getDataTypeNumber(TSDataType::TSDataType type) { + switch (type) { + case TSDataType::BOOLEAN: + return 0; + case TSDataType::INT32: + return 1; + case TSDataType::INT64: + return 2; + case TSDataType::FLOAT: + return 3; + case TSDataType::DOUBLE: + return 4; + case TSDataType::TEXT: + return 5; + case TSDataType::TIMESTAMP: + return 8; + case TSDataType::DATE: + return 9; + case TSDataType::BLOB: + return 10; + case TSDataType::STRING: + return 11; + default: + return -1; } } string Session::getVersionString(Version::Version version) { switch (version) 
{ - case Version::V_0_12: - return "V_0_12"; - case Version::V_0_13: - return "V_0_13"; - case Version::V_1_0: - return "V_1_0"; - default: - return "V_0_12"; + case Version::V_0_12: + return "V_0_12"; + case Version::V_0_13: + return "V_0_13"; + case Version::V_1_0: + return "V_1_0"; + default: + return "V_0_12"; } } void Session::initZoneId() { - if (!zoneId.empty()) { + if (!zoneId_.empty()) { return; } @@ -838,7 +722,155 @@ void Session::initZoneId() { char zoneStr[32]; strftime(zoneStr, sizeof(zoneStr), "%z", &tmv); - zoneId = zoneStr; + zoneId_ = zoneStr; +} + +void Session::initNodesSupplier() { + std::vector endPoints; + TEndPoint endPoint; + endPoint.__set_ip(host_); + endPoint.__set_port(rpcPort_); + endPoints.emplace_back(endPoint); + if (enableAutoFetch_) { + nodesSupplier_ = NodesSupplier::create(endPoints, username_, password_); + } + else { + nodesSupplier_ = make_shared(endPoints); + } +} + +void Session::initDefaultSessionConnection() { + defaultEndPoint_.__set_ip(host_); + defaultEndPoint_.__set_port(rpcPort_); + defaultSessionConnection_ = make_shared(this, defaultEndPoint_, zoneId_, nodesSupplier_, fetchSize_, + 60, 500, + sqlDialect_, database_); +} + +void Session::insertStringRecordsWithLeaderCache(vector deviceIds, vector times, + vector> measurementsList, + vector> valuesList, bool isAligned) { + std::unordered_map, TSInsertStringRecordsReq> recordsGroup; + for (int i = 0; i < deviceIds.size(); i++) { + auto connection = getSessionConnection(deviceIds[i]); + if (recordsGroup.find(connection) == recordsGroup.end()) { + TSInsertStringRecordsReq request; + std::vector emptyPrefixPaths; + std::vector> emptyMeasurementsList; + vector> emptyValuesList; + std::vector emptyTimestamps; + request.__set_isAligned(isAligned); + request.__set_prefixPaths(emptyPrefixPaths); + request.__set_timestamps(emptyTimestamps); + request.__set_measurementsList(emptyMeasurementsList); + request.__set_valuesList(emptyValuesList); + recordsGroup.insert(make_pair(connection, request)); + } + TSInsertStringRecordsReq& existingReq = recordsGroup[connection]; + existingReq.prefixPaths.emplace_back(deviceIds[i]); + existingReq.timestamps.emplace_back(times[i]); + existingReq.measurementsList.emplace_back(measurementsList[i]); + existingReq.valuesList.emplace_back(valuesList[i]); + } + std::function, const TSInsertStringRecordsReq&)> consumer = + [](const std::shared_ptr& c, const TSInsertStringRecordsReq& r) { + c->insertStringRecords(r); + }; + if (recordsGroup.size() == 1) { + insertOnce(recordsGroup, consumer); + } + else { + insertByGroup(recordsGroup, consumer); + } +} + +void Session::insertRecordsWithLeaderCache(vector deviceIds, vector times, + vector> measurementsList, + const vector>& typesList, + vector> valuesList, bool isAligned) { + std::unordered_map, TSInsertRecordsReq> recordsGroup; + for (int i = 0; i < deviceIds.size(); i++) { + auto connection = getSessionConnection(deviceIds[i]); + if (recordsGroup.find(connection) == recordsGroup.end()) { + TSInsertRecordsReq request; + std::vector emptyPrefixPaths; + std::vector> emptyMeasurementsList; + std::vector emptyValuesList; + std::vector emptyTimestamps; + request.__set_isAligned(isAligned); + request.__set_prefixPaths(emptyPrefixPaths); + request.__set_timestamps(emptyTimestamps); + request.__set_measurementsList(emptyMeasurementsList); + request.__set_valuesList(emptyValuesList); + recordsGroup.insert(make_pair(connection, request)); + } + TSInsertRecordsReq& existingReq = recordsGroup[connection]; + 
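+ // Append this record to the request batched for its leader connection;
+ // the typed values are serialized into a per-record byte buffer below.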
existingReq.prefixPaths.emplace_back(deviceIds[i]); + existingReq.timestamps.emplace_back(times[i]); + existingReq.measurementsList.emplace_back(measurementsList[i]); + vector bufferList; + string buffer; + putValuesIntoBuffer(typesList[i], valuesList[i], buffer); + existingReq.valuesList.emplace_back(buffer); + recordsGroup[connection] = existingReq; + } + std::function, const TSInsertRecordsReq&)> consumer = + [](const std::shared_ptr& c, const TSInsertRecordsReq& r) { + c->insertRecords(r); + }; + if (recordsGroup.size() == 1) { + insertOnce(recordsGroup, consumer); + } + else { + insertByGroup(recordsGroup, consumer); + } +} + +void Session::insertTabletsWithLeaderCache(unordered_map tablets, bool sorted, bool isAligned) { + std::unordered_map, TSInsertTabletsReq> tabletsGroup; + if (tablets.empty()) { + throw BatchExecutionException("No tablet is inserting!"); + } + for (const auto& item : tablets) { + if (isAligned != item.second->isAligned) { + throw BatchExecutionException("The tablets should be all aligned or non-aligned!"); + } + if (!checkSorted(*(item.second))) { + sortTablet(*(item.second)); + } + auto deviceId = item.first; + auto tablet = item.second; + auto connection = getSessionConnection(deviceId); + auto it = tabletsGroup.find(connection); + if (it == tabletsGroup.end()) { + TSInsertTabletsReq request; + tabletsGroup[connection] = request; + } + TSInsertTabletsReq& existingReq = tabletsGroup[connection]; + existingReq.prefixPaths.emplace_back(tablet->deviceId); + existingReq.timestampsList.emplace_back(move(SessionUtils::getTime(*tablet))); + existingReq.valuesList.emplace_back(move(SessionUtils::getValue(*tablet))); + existingReq.sizeList.emplace_back(tablet->rowSize); + vector dataTypes; + vector measurements; + for (pair schema : tablet->schemas) { + measurements.push_back(schema.first); + dataTypes.push_back(schema.second); + } + existingReq.measurementsList.emplace_back(measurements); + existingReq.typesList.emplace_back(dataTypes); + } + + std::function, const TSInsertTabletsReq&)> consumer = + [](const std::shared_ptr& c, const TSInsertTabletsReq& r) { + c->insertTablets(r); + }; + if (tabletsGroup.size() == 1) { + insertOnce(tabletsGroup, consumer); + } + else { + insertByGroup(tabletsGroup, consumer); + } } void Session::open() { @@ -850,388 +882,299 @@ void Session::open(bool enableRPCCompression) { } void Session::open(bool enableRPCCompression, int connectionTimeoutInMs) { - if (!isClosed) { + if (!isClosed_) { return; } - shared_ptr socket(new TSocket(host, rpcPort)); - transport = std::make_shared(socket); - socket->setConnTimeout(connectionTimeoutInMs); - if (!transport->isOpen()) { - try { - transport->open(); - } - catch (TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } - } - if (enableRPCCompression) { - shared_ptr protocol(new TCompactProtocol(transport)); - client = std::make_shared(protocol); - } else { - shared_ptr protocol(new TBinaryProtocol(transport)); - client = std::make_shared(protocol); - } - - std::map configuration; - configuration["version"] = getVersionString(version); - - TSOpenSessionReq openReq; - openReq.__set_username(username); - openReq.__set_password(password); - openReq.__set_zoneId(zoneId); - openReq.__set_configuration(configuration); - try { - TSOpenSessionResp openResp; - client->openSession(openResp, openReq); - RpcUtils::verifySuccess(openResp.status); - if (protocolVersion != openResp.serverProtocolVersion) { - if (openResp.serverProtocolVersion == 0) {// less than 0.10 - 
throw logic_error(string("Protocol not supported, Client version is ") + to_string(protocolVersion) + - ", but Server version is " + to_string(openResp.serverProtocolVersion)); - } - } - - sessionId = openResp.sessionId; - statementId = client->requestStatementId(sessionId); - - if (!zoneId.empty()) { - setTimeZone(zoneId); - } else { - zoneId = getTimeZone(); - } - } catch (const TTransportException &e) { - log_debug(e.what()); - transport->close(); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - transport->close(); - throw; - } catch (const exception &e) { + initDefaultSessionConnection(); + } + catch (const exception& e) { log_debug(e.what()); - transport->close(); throw IoTDBException(e.what()); } + zoneId_ = defaultSessionConnection_->zoneId; - isClosed = false; + if (enableRedirection_) { + endPointToSessionConnection.insert(make_pair(defaultEndPoint_, defaultSessionConnection_)); + } + + isClosed_ = false; } void Session::close() { - if (isClosed) { + if (isClosed_) { return; } - isClosed = true; - - bool needThrowException = false; - string errMsg; - try { - TSCloseSessionReq req; - req.__set_sessionId(sessionId); - TSStatus tsStatus; - client->closeSession(tsStatus, req); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const exception &e) { - log_debug(e.what()); - errMsg = errMsg + "Session::close() client->closeSession() error, maybe remote server is down. " + e.what() + "\n" ; - needThrowException = true; - } - - try { - if (transport->isOpen()) { - transport->close(); - } - } - catch (const exception &e) { - log_debug(e.what()); - errMsg = errMsg + "Session::close() transport->close() error. " + e.what() + "\n" ; - needThrowException = true; - } - - if (needThrowException) { - throw IoTDBException(errMsg); - } + isClosed_ = true; } -void Session::insertRecord(const string &deviceId, int64_t time, - const vector &measurements, - const vector &values) { +void Session::insertRecord(const string& deviceId, int64_t time, + const vector& measurements, + const vector& values) { TSInsertStringRecordReq req; - req.__set_sessionId(sessionId); req.__set_prefixPath(deviceId); req.__set_timestamp(time); req.__set_measurements(measurements); req.__set_values(values); req.__set_isAligned(false); - TSStatus respStatus; try { - client->insertStringRecord(respStatus, req); - RpcUtils::verifySuccess(respStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); + getSessionConnection(deviceId)->insertStringRecord(req); + } + catch (RedirectException& e) { + handleRedirection(deviceId, e.endPoint); + } catch (const IoTDBConnectionException& e) { + if (enableRedirection_ && deviceIdToEndpoint.find(deviceId) != deviceIdToEndpoint.end()) { + deviceIdToEndpoint.erase(deviceId); + try { + defaultSessionConnection_->insertStringRecord(req); + } + catch (RedirectException& e) { + } + } + else { + throw e; + } } } -void Session::insertRecord(const string &prefixPath, int64_t time, - const vector &measurements, - const vector &types, - const vector &values) { +void Session::insertRecord(const string& deviceId, int64_t time, + const vector& measurements, + const vector& types, + const vector& values) { TSInsertRecordReq req; - req.__set_sessionId(sessionId); - 
req.__set_prefixPath(prefixPath); + req.__set_prefixPath(deviceId); req.__set_timestamp(time); req.__set_measurements(measurements); string buffer; putValuesIntoBuffer(types, values, buffer); req.__set_values(buffer); - req.__set_isAligned(true); - TSStatus respStatus; + req.__set_isAligned(true); + try { - client->insertRecord(respStatus, req); - RpcUtils::verifySuccess(respStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); + getSessionConnection(deviceId)->insertRecord(req); + } + catch (RedirectException& e) { + handleRedirection(deviceId, e.endPoint); + } catch (const 
IoTDBConnectionException& e) { + if (enableRedirection_ && deviceIdToEndpoint.find(deviceId) != deviceIdToEndpoint.end()) { + deviceIdToEndpoint.erase(deviceId); + try { + defaultSessionConnection_->insertRecord(req); + } + catch (RedirectException& e) { + } + } + else { + throw e; + } } } -void Session::insertRecords(const vector &deviceIds, - const vector ×, - const vector> &measurementsList, - const vector> &valuesList) { +void Session::insertRecords(const vector& deviceIds, + const vector& times, + const vector>& measurementsList, + const vector>& valuesList) { size_t len = deviceIds.size(); if (len != times.size() || len != measurementsList.size() || len != valuesList.size()) { logic_error e("deviceIds, times, measurementsList and valuesList's size should be equal"); throw exception(e); } - TSInsertStringRecordsReq request; - request.__set_sessionId(sessionId); - request.__set_prefixPaths(deviceIds); - request.__set_timestamps(times); - request.__set_measurementsList(measurementsList); - request.__set_valuesList(valuesList); - request.__set_isAligned(false); - try { - TSStatus respStatus; - client->insertStringRecords(respStatus, request); - RpcUtils::verifySuccess(respStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); + if (enableRedirection_) { + insertStringRecordsWithLeaderCache(deviceIds, times, measurementsList, valuesList, false); + } + else { + TSInsertStringRecordsReq request; + request.__set_prefixPaths(deviceIds); + request.__set_timestamps(times); + request.__set_measurementsList(measurementsList); + request.__set_valuesList(valuesList); + request.__set_isAligned(false); + try { + defaultSessionConnection_->insertStringRecords(request); + } + catch (RedirectException& e) { + } } } -void Session::insertRecords(const vector &deviceIds, - const vector ×, - const vector> &measurementsList, - const vector> &typesList, - const vector> &valuesList) { +void Session::insertRecords(const vector& deviceIds, + const vector& times, + const vector>& measurementsList, + const vector>& typesList, + const vector>& valuesList) { size_t len = deviceIds.size(); if (len != times.size() || len != measurementsList.size() || len != valuesList.size()) { logic_error e("deviceIds, times, measurementsList and valuesList's size should be equal"); throw exception(e); } - TSInsertRecordsReq request; - request.__set_sessionId(sessionId); - request.__set_prefixPaths(deviceIds); - request.__set_timestamps(times); - request.__set_measurementsList(measurementsList); - vector bufferList; - for (size_t i = 0; i < valuesList.size(); i++) { - string buffer; - putValuesIntoBuffer(typesList[i], valuesList[i], buffer); - bufferList.push_back(buffer); - } - request.__set_valuesList(bufferList); - request.__set_isAligned(false); - try { - TSStatus respStatus; - client->insertRecords(respStatus, request); - RpcUtils::verifySuccess(respStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); + if (enableRedirection_) { + insertRecordsWithLeaderCache(deviceIds, times, measurementsList, typesList, valuesList, false); + } + else { + TSInsertRecordsReq request; + 
request.__set_prefixPaths(deviceIds); + request.__set_timestamps(times); + request.__set_measurementsList(measurementsList); + vector bufferList; + for (size_t i = 0; i < valuesList.size(); i++) { + string buffer; + putValuesIntoBuffer(typesList[i], valuesList[i], buffer); + bufferList.push_back(buffer); + } + request.__set_valuesList(bufferList); + request.__set_isAligned(false); + try { + defaultSessionConnection_->insertRecords(request); + } + catch (RedirectException& e) { + } } } -void Session::insertAlignedRecords(const vector &deviceIds, - const vector ×, - const vector> &measurementsList, - const vector> &valuesList) { +void Session::insertAlignedRecords(const vector& deviceIds, + const vector& times, + const vector>& measurementsList, + const vector>& valuesList) { size_t len = deviceIds.size(); if (len != times.size() || len != measurementsList.size() || len != valuesList.size()) { logic_error e("deviceIds, times, measurementsList and valuesList's size should be equal"); throw exception(e); } - TSInsertStringRecordsReq request; - request.__set_sessionId(sessionId); - request.__set_prefixPaths(deviceIds); - request.__set_timestamps(times); - request.__set_measurementsList(measurementsList); - request.__set_valuesList(valuesList); - request.__set_isAligned(true); - try { - TSStatus respStatus; - client->insertStringRecords(respStatus, request); - RpcUtils::verifySuccess(respStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); + if (enableRedirection_) { + insertStringRecordsWithLeaderCache(deviceIds, times, measurementsList, valuesList, true); + } + else { + TSInsertStringRecordsReq request; + request.__set_prefixPaths(deviceIds); + request.__set_timestamps(times); + request.__set_measurementsList(measurementsList); + request.__set_valuesList(valuesList); + request.__set_isAligned(true); + try { + defaultSessionConnection_->insertStringRecords(request); + } + catch (RedirectException& e) { + } } } -void Session::insertAlignedRecords(const vector &deviceIds, - const vector ×, - const vector> &measurementsList, - const vector> &typesList, - const vector> &valuesList) { +void Session::insertAlignedRecords(const vector& deviceIds, + const vector& times, + const vector>& measurementsList, + const vector>& typesList, + const vector>& valuesList) { size_t len = deviceIds.size(); if (len != times.size() || len != measurementsList.size() || len != valuesList.size()) { logic_error e("deviceIds, times, measurementsList and valuesList's size should be equal"); throw exception(e); } - TSInsertRecordsReq request; - request.__set_sessionId(sessionId); - request.__set_prefixPaths(deviceIds); - request.__set_timestamps(times); - request.__set_measurementsList(measurementsList); - vector bufferList; - for (size_t i = 0; i < valuesList.size(); i++) { - string buffer; - putValuesIntoBuffer(typesList[i], valuesList[i], buffer); - bufferList.push_back(buffer); - } - request.__set_valuesList(bufferList); - request.__set_isAligned(true); - try { - TSStatus respStatus; - client->insertRecords(respStatus, request); - RpcUtils::verifySuccess(respStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (const exception &e) { - log_debug(e.what()); 
- throw IoTDBException(e.what()); + if (enableRedirection_) { + insertRecordsWithLeaderCache(deviceIds, times, measurementsList, typesList, valuesList, true); + } + else { + TSInsertRecordsReq request; + request.__set_prefixPaths(deviceIds); + request.__set_timestamps(times); + request.__set_measurementsList(measurementsList); + vector bufferList; + for (size_t i = 0; i < valuesList.size(); i++) { + string buffer; + putValuesIntoBuffer(typesList[i], valuesList[i], buffer); + bufferList.push_back(buffer); + } + request.__set_valuesList(bufferList); + request.__set_isAligned(false); + try { + defaultSessionConnection_->insertRecords(request); + } + catch (RedirectException& e) { + } } } -void Session::insertRecordsOfOneDevice(const string &deviceId, - vector ×, - vector> &measurementsList, - vector> &typesList, - vector> &valuesList) { +void Session::insertRecordsOfOneDevice(const string& deviceId, + vector& times, + vector>& measurementsList, + vector>& typesList, + vector>& valuesList) { insertRecordsOfOneDevice(deviceId, times, measurementsList, typesList, valuesList, false); } -void Session::insertRecordsOfOneDevice(const string &deviceId, - vector ×, - vector> &measurementsList, - vector> &typesList, - vector> &valuesList, +void Session::insertRecordsOfOneDevice(const string& deviceId, + vector& times, + vector>& measurementsList, + vector>& typesList, + vector>& valuesList, bool sorted) { - if (!checkSorted(times)) { - int *index = new int[times.size()]; + int* index = new int[times.size()]; for (size_t i = 0; i < times.size(); i++) { index[i] = (int)i; } @@ -1244,7 +1187,6 @@ void Session::insertRecordsOfOneDevice(const string &deviceId, delete[] index; } TSInsertRecordsOfOneDeviceReq request; - request.__set_sessionId(sessionId); request.__set_prefixPath(deviceId); request.__set_timestamps(times); request.__set_measurementsList(measurementsList); @@ -1256,40 +1198,43 @@ void Session::insertRecordsOfOneDevice(const string &deviceId, } request.__set_valuesList(bufferList); request.__set_isAligned(false); - + TSStatus respStatus; try { - TSStatus respStatus; - client->insertRecordsOfOneDevice(respStatus, request); - RpcUtils::verifySuccess(respStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); + getSessionConnection(deviceId)->insertRecordsOfOneDevice(request); + } + catch (RedirectException& e) { + handleRedirection(deviceId, e.endPoint); + } catch (const IoTDBConnectionException& e) { + if (enableRedirection_ && deviceIdToEndpoint.find(deviceId) != deviceIdToEndpoint.end()) { + deviceIdToEndpoint.erase(deviceId); + try { + defaultSessionConnection_->insertRecordsOfOneDevice(request); + } + catch (RedirectException& e) { + } + } + else { + throw e; + } } } -void Session::insertAlignedRecordsOfOneDevice(const string &deviceId, - vector ×, - vector> &measurementsList, - vector> &typesList, - vector> &valuesList) { +void Session::insertAlignedRecordsOfOneDevice(const string& deviceId, + vector& times, + vector>& measurementsList, + vector>& typesList, + vector>& valuesList) { insertAlignedRecordsOfOneDevice(deviceId, times, measurementsList, typesList, valuesList, false); } -void Session::insertAlignedRecordsOfOneDevice(const string &deviceId, - vector ×, - vector> &measurementsList, - vector> &typesList, - vector> &valuesList, +void 
Session::insertAlignedRecordsOfOneDevice(const string& deviceId, + vector& times, + vector>& measurementsList, + vector>& typesList, + vector>& valuesList, bool sorted) { - if (!checkSorted(times)) { - int *index = new int[times.size()]; + int* index = new int[times.size()]; for (size_t i = 0; i < times.size(); i++) { index[i] = (int)i; } @@ -1302,7 +1247,6 @@ void Session::insertAlignedRecordsOfOneDevice(const string &deviceId, delete[] index; } TSInsertRecordsOfOneDeviceReq request; - request.__set_sessionId(sessionId); request.__set_prefixPath(deviceId); request.__set_timestamps(times); request.__set_measurementsList(measurementsList); @@ -1314,228 +1258,230 @@ void Session::insertAlignedRecordsOfOneDevice(const string &deviceId, } request.__set_valuesList(bufferList); request.__set_isAligned(true); - + TSStatus respStatus; try { - TSStatus respStatus; - client->insertRecordsOfOneDevice(respStatus, request); - RpcUtils::verifySuccess(respStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); + getSessionConnection(deviceId)->insertRecordsOfOneDevice(request); + } + catch (RedirectException& e) { + handleRedirection(deviceId, e.endPoint); + } catch (const IoTDBConnectionException& e) { + if (enableRedirection_ && deviceIdToEndpoint.find(deviceId) != deviceIdToEndpoint.end()) { + deviceIdToEndpoint.erase(deviceId); + try { + defaultSessionConnection_->insertRecordsOfOneDevice(request); + } + catch (RedirectException& e) { + } + } + else { + throw e; + } } } -void Session::insertTablet(Tablet &tablet) { +void Session::insertTablet(Tablet& tablet) { try { insertTablet(tablet, false); } - catch (const exception &e) { + catch (const exception& e) { log_debug(e.what()); logic_error error(e.what()); throw exception(error); } } -void Session::buildInsertTabletReq(TSInsertTabletReq &request, int64_t sessionId, Tablet &tablet, bool sorted) { +void Session::buildInsertTabletReq(TSInsertTabletReq& request, Tablet& tablet, bool sorted) { if ((!sorted) && !checkSorted(tablet)) { sortTablet(tablet); } - request.__set_sessionId(sessionId); request.prefixPath = tablet.deviceId; request.measurements.reserve(tablet.schemas.size()); request.types.reserve(tablet.schemas.size()); - for (pair schema: tablet.schemas) { + for (pair schema : tablet.schemas) { request.measurements.push_back(schema.first); request.types.push_back(schema.second); } - request.values = move(SessionUtils::getValue(tablet)); request.timestamps = move(SessionUtils::getTime(tablet)); request.__set_size(tablet.rowSize); request.__set_isAligned(tablet.isAligned); } -void Session::insertTablet(const TSInsertTabletReq &request){ +void Session::insertTablet(TSInsertTabletReq request) { + auto deviceId = request.prefixPath; try { - TSStatus respStatus; - client->insertTablet(respStatus, request); - RpcUtils::verifySuccess(respStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); + getSessionConnection(deviceId)->insertTablet(request); + } + catch (RedirectException& e) { + handleRedirection(deviceId, e.endPoint); + } catch (const IoTDBConnectionException& e) { + if (enableRedirection_ && 
deviceIdToEndpoint.find(deviceId) != deviceIdToEndpoint.end()) {
+        deviceIdToEndpoint.erase(deviceId);
+        try {
+            defaultSessionConnection_->insertTablet(request);
+        }
+        catch (RedirectException& e) {
+        }
+    }
+    else {
+        throw e;
+    }
 }
}

-void Session::insertTablet(Tablet &tablet, bool sorted) {
+void Session::insertTablet(Tablet& tablet, bool sorted) {
     TSInsertTabletReq request;
-    buildInsertTabletReq(request, sessionId, tablet, sorted);
+    buildInsertTabletReq(request, tablet, sorted);
     insertTablet(request);
 }

-void Session::insertAlignedTablet(Tablet &tablet) {
+void Session::insertAlignedTablet(Tablet& tablet) {
     insertAlignedTablet(tablet, false);
 }

-void Session::insertAlignedTablet(Tablet &tablet, bool sorted) {
+void Session::insertAlignedTablet(Tablet& tablet, bool sorted) {
     tablet.setAligned(true);
     try {
         insertTablet(tablet, sorted);
     }
-    catch (const exception &e) {
+    catch (const exception& e) {
         log_debug(e.what());
         logic_error error(e.what());
         throw exception(error);
     }
 }

-void Session::insertTablets(unordered_map<string, Tablet*> &tablets) {
+void Session::insertTablets(unordered_map<string, Tablet*>& tablets) {
     try {
         insertTablets(tablets, false);
     }
-    catch (const exception &e) {
+    catch (const exception& e) {
         log_debug(e.what());
         logic_error error(e.what());
         throw exception(error);
     }
 }

-void Session::insertTablets(unordered_map<string, Tablet*> &tablets, bool sorted) {
-    TSInsertTabletsReq request;
-    request.__set_sessionId(sessionId);
+void Session::insertTablets(unordered_map<string, Tablet*>& tablets, bool sorted) {
     if (tablets.empty()) {
         throw BatchExecutionException("No tablet is inserting!");
     }
     auto beginIter = tablets.begin();
-    bool isFirstTabletAligned = ((*beginIter).second)->isAligned;
-    for (const auto &item: tablets) {
-        if (isFirstTabletAligned != item.second->isAligned) {
-            throw BatchExecutionException("The tablets should be all aligned or non-aligned!");
-        }
-        if (!checkSorted(*(item.second))) {
-            sortTablet(*(item.second));
-        }
-        request.prefixPaths.push_back(item.second->deviceId);
-        vector<string> measurements;
-        vector<int> dataTypes;
-        for (pair<string, TSDataType::TSDataType> schema: item.second->schemas) {
-            measurements.push_back(schema.first);
-            dataTypes.push_back(schema.second);
-        }
-        request.measurementsList.push_back(measurements);
-        request.typesList.push_back(dataTypes);
-        request.timestampsList.push_back(move(SessionUtils::getTime(*(item.second))));
-        request.valuesList.push_back(move(SessionUtils::getValue(*(item.second))));
-        request.sizeList.push_back(item.second->rowSize);
-    }
-    request.__set_isAligned(isFirstTabletAligned);
-    try {
-        TSStatus respStatus;
-        client->insertTablets(respStatus, request);
-        RpcUtils::verifySuccess(respStatus);
-    } catch (const TTransportException &e) {
-        log_debug(e.what());
-        throw IoTDBConnectionException(e.what());
-    } catch (const IoTDBException &e) {
-        log_debug(e.what());
-        throw;
-    } catch (const exception &e) {
-        log_debug(e.what());
-        throw IoTDBException(e.what());
+    bool isAligned = ((*beginIter).second)->isAligned;
+    if (enableRedirection_) {
+        insertTabletsWithLeaderCache(tablets, sorted, isAligned);
+    }
+    else {
+        TSInsertTabletsReq request;
+        for (const auto& item : tablets) {
+            if (isAligned != item.second->isAligned) {
+                throw BatchExecutionException("The tablets should be all aligned or non-aligned!");
+            }
+            if (!checkSorted(*(item.second))) {
+                sortTablet(*(item.second));
+            }
+            request.prefixPaths.push_back(item.second->deviceId);
+            vector<string> measurements;
+            vector<int> dataTypes;
+            for (pair<string, TSDataType::TSDataType> schema : item.second->schemas) {
+                measurements.push_back(schema.first);
+                dataTypes.push_back(schema.second);
+            }
+            request.measurementsList.push_back(measurements);
+            request.typesList.push_back(dataTypes);
+            request.timestampsList.push_back(move(SessionUtils::getTime(*(item.second))));
+            request.valuesList.push_back(move(SessionUtils::getValue(*(item.second))));
+            request.sizeList.push_back(item.second->rowSize);
+        }
+        request.__set_isAligned(isAligned);
+        try {
+            defaultSessionConnection_->insertTablets(request);
+        }
+        catch (RedirectException& e) {
+        }
     }
 }

-void Session::insertAlignedTablets(unordered_map<string, Tablet*> &tablets, bool sorted) {
+void Session::insertAlignedTablets(unordered_map<string, Tablet*>& tablets, bool sorted) {
     for (auto iter = tablets.begin(); iter != tablets.end(); iter++) {
         iter->second->setAligned(true);
     }
     try {
         insertTablets(tablets, sorted);
     }
-    catch (const exception &e) {
+    catch (const exception& e) {
         log_debug(e.what());
         logic_error error(e.what());
         throw exception(error);
     }
 }

-void Session::testInsertRecord(const string &deviceId, int64_t time, const vector<string> &measurements,
-                               const vector<string> &values) {
+void Session::testInsertRecord(const string& deviceId, int64_t time, const vector<string>& measurements,
+                               const vector<string>& values) {
     TSInsertStringRecordReq req;
-    req.__set_sessionId(sessionId);
     req.__set_prefixPath(deviceId);
     req.__set_timestamp(time);
     req.__set_measurements(measurements);
     req.__set_values(values);
-    TSStatus tsStatus;
     try {
-        client->insertStringRecord(tsStatus, req);
-        RpcUtils::verifySuccess(tsStatus);
-    } catch (const TTransportException &e) {
+        defaultSessionConnection_->testInsertStringRecord(req);
+    }
+    catch (const TTransportException& e) {
         log_debug(e.what());
         throw IoTDBConnectionException(e.what());
-    } catch (const IoTDBException &e) {
+    } catch (const IoTDBException& e) {
         log_debug(e.what());
         throw;
-    } catch (const exception &e) {
+    } catch (const exception& e) {
         log_debug(e.what());
         throw IoTDBException(e.what());
     }
 }

-void Session::testInsertTablet(const Tablet &tablet) {
+void Session::testInsertTablet(const Tablet& tablet) {
     TSInsertTabletReq request;
-    request.__set_sessionId(sessionId);
     request.prefixPath = tablet.deviceId;
-    for (pair<string, TSDataType::TSDataType> schema: tablet.schemas) {
+    for (pair<string, TSDataType::TSDataType> schema : tablet.schemas) {
         request.measurements.push_back(schema.first);
         request.types.push_back(schema.second);
     }
     request.__set_timestamps(move(SessionUtils::getTime(tablet)));
     request.__set_values(move(SessionUtils::getValue(tablet)));
     request.__set_size(tablet.rowSize);
     try {
-        TSStatus tsStatus;
-        client->testInsertTablet(tsStatus, request);
-        RpcUtils::verifySuccess(tsStatus);
-    } catch (const TTransportException &e) {
+        defaultSessionConnection_->testInsertTablet(request);
+    }
+    catch (const TTransportException& e) {
         log_debug(e.what());
         throw IoTDBConnectionException(e.what());
-    } catch (const IoTDBException &e) {
+    } catch (const IoTDBException& e) {
         log_debug(e.what());
         throw;
-    } catch (const exception &e) {
+    } catch (const exception& e) {
         log_debug(e.what());
         throw IoTDBException(e.what());
     }
 }
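The methods above still rely on RpcUtils::verifySuccess to turn a failed TSStatus into an exception. As a hedged, standalone sketch of what such a check amounts to, assuming the status codes declared later in Session.h (200 for SUCCESS_STATUS, 400 for REDIRECTION_RECOMMEND) and using a simplified stand-in for the Thrift-generated TSStatus:

#include <stdexcept>
#include <string>

// Simplified stand-in for the Thrift-generated TSStatus.
struct Status {
    int code;
    std::string message;
};

// Throw unless the server reported success; surface a redirection hint
// separately so the caller can re-route the request to the leader.
void verifySuccessSketch(const Status& status) {
    if (status.code == 400) {  // REDIRECTION_RECOMMEND
        throw std::runtime_error("redirection recommended: " + status.message);
    }
    if (status.code != 200) {  // anything but SUCCESS_STATUS is an error
        throw std::runtime_error("status code " + std::to_string(status.code) +
                                 ": " + status.message);
    }
}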
-void Session::testInsertRecords(const vector<string> &deviceIds,
-                                const vector<int64_t> &times,
-                                const vector<vector<string>> &measurementsList,
-                                const vector<vector<string>> &valuesList) {
+void Session::testInsertRecords(const vector<string>& deviceIds,
+                                const vector<int64_t>& times,
+                                const vector<vector<string>>& measurementsList,
+                                const vector<vector<string>>& valuesList) {
     size_t len = deviceIds.size();
     if (len != times.size() || len != measurementsList.size() || len != valuesList.size()) {
         logic_error error("deviceIds, times, measurementsList and valuesList's size should be equal");
         throw exception(error);
     }
     TSInsertStringRecordsReq request;
-    request.__set_sessionId(sessionId);
     request.__set_prefixPaths(deviceIds);
     request.__set_timestamps(times);
     request.__set_measurementsList(measurementsList);
@@ -1543,139 +1489,97 @@ void Session::testInsertRecords(const vector<string> &deviceIds,
     try {
         TSStatus tsStatus;
-        client->insertStringRecords(tsStatus, request);
+        defaultSessionConnection_->getSessionClient()->insertStringRecords(tsStatus, request);
         RpcUtils::verifySuccess(tsStatus);
-    } catch (const TTransportException &e) {
+    }
+    catch (const TTransportException& e) {
         log_debug(e.what());
         throw IoTDBConnectionException(e.what());
-    } catch (const IoTDBException &e) {
+    } catch (const IoTDBException& e) {
         log_debug(e.what());
         throw;
-    } catch (const exception &e) {
+    } catch (const exception& e) {
         log_debug(e.what());
         throw IoTDBException(e.what());
     }
 }

-void Session::deleteTimeseries(const string &path) {
+void Session::deleteTimeseries(const string& path) {
     vector<string> paths;
     paths.push_back(path);
     deleteTimeseries(paths);
 }

-void Session::deleteTimeseries(const vector<string> &paths) {
-    TSStatus tsStatus;
-
-    try {
-        client->deleteTimeseries(tsStatus, sessionId, paths);
-        RpcUtils::verifySuccess(tsStatus);
-    } catch (const TTransportException &e) {
-        log_debug(e.what());
-        throw IoTDBConnectionException(e.what());
-    } catch (const IoTDBException &e) {
-        log_debug(e.what());
-        throw;
-    } catch (const exception &e) {
-        log_debug(e.what());
-        throw IoTDBException(e.what());
-    }
+void Session::deleteTimeseries(const vector<string>& paths) {
+    defaultSessionConnection_->deleteTimeseries(paths);
 }

-void Session::deleteData(const string &path, int64_t endTime) {
+void Session::deleteData(const string& path, int64_t endTime) {
     vector<string> paths;
     paths.push_back(path);
     deleteData(paths, LONG_LONG_MIN, endTime);
 }

-void Session::deleteData(const vector<string> &paths, int64_t endTime) {
+void Session::deleteData(const vector<string>& paths, int64_t endTime) {
     deleteData(paths, LONG_LONG_MIN, endTime);
 }

-void Session::deleteData(const vector<string> &paths, int64_t startTime, int64_t endTime) {
+void Session::deleteData(const vector<string>& paths, int64_t startTime, int64_t endTime) {
     TSDeleteDataReq req;
-    req.__set_sessionId(sessionId);
     req.__set_paths(paths);
     req.__set_startTime(startTime);
     req.__set_endTime(endTime);
-    TSStatus tsStatus;
-    try {
-        client->deleteData(tsStatus, req);
-        RpcUtils::verifySuccess(tsStatus);
-    } catch (const TTransportException &e) {
-        log_debug(e.what());
-        throw IoTDBConnectionException(e.what());
-    } catch (const IoTDBException &e) {
-        log_debug(e.what());
-        throw;
-    } catch (const exception &e) {
-        log_debug(e.what());
-        throw IoTDBException(e.what());
-    }
+    defaultSessionConnection_->deleteData(req);
 }

-void Session::setStorageGroup(const string &storageGroupId) {
-    TSStatus tsStatus;
-    try {
-        client->setStorageGroup(tsStatus, sessionId, storageGroupId);
-        RpcUtils::verifySuccess(tsStatus);
-    } catch (const TTransportException &e) {
-        log_debug(e.what());
-        throw IoTDBConnectionException(e.what());
-    } catch (const IoTDBException &e) {
-        log_debug(e.what());
-        throw;
-    } catch (const exception &e) {
-        log_debug(e.what());
-        throw IoTDBException(e.what());
-    }
+void Session::setStorageGroup(const string& storageGroupId) {
    defaultSessionConnection_->setStorageGroup(storageGroupId);
 }

-void Session::deleteStorageGroup(const string &storageGroup) {
+void Session::deleteStorageGroup(const string& storageGroup) {
     vector<string> storageGroups;
     storageGroups.push_back(storageGroup);
     deleteStorageGroups(storageGroups);
 }
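A hypothetical usage fragment for the deletion helpers above; the series paths and timestamps are illustrative, and an already-open Session from this header is assumed:

#include <string>
#include <vector>

// Delete one time range across two series, then drop a whole series.
void cleanupDemoData(Session& session) {
    std::vector<std::string> paths{"root.demo.d1.s1", "root.demo.d1.s2"};
    session.deleteData(paths, /*startTime=*/0, /*endTime=*/1700000000000LL);
    session.deleteTimeseries("root.demo.d1.s3");
}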
-void Session::deleteStorageGroups(const vector<string> &storageGroups) {
-    TSStatus tsStatus;
-    try {
-        client->deleteStorageGroups(tsStatus, sessionId, storageGroups);
-        RpcUtils::verifySuccess(tsStatus);
-    } catch (const TTransportException &e) {
-        log_debug(e.what());
-        throw IoTDBConnectionException(e.what());
-    } catch (const IoTDBException &e) {
-        log_debug(e.what());
-        throw;
-    } catch (const exception &e) {
-        log_debug(e.what());
-        throw IoTDBException(e.what());
-    }
+void Session::deleteStorageGroups(const vector<string>& storageGroups) {
+    defaultSessionConnection_->deleteStorageGroups(storageGroups);
 }

-void Session::createTimeseries(const string &path,
+void Session::createDatabase(const string& database) {
+    this->setStorageGroup(database);
+}
+
+void Session::deleteDatabase(const string& database) {
+    this->deleteStorageGroups(vector<string>{database});
+}
+
+void Session::deleteDatabases(const vector<string>& databases) {
+    this->deleteStorageGroups(databases);
+}
+
+void Session::createTimeseries(const string& path,
                                TSDataType::TSDataType dataType,
                                TSEncoding::TSEncoding encoding,
                                CompressionType::CompressionType compressor) {
     try {
         createTimeseries(path, dataType, encoding, compressor, nullptr, nullptr, nullptr, "");
     }
-    catch (const exception &e) {
+    catch (const exception& e) {
         log_debug(e.what());
         throw IoTDBException(e.what());
     }
 }

-void Session::createTimeseries(const string &path,
+void Session::createTimeseries(const string& path,
                                TSDataType::TSDataType dataType,
                                TSEncoding::TSEncoding encoding,
                                CompressionType::CompressionType compressor,
-                               map<string, string> *props,
-                               map<string, string> *tags,
-                               map<string, string> *attributes,
-                               const string &measurementAlias) {
+                               map<string, string>* props,
+                               map<string, string>* tags,
+                               map<string, string>* attributes,
+                               const string& measurementAlias) {
     TSCreateTimeseriesReq req;
-    req.__set_sessionId(sessionId);
     req.__set_path(path);
     req.__set_dataType(dataType);
     req.__set_encoding(encoding);
@@ -1693,52 +1597,37 @@ void Session::createTimeseries(const string &path,
     if (!measurementAlias.empty()) {
         req.__set_measurementAlias(measurementAlias);
     }
-
-    TSStatus tsStatus;
-    try {
-        client->createTimeseries(tsStatus, req);
-        RpcUtils::verifySuccess(tsStatus);
-    } catch (const TTransportException &e) {
-        log_debug(e.what());
-        throw IoTDBConnectionException(e.what());
-    } catch (const IoTDBException &e) {
-        log_debug(e.what());
-        throw;
-    } catch (const exception &e) {
-        log_debug(e.what());
-        throw IoTDBException(e.what());
-    }
+    defaultSessionConnection_->createTimeseries(req);
 }
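A hypothetical usage fragment for the database-management wrappers introduced above (createDatabase, deleteDatabase, deleteDatabases); the database names are illustrative:

#include <string>
#include <vector>

void manageDatabases(Session& session) {
    session.createDatabase("root.factory1");    // thin alias for setStorageGroup()
    session.deleteDatabase("root.factory1");    // single-database convenience form
    session.deleteDatabases(std::vector<std::string>{"root.factory2", "root.factory3"});
}

-void Session::createMultiTimeseries(const vector<string> &paths,
-                                    const vector<TSDataType::TSDataType> &dataTypes,
-                                    const vector<TSEncoding::TSEncoding> &encodings,
-                                    const vector<CompressionType::CompressionType> &compressors,
-                                    vector<map<string, string>> *propsList,
-                                    vector<map<string, string>> *tagsList,
-                                    vector<map<string, string>> *attributesList,
-                                    vector<string> *measurementAliasList) {
+void Session::createMultiTimeseries(const vector<string>& paths,
+                                    const vector<TSDataType::TSDataType>& dataTypes,
+                                    const vector<TSEncoding::TSEncoding>& encodings,
+                                    const vector<CompressionType::CompressionType>& compressors,
+                                    vector<map<string, string>>* propsList,
+                                    vector<map<string, string>>* tagsList,
+                                    vector<map<string, string>>* attributesList,
+                                    vector<string>* measurementAliasList) {
     TSCreateMultiTimeseriesReq request;
-    request.__set_sessionId(sessionId);
     request.__set_paths(paths);
     vector<int> dataTypesOrdinal;
     dataTypesOrdinal.reserve(dataTypes.size());
-    for (TSDataType::TSDataType dataType: dataTypes) {
+    for (TSDataType::TSDataType dataType : dataTypes) {
         dataTypesOrdinal.push_back(dataType);
     }
     request.__set_dataTypes(dataTypesOrdinal);
     vector<int> encodingsOrdinal;
     encodingsOrdinal.reserve(encodings.size());
-    for (TSEncoding::TSEncoding encoding: encodings) {
+    for (TSEncoding::TSEncoding encoding : encodings) {
         encodingsOrdinal.push_back(encoding);
     }
     request.__set_encodings(encodingsOrdinal);
     vector<int> compressorsOrdinal;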
compressorsOrdinal.reserve(compressors.size()); - for (CompressionType::CompressionType compressor: compressors) { + for (CompressionType::CompressionType compressor : compressors) { compressorsOrdinal.push_back(compressor); } request.__set_compressors(compressorsOrdinal); @@ -1757,348 +1646,287 @@ void Session::createMultiTimeseries(const vector &paths, request.__set_measurementAliasList(*measurementAliasList); } - try { - TSStatus tsStatus; - client->createMultiTimeseries(tsStatus, request); - RpcUtils::verifySuccess(tsStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); - } + defaultSessionConnection_->createMultiTimeseries(request); } -void Session::createAlignedTimeseries(const std::string &deviceId, - const std::vector &measurements, - const std::vector &dataTypes, - const std::vector &encodings, - const std::vector &compressors) { +void Session::createAlignedTimeseries(const std::string& deviceId, + const std::vector& measurements, + const std::vector& dataTypes, + const std::vector& encodings, + const std::vector& compressors) { TSCreateAlignedTimeseriesReq request; - request.__set_sessionId(sessionId); request.__set_prefixPath(deviceId); request.__set_measurements(measurements); vector dataTypesOrdinal; dataTypesOrdinal.reserve(dataTypes.size()); - for (TSDataType::TSDataType dataType: dataTypes) { + for (TSDataType::TSDataType dataType : dataTypes) { dataTypesOrdinal.push_back(dataType); } request.__set_dataTypes(dataTypesOrdinal); vector encodingsOrdinal; encodingsOrdinal.reserve(encodings.size()); - for (TSEncoding::TSEncoding encoding: encodings) { + for (TSEncoding::TSEncoding encoding : encodings) { encodingsOrdinal.push_back(encoding); } request.__set_encodings(encodingsOrdinal); vector compressorsOrdinal; compressorsOrdinal.reserve(compressors.size()); - for (CompressionType::CompressionType compressor: compressors) { + for (CompressionType::CompressionType compressor : compressors) { compressorsOrdinal.push_back(compressor); } request.__set_compressors(compressorsOrdinal); - try { - TSStatus tsStatus; - client->createAlignedTimeseries(tsStatus, request); - RpcUtils::verifySuccess(tsStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); - } + defaultSessionConnection_->createAlignedTimeseries(request); } -bool Session::checkTimeseriesExists(const string &path) { +bool Session::checkTimeseriesExists(const string& path) { try { std::unique_ptr dataset = executeQueryStatement("SHOW TIMESERIES " + path); bool isExisted = dataset->hasNext(); dataset->closeOperationHandle(); return isExisted; } - catch (const exception &e) { + catch (const exception& e) { log_debug(e.what()); throw IoTDBException(e.what()); } } -int64_t Session::getSessionId() { - return sessionId; -} +shared_ptr Session::getQuerySessionConnection() { + auto endPoint = nodesSupplier_->getQueryEndPoint(); + if (!endPoint.is_initialized() || endPointToSessionConnection.empty()) { + return defaultSessionConnection_; + } -string Session::getTimeZone() { - if (!zoneId.empty()) { - return zoneId; + auto it = endPointToSessionConnection.find(endPoint.value()); + if (it != 
endPointToSessionConnection.end()) {
+        return it->second;
+    }
-    TSGetTimeZoneResp resp;
+
+    shared_ptr<SessionConnection> newConnection;
     try {
-        client->getTimeZone(resp, sessionId);
-        RpcUtils::verifySuccess(resp.status);
-    } catch (const TTransportException &e) {
-        log_debug(e.what());
-        throw IoTDBConnectionException(e.what());
-    } catch (const IoTDBException &e) {
-        log_debug(e.what());
-        throw;
-    } catch (const exception &e) {
-        log_debug(e.what());
-        throw IoTDBException(e.what());
+        newConnection = make_shared<SessionConnection>(this, endPoint.value(), zoneId_, nodesSupplier_,
+                                                       fetchSize_, 60, 500, sqlDialect_, database_);
+        endPointToSessionConnection.emplace(endPoint.value(), newConnection);
+        return newConnection;
+    }
+    catch (exception& e) {
+        log_debug("Session::getQuerySessionConnection() exception: %s", e.what());
+        return newConnection;
+    }
+}
+
+shared_ptr<SessionConnection> Session::getSessionConnection(std::string deviceId) {
+    if (!enableRedirection_ ||
+        deviceIdToEndpoint.find(deviceId) == deviceIdToEndpoint.end() ||
+        endPointToSessionConnection.find(deviceIdToEndpoint[deviceId]) == endPointToSessionConnection.end()) {
+        return defaultSessionConnection_;
+    }
+    return endPointToSessionConnection.find(deviceIdToEndpoint[deviceId])->second;
+}
+
+shared_ptr<SessionConnection> Session::getSessionConnection(std::shared_ptr<IDeviceID> deviceId) {
+    if (!enableRedirection_ ||
+        tableModelDeviceIdToEndpoint.find(deviceId) == tableModelDeviceIdToEndpoint.end() ||
+        endPointToSessionConnection.find(tableModelDeviceIdToEndpoint[deviceId]) == endPointToSessionConnection.end()) {
+        return defaultSessionConnection_;
     }
-    return resp.timeZone;
+    return endPointToSessionConnection.find(tableModelDeviceIdToEndpoint[deviceId])->second;
+}
+
+string Session::getTimeZone() {
+    auto ret = defaultSessionConnection_->getTimeZone();
+    return ret.timeZone;
 }

-void Session::setTimeZone(const string &zoneId) {
+void Session::setTimeZone(const string& zoneId) {
     TSSetTimeZoneReq req;
-    req.__set_sessionId(sessionId);
+    req.__set_sessionId(defaultSessionConnection_->sessionId);
     req.__set_timeZone(zoneId);
-    TSStatus tsStatus;
-    try {
-        client->setTimeZone(tsStatus, req);
-        RpcUtils::verifySuccess(tsStatus);
-        this->zoneId = zoneId;
-    } catch (const TTransportException &e) {
-        log_debug(e.what());
-        throw IoTDBConnectionException(e.what());
-    } catch (const IoTDBException &e) {
-        log_debug(e.what());
-        throw;
-    } catch (const exception &e) {
-        log_debug(e.what());
-        throw IoTDBException(e.what());
-    }
+    defaultSessionConnection_->setTimeZone(req);
+}
+
+unique_ptr<SessionDataSet> Session::executeQueryStatement(const string& sql) {
+    return executeQueryStatementMayRedirect(sql, QUERY_TIMEOUT_MS);
+}
+
+unique_ptr<SessionDataSet> Session::executeQueryStatement(const string& sql, int64_t timeoutInMs) {
+    return executeQueryStatementMayRedirect(sql, timeoutInMs);
+}
+
+void Session::handleQueryRedirection(TEndPoint endPoint) {
+    if (!enableRedirection_) return;
+    shared_ptr<SessionConnection> newConnection;
+    auto it = endPointToSessionConnection.find(endPoint);
+    if (it != endPointToSessionConnection.end()) {
+        newConnection = it->second;
+    }
+    else {
+        try {
+            newConnection = make_shared<SessionConnection>(this, endPoint, zoneId_, nodesSupplier_,
+                                                           fetchSize_, 60, 500, sqlDialect_, database_);
+            endPointToSessionConnection.emplace(endPoint, newConnection);
+        }
+        catch (exception& e) {
+            throw IoTDBConnectionException(e.what());
+        }
+    }
+    defaultSessionConnection_ = newConnection;
 }
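getQuerySessionConnection(), getSessionConnection() and handleQueryRedirection() above all share one idea: reuse a cached connection per endpoint, and open and cache a new one only on a miss. A standalone sketch of that get-or-create pattern, with EndPoint and Connection as simplified stand-ins for TEndPoint and SessionConnection:

#include <map>
#include <memory>
#include <string>
#include <utility>

struct EndPoint {
    std::string ip;
    int port;
    bool operator<(const EndPoint& o) const {
        return ip != o.ip ? ip < o.ip : port < o.port;
    }
};

struct Connection {
    explicit Connection(EndPoint ep) : endPoint(std::move(ep)) {}
    EndPoint endPoint;
};

class ConnectionCache {
public:
    // Reuse an existing connection to this endpoint, or open and cache a new one.
    std::shared_ptr<Connection> getOrCreate(const EndPoint& ep) {
        auto it = cache_.find(ep);
        if (it != cache_.end()) {
            return it->second;
        }
        auto conn = std::make_shared<Connection>(ep);
        cache_.emplace(ep, conn);
        return conn;
    }

private:
    std::map<EndPoint, std::shared_ptr<Connection>> cache_;
};

-unique_ptr<SessionDataSet> Session::executeQueryStatement(const string &sql) {
-    return executeQueryStatement(sql, QUERY_TIMEOUT_MS);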
+void Session::handleRedirection(const std::string& deviceId, TEndPoint endPoint) {
+    if (!enableRedirection_) return;
+    if (endPoint.ip == "127.0.0.1") return;
+    deviceIdToEndpoint[deviceId] = endPoint;
+
+    shared_ptr<SessionConnection> newConnection;
+    auto it = endPointToSessionConnection.find(endPoint);
+    if (it != endPointToSessionConnection.end()) {
+        newConnection = it->second;
+    }
+    else {
+        try {
+            newConnection = make_shared<SessionConnection>(this, endPoint, zoneId_, nodesSupplier_,
+                                                           fetchSize_, 60, 500, sqlDialect_, database_);
+            endPointToSessionConnection.emplace(endPoint, newConnection);
+        }
+        catch (exception& e) {
+            deviceIdToEndpoint.erase(deviceId);
+            throw IoTDBConnectionException(e.what());
+        }
+    }
 }

-unique_ptr<SessionDataSet> Session::executeQueryStatement(const string &sql, int64_t timeoutInMs) {
-    TSExecuteStatementReq req;
-    req.__set_sessionId(sessionId);
-    req.__set_statementId(statementId);
-    req.__set_statement(sql);
-    req.__set_timeout(timeoutInMs);
-    req.__set_fetchSize(fetchSize);
-    TSExecuteStatementResp resp;
-    try {
-        client->executeStatement(resp, req);
-        RpcUtils::verifySuccess(resp.status);
-    } catch (const TTransportException &e) {
-        log_debug(e.what());
-        throw IoTDBConnectionException(e.what());
-    } catch (const IoTDBException &e) {
-        log_debug(e.what());
-        throw;
-    } catch (const exception &e) {
-        throw IoTDBException(e.what());
+void Session::handleRedirection(const std::shared_ptr<IDeviceID>& deviceId, TEndPoint endPoint) {
+    if (!enableRedirection_) return;
+    if (endPoint.ip == "127.0.0.1") return;
+    tableModelDeviceIdToEndpoint[deviceId] = endPoint;
+
+    shared_ptr<SessionConnection> newConnection;
+    auto it = endPointToSessionConnection.find(endPoint);
+    if (it != endPointToSessionConnection.end()) {
+        newConnection = it->second;
+    }
+    else {
+        try {
+            newConnection = make_shared<SessionConnection>(this, endPoint, zoneId_, nodesSupplier_,
+                                                           fetchSize_, 60, 500, sqlDialect_, database_);
+            endPointToSessionConnection.emplace(endPoint, newConnection);
+        }
+        catch (exception& e) {
+            tableModelDeviceIdToEndpoint.erase(deviceId);
+            throw IoTDBConnectionException(e.what());
+        }
+    }
 }
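handleRedirection() above records the advertised leader endpoint for a device and rolls the mapping back if a connection to that endpoint cannot be opened. A standalone sketch of that record-then-roll-back pattern, with simplified stand-in types and a stubbed connect step:

#include <map>
#include <stdexcept>
#include <string>

struct EndPoint {
    std::string ip;
    int port;
};

class LeaderCache {
public:
    // Remember where the cluster said a device's leader lives; if we cannot
    // connect there, forget the hint so later writes use the default node.
    void redirect(const std::string& deviceId, const EndPoint& endPoint) {
        if (endPoint.ip == "127.0.0.1") return;   // ignore loopback hints, as above
        deviceToEndpoint_[deviceId] = endPoint;
        try {
            connect(endPoint);                    // may throw
        } catch (const std::exception&) {
            deviceToEndpoint_.erase(deviceId);    // roll back the cache entry
            throw;
        }
    }

private:
    // Stub standing in for opening a real connection.
    void connect(const EndPoint& ep) {
        if (ep.port <= 0) throw std::runtime_error("cannot connect to " + ep.ip);
    }
    std::map<std::string, EndPoint> deviceToEndpoint_;
};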
-    shared_ptr<TSQueryDataSet> queryDataSet(new TSQueryDataSet(resp.queryDataSet));
-    return unique_ptr<SessionDataSet>(new SessionDataSet(
-        sql, resp.columns, resp.dataTypeList, resp.columnNameIndexMap, resp.ignoreTimeStamp, resp.queryId,
-        statementId, client, sessionId, queryDataSet));
 }

-void Session::executeNonQueryStatement(const string &sql) {
-    TSExecuteStatementReq req;
-    req.__set_sessionId(sessionId);
-    req.__set_statementId(statementId);
-    req.__set_statement(sql);
-    req.__set_timeout(0); //0 means no timeout. This value keep consistent to JAVA SDK.
-    TSExecuteStatementResp resp;
+std::unique_ptr<SessionDataSet> Session::executeQueryStatementMayRedirect(const std::string& sql, int64_t timeoutInMs) {
+    auto sessionConnection = getQuerySessionConnection();
+    if (!sessionConnection) {
+        log_warn("Session connection not found");
+        return nullptr;
+    }
     try {
-        client->executeUpdateStatement(resp, req);
-        RpcUtils::verifySuccess(resp.status);
-    } catch (const TTransportException &e) {
-        log_debug(e.what());
-        throw IoTDBConnectionException(e.what());
-    } catch (const IoTDBException &e) {
-        log_debug(e.what());
-        throw;
-    } catch (const exception &e) {
-        throw IoTDBException(e.what());
+        return sessionConnection->executeQueryStatement(sql, timeoutInMs);
+    }
+    catch (RedirectException& e) {
+        log_warn("Session connection redirect exception: %s", e.what());
+        handleQueryRedirection(e.endPoint);
+        try {
+            return defaultSessionConnection_->executeQueryStatement(sql, timeoutInMs);
+        }
+        catch (exception& e) {
+            log_error("Exception while executing redirected query statement: %s", e.what());
+            throw ExecutionException(e.what());
+        }
+    } catch (exception& e) {
+        log_error("Exception while executing query statement: %s", e.what());
+        throw;
     }
 }

-unique_ptr<SessionDataSet> Session::executeRawDataQuery(const vector<string> &paths, int64_t startTime, int64_t endTime) {
-    TSRawDataQueryReq req;
-    req.__set_sessionId(sessionId);
-    req.__set_statementId(statementId);
-    req.__set_fetchSize(fetchSize);
-    req.__set_paths(paths);
-    req.__set_startTime(startTime);
-    req.__set_endTime(endTime);
-    TSExecuteStatementResp resp;
+void Session::executeNonQueryStatement(const string& sql) {
     try {
-        client->executeRawDataQuery(resp, req);
-        RpcUtils::verifySuccess(resp.status);
-    } catch (const TTransportException &e) {
-        log_debug(e.what());
-        throw IoTDBConnectionException(e.what());
-    } catch (const IoTDBException &e) {
-        log_debug(e.what());
-        throw;
-    } catch (const exception &e) {
+        defaultSessionConnection_->executeNonQueryStatement(sql);
+    }
+    catch (const exception& e) {
         throw IoTDBException(e.what());
     }
-    shared_ptr<TSQueryDataSet> queryDataSet(new TSQueryDataSet(resp.queryDataSet));
-    return unique_ptr<SessionDataSet>(
-        new SessionDataSet("", resp.columns, resp.dataTypeList, resp.columnNameIndexMap, resp.ignoreTimeStamp,
-                           resp.queryId, statementId, client, sessionId, queryDataSet));
+}
+
+unique_ptr<SessionDataSet>
+Session::executeRawDataQuery(const vector<string>& paths, int64_t startTime, int64_t endTime) {
+    return defaultSessionConnection_->executeRawDataQuery(paths, startTime, endTime);
 }

-unique_ptr<SessionDataSet> Session::executeLastDataQuery(const vector<string> &paths) {
+unique_ptr<SessionDataSet> Session::executeLastDataQuery(const vector<string>& paths) {
     return executeLastDataQuery(paths, LONG_LONG_MIN);
 }
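A hypothetical usage fragment for the redirect-aware query path above, mirroring how checkTimeseriesExists() drains a result set; the SQL string is illustrative:

#include <memory>
#include <string>

// Run a query through the redirect-aware path and test whether it returned
// any rows. executeQueryStatementMayRedirect() may return nullptr when no
// session connection is available, so check the pointer first.
bool hasAnyRow(Session& session, const std::string& sql) {
    std::unique_ptr<SessionDataSet> dataset = session.executeQueryStatement(sql);
    if (!dataset) {
        return false;
    }
    bool nonEmpty = dataset->hasNext();
    dataset->closeOperationHandle();
    return nonEmpty;
}

-unique_ptr<SessionDataSet> Session::executeLastDataQuery(const vector<string> &paths, int64_t lastTime) {
-    TSLastDataQueryReq req;
-    req.__set_sessionId(sessionId);
-    req.__set_statementId(statementId);
-    req.__set_fetchSize(fetchSize);
-    req.__set_paths(paths);
-    req.__set_time(lastTime);
-    TSExecuteStatementResp resp;
-    try {
-        client->executeLastDataQuery(resp, req);
-        RpcUtils::verifySuccess(resp.status);
-    } catch (const TTransportException &e) {
-        log_debug(e.what());
-        throw IoTDBConnectionException(e.what());
-    } catch (const IoTDBException &e) {
-        log_debug(e.what());
-        throw;
-    } catch (const exception &e) {
-        throw IoTDBException(e.what());
-    }
-    shared_ptr<TSQueryDataSet> queryDataSet(new TSQueryDataSet(resp.queryDataSet));
-    return unique_ptr<SessionDataSet>(
-        new SessionDataSet("", resp.columns, resp.dataTypeList, resp.columnNameIndexMap, resp.ignoreTimeStamp,
-                           resp.queryId, statementId, client, sessionId,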
queryDataSet)); +unique_ptr Session::executeLastDataQuery(const vector& paths, int64_t lastTime) { + return defaultSessionConnection_->executeLastDataQuery(paths, lastTime); } -void Session::createSchemaTemplate(const Template &templ) { +void Session::createSchemaTemplate(const Template& templ) { TSCreateSchemaTemplateReq req; - req.__set_sessionId(sessionId); req.__set_name(templ.getName()); req.__set_serializedTemplate(templ.serialize()); - TSStatus tsStatus; - try { - client->createSchemaTemplate(tsStatus, req); - RpcUtils::verifySuccess(tsStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); - } + defaultSessionConnection_->createSchemaTemplate(req); } -void Session::setSchemaTemplate(const string &template_name, const string &prefix_path) { +void Session::setSchemaTemplate(const string& template_name, const string& prefix_path) { TSSetSchemaTemplateReq req; - req.__set_sessionId(sessionId); req.__set_templateName(template_name); req.__set_prefixPath(prefix_path); - TSStatus tsStatus; - try { - client->setSchemaTemplate(tsStatus, req); - RpcUtils::verifySuccess(tsStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); - } + defaultSessionConnection_->setSchemaTemplate(req); } -void Session::unsetSchemaTemplate(const string &prefix_path, const string &template_name) { +void Session::unsetSchemaTemplate(const string& prefix_path, const string& template_name) { TSUnsetSchemaTemplateReq req; - req.__set_sessionId(sessionId); req.__set_templateName(template_name); req.__set_prefixPath(prefix_path); - TSStatus tsStatus; - try { - client->unsetSchemaTemplate(tsStatus, req); - RpcUtils::verifySuccess(tsStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); - } + defaultSessionConnection_->unsetSchemaTemplate(req); } -void Session::addAlignedMeasurementsInTemplate(const string &template_name, const vector &measurements, - const vector &dataTypes, - const vector &encodings, - const vector &compressors) { +void Session::addAlignedMeasurementsInTemplate(const string& template_name, const vector& measurements, + const vector& dataTypes, + const vector& encodings, + const vector& compressors) { TSAppendSchemaTemplateReq req; - req.__set_sessionId(sessionId); req.__set_name(template_name); req.__set_measurements(measurements); req.__set_isAligned(true); vector dataTypesOrdinal; dataTypesOrdinal.reserve(dataTypes.size()); - for (TSDataType::TSDataType dataType: dataTypes) { + for (TSDataType::TSDataType dataType : dataTypes) { dataTypesOrdinal.push_back(dataType); } req.__set_dataTypes(dataTypesOrdinal); vector encodingsOrdinal; encodingsOrdinal.reserve(encodings.size()); - for (TSEncoding::TSEncoding encoding: encodings) { + for (TSEncoding::TSEncoding encoding : encodings) { encodingsOrdinal.push_back(encoding); } req.__set_encodings(encodingsOrdinal); vector compressorsOrdinal; compressorsOrdinal.reserve(compressors.size()); - for 
(CompressionType::CompressionType compressor: compressors) { + for (CompressionType::CompressionType compressor : compressors) { compressorsOrdinal.push_back(compressor); } req.__set_compressors(compressorsOrdinal); - TSStatus tsStatus; - try { - client->appendSchemaTemplate(tsStatus, req); - RpcUtils::verifySuccess(tsStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); - } + defaultSessionConnection_->appendSchemaTemplate(req); } -void Session::addAlignedMeasurementsInTemplate(const string &template_name, const string &measurement, +void Session::addAlignedMeasurementsInTemplate(const string& template_name, const string& measurement, TSDataType::TSDataType dataType, TSEncoding::TSEncoding encoding, CompressionType::CompressionType compressor) { vector measurements(1, measurement); @@ -2108,54 +1936,40 @@ void Session::addAlignedMeasurementsInTemplate(const string &template_name, cons addAlignedMeasurementsInTemplate(template_name, measurements, dataTypes, encodings, compressors); } -void Session::addUnalignedMeasurementsInTemplate(const string &template_name, const vector &measurements, - const vector &dataTypes, - const vector &encodings, - const vector &compressors) { +void Session::addUnalignedMeasurementsInTemplate(const string& template_name, const vector& measurements, + const vector& dataTypes, + const vector& encodings, + const vector& compressors) { TSAppendSchemaTemplateReq req; - req.__set_sessionId(sessionId); req.__set_name(template_name); req.__set_measurements(measurements); req.__set_isAligned(false); vector dataTypesOrdinal; dataTypesOrdinal.reserve(dataTypes.size()); - for (TSDataType::TSDataType dataType: dataTypes) { + for (TSDataType::TSDataType dataType : dataTypes) { dataTypesOrdinal.push_back(dataType); } req.__set_dataTypes(dataTypesOrdinal); vector encodingsOrdinal; encodingsOrdinal.reserve(encodings.size()); - for (TSEncoding::TSEncoding encoding: encodings) { + for (TSEncoding::TSEncoding encoding : encodings) { encodingsOrdinal.push_back(encoding); } req.__set_encodings(encodingsOrdinal); vector compressorsOrdinal; compressorsOrdinal.reserve(compressors.size()); - for (CompressionType::CompressionType compressor: compressors) { + for (CompressionType::CompressionType compressor : compressors) { compressorsOrdinal.push_back(compressor); } req.__set_compressors(compressorsOrdinal); - TSStatus tsStatus; - try { - client->appendSchemaTemplate(tsStatus, req); - RpcUtils::verifySuccess(tsStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); - } + defaultSessionConnection_->appendSchemaTemplate(req); } -void Session::addUnalignedMeasurementsInTemplate(const string &template_name, const string &measurement, +void Session::addUnalignedMeasurementsInTemplate(const string& template_name, const string& measurement, TSDataType::TSDataType dataType, TSEncoding::TSEncoding encoding, CompressionType::CompressionType compressor) { vector measurements(1, measurement); @@ -2165,130 +1979,67 @@ void Session::addUnalignedMeasurementsInTemplate(const string &template_name, co addUnalignedMeasurementsInTemplate(template_name, measurements, 
dataTypes, encodings, compressors); } -void Session::deleteNodeInTemplate(const string &template_name, const string &path) { +void Session::deleteNodeInTemplate(const string& template_name, const string& path) { TSPruneSchemaTemplateReq req; - req.__set_sessionId(sessionId); req.__set_name(template_name); req.__set_path(path); - TSStatus tsStatus; - try { - client->pruneSchemaTemplate(tsStatus, req); - RpcUtils::verifySuccess(tsStatus); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const IoTDBException &e) { - log_debug(e.what()); - throw; - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); - } + defaultSessionConnection_->pruneSchemaTemplate(req); } -int Session::countMeasurementsInTemplate(const string &template_name) { +int Session::countMeasurementsInTemplate(const string& template_name) { TSQueryTemplateReq req; - req.__set_sessionId(sessionId); req.__set_name(template_name); req.__set_queryType(TemplateQueryType::COUNT_MEASUREMENTS); - TSQueryTemplateResp resp; - try { - client->querySchemaTemplate(resp, req); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); - } + TSQueryTemplateResp resp = defaultSessionConnection_->querySchemaTemplate(req); return resp.count; } -bool Session::isMeasurementInTemplate(const string &template_name, const string &path) { +bool Session::isMeasurementInTemplate(const string& template_name, const string& path) { TSQueryTemplateReq req; - req.__set_sessionId(sessionId); req.__set_name(template_name); req.__set_measurement(path); req.__set_queryType(TemplateQueryType::IS_MEASUREMENT); - TSQueryTemplateResp resp; - try { - client->querySchemaTemplate(resp, req); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); - } + TSQueryTemplateResp resp = defaultSessionConnection_->querySchemaTemplate(req); return resp.result; } -bool Session::isPathExistInTemplate(const string &template_name, const string &path) { +bool Session::isPathExistInTemplate(const string& template_name, const string& path) { TSQueryTemplateReq req; - req.__set_sessionId(sessionId); req.__set_name(template_name); req.__set_measurement(path); req.__set_queryType(TemplateQueryType::PATH_EXIST); - TSQueryTemplateResp resp; - try { - client->querySchemaTemplate(resp, req); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); - } + TSQueryTemplateResp resp = defaultSessionConnection_->querySchemaTemplate(req); return resp.result; } -std::vector Session::showMeasurementsInTemplate(const string &template_name) { +std::vector Session::showMeasurementsInTemplate(const string& template_name) { TSQueryTemplateReq req; - req.__set_sessionId(sessionId); req.__set_name(template_name); req.__set_measurement(""); req.__set_queryType(TemplateQueryType::SHOW_MEASUREMENTS); - TSQueryTemplateResp resp; - try { - client->querySchemaTemplate(resp, req); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); - } + 
TSQueryTemplateResp resp = defaultSessionConnection_->querySchemaTemplate(req); return resp.measurements; } -std::vector Session::showMeasurementsInTemplate(const string &template_name, const string &pattern) { +std::vector Session::showMeasurementsInTemplate(const string& template_name, const string& pattern) { TSQueryTemplateReq req; - req.__set_sessionId(sessionId); req.__set_name(template_name); req.__set_measurement(pattern); req.__set_queryType(TemplateQueryType::SHOW_MEASUREMENTS); - TSQueryTemplateResp resp; - try { - client->querySchemaTemplate(resp, req); - } catch (const TTransportException &e) { - log_debug(e.what()); - throw IoTDBConnectionException(e.what()); - } catch (const exception &e) { - log_debug(e.what()); - throw IoTDBException(e.what()); - } + TSQueryTemplateResp resp = defaultSessionConnection_->querySchemaTemplate(req); return resp.measurements; } bool Session::checkTemplateExists(const string& template_name) { try { - std::unique_ptr dataset = executeQueryStatement("SHOW NODES IN DEVICE TEMPLATE " + template_name); + std::unique_ptr dataset = executeQueryStatement( + "SHOW NODES IN DEVICE TEMPLATE " + template_name); bool isExisted = dataset->hasNext(); dataset->closeOperationHandle(); return isExisted; } - catch (const exception &e) { - if ( strstr(e.what(), "get template info error") != NULL ) { + catch (const exception& e) { + if (strstr(e.what(), "does not exist") != NULL) { return false; } log_debug(e.what()); diff --git a/iotdb-client/client-cpp/src/main/Session.h b/iotdb-client/client-cpp/src/main/Session.h index 56418ebcec2d3..a1176e42bbe2b 100644 --- a/iotdb-client/client-cpp/src/main/Session.h +++ b/iotdb-client/client-cpp/src/main/Session.h @@ -28,19 +28,23 @@ #include #include #include -#include -#include -#include #include -#include #include #include +#include +#include #include #include #include #include #include #include "IClientRPCService.h" +#include "NodesSupplier.h" +#include "AbstractSessionBuilder.h" +#include "SessionConnection.h" +#include "SessionDataSet.h" +#include "DeviceID.h" +#include "Common.h" //== For compatible with Windows OS == #ifndef LONG_LONG_MIN @@ -59,482 +63,51 @@ using ::apache::thrift::transport::TFramedTransport; using ::apache::thrift::TException; -enum LogLevelType { - LEVEL_DEBUG = 0, - LEVEL_INFO, - LEVEL_WARN, - LEVEL_ERROR -}; -extern LogLevelType LOG_LEVEL; - -#define log_debug(fmt,...) do {if(LOG_LEVEL <= LEVEL_DEBUG) {string s=string("[DEBUG] %s:%d (%s) - ") + fmt + "\n"; printf(s.c_str(), __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__);}} while(0) -#define log_info(fmt,...) do {if(LOG_LEVEL <= LEVEL_INFO) {string s=string("[INFO] %s:%d (%s) - ") + fmt + "\n"; printf(s.c_str(), __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__);}} while(0) -#define log_warn(fmt,...) do {if(LOG_LEVEL <= LEVEL_WARN) {string s=string("[WARN] %s:%d (%s) - ") + fmt + "\n"; printf(s.c_str(), __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__);}} while(0) -#define log_error(fmt,...) 
do {if(LOG_LEVEL <= LEVEL_ERROR) {string s=string("[ERROR] %s:%d (%s) - ") + fmt + "\n"; printf(s.c_str(), __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__);}} while(0) - - -class IoTDBException : public std::exception { -public: - IoTDBException() {} - - explicit IoTDBException(const std::string &m) : message(m) {} - - explicit IoTDBException(const char *m) : message(m) {} - - virtual const char *what() const noexcept override { - return message.c_str(); - } -private: - std::string message; -}; - -class IoTDBConnectionException : public IoTDBException { -public: - IoTDBConnectionException() {} - - explicit IoTDBConnectionException(const char *m) : IoTDBException(m) {} - - explicit IoTDBConnectionException(const std::string &m) : IoTDBException(m) {} -}; - -class ExecutionException : public IoTDBException { -public: - ExecutionException() {} - - explicit ExecutionException(const char *m) : IoTDBException(m) {} - - explicit ExecutionException(const std::string &m) : IoTDBException(m) {} - - explicit ExecutionException(const std::string &m, const TSStatus &tsStatus) : IoTDBException(m), status(tsStatus) {} - - TSStatus status; -}; - -class BatchExecutionException : public IoTDBException { -public: - BatchExecutionException() {} - - explicit BatchExecutionException(const char *m) : IoTDBException(m) {} - - explicit BatchExecutionException(const std::string &m) : IoTDBException(m) {} - - explicit BatchExecutionException(const std::vector &statusList) : statusList(statusList) {} - - BatchExecutionException(const std::string &m, const std::vector &statusList) : IoTDBException(m), statusList(statusList) {} - - std::vector statusList; -}; - -class UnSupportedDataTypeException : public IoTDBException { -public: - UnSupportedDataTypeException() {} - - explicit UnSupportedDataTypeException(const char *m) : IoTDBException(m) {} - - explicit UnSupportedDataTypeException(const std::string &m) : IoTDBException("UnSupported dataType: " + m) {} -}; - -namespace Version { - enum Version { - V_0_12, V_0_13, V_1_0 - }; -} - -namespace CompressionType { - enum CompressionType { - UNCOMPRESSED = (char) 0, - SNAPPY = (char) 1, - GZIP = (char) 2, - LZO = (char) 3, - SDT = (char) 4, - PAA = (char) 5, - PLA = (char) 6, - LZ4 = (char) 7, - ZSTD = (char) 8, - LZMA2 = (char) 9, - }; -} - -namespace TSDataType { - enum TSDataType { - BOOLEAN = (char) 0, - INT32 = (char) 1, - INT64 = (char) 2, - FLOAT = (char) 3, - DOUBLE = (char) 4, - TEXT = (char) 5, - VECTOR = (char) 6, - NULLTYPE = (char) 7 - }; -} - -namespace TSEncoding { - enum TSEncoding { - PLAIN = (char) 0, - DICTIONARY = (char) 1, - RLE = (char) 2, - DIFF = (char) 3, - TS_2DIFF = (char) 4, - BITMAP = (char) 5, - GORILLA_V1 = (char) 6, - REGULAR = (char) 7, - GORILLA = (char) 8, - ZIGZAG = (char) 9, - CHIMP = (char) 11, - SPRINTZ = (char) 12, - RLBE = (char) 13 - }; -} - -namespace TSStatusCode { - enum TSStatusCode { - SUCCESS_STATUS = 200, - - // System level - INCOMPATIBLE_VERSION = 201, - CONFIGURATION_ERROR = 202, - START_UP_ERROR = 203, - SHUT_DOWN_ERROR = 204, - - // General Error - UNSUPPORTED_OPERATION = 300, - EXECUTE_STATEMENT_ERROR = 301, - MULTIPLE_ERROR = 302, - ILLEGAL_PARAMETER = 303, - OVERLAP_WITH_EXISTING_TASK = 304, - INTERNAL_SERVER_ERROR = 305, - - // Client, - REDIRECTION_RECOMMEND = 400, - - // Schema Engine - DATABASE_NOT_EXIST = 500, - DATABASE_ALREADY_EXISTS = 501, - SERIES_OVERFLOW = 502, - TIMESERIES_ALREADY_EXIST = 503, - TIMESERIES_IN_BLACK_LIST = 504, - ALIAS_ALREADY_EXIST = 505, - PATH_ALREADY_EXIST = 506, - METADATA_ERROR = 
507, - PATH_NOT_EXIST = 508, - ILLEGAL_PATH = 509, - CREATE_TEMPLATE_ERROR = 510, - DUPLICATED_TEMPLATE = 511, - UNDEFINED_TEMPLATE = 512, - TEMPLATE_NOT_SET = 513, - DIFFERENT_TEMPLATE = 514, - TEMPLATE_IS_IN_USE = 515, - TEMPLATE_INCOMPATIBLE = 516, - SEGMENT_NOT_FOUND = 517, - PAGE_OUT_OF_SPACE = 518, - RECORD_DUPLICATED=519, - SEGMENT_OUT_OF_SPACE = 520, - PBTREE_FILE_NOT_EXISTS = 521, - OVERSIZE_RECORD = 522, - PBTREE_FILE_REDO_LOG_BROKEN = 523, - TEMPLATE_NOT_ACTIVATED = 524, - - // Storage Engine - SYSTEM_READ_ONLY = 600, - STORAGE_ENGINE_ERROR = 601, - STORAGE_ENGINE_NOT_READY = 602, - }; -} - -class RpcUtils { -public: - std::shared_ptr SUCCESS_STATUS; - - RpcUtils() { - SUCCESS_STATUS = std::make_shared(); - SUCCESS_STATUS->__set_code(TSStatusCode::SUCCESS_STATUS); - } - - static void verifySuccess(const TSStatus &status); - - static void verifySuccess(const std::vector &statuses); - - static TSStatus getStatus(TSStatusCode::TSStatusCode tsStatusCode); - - static TSStatus getStatus(int code, const std::string &message); - - static std::shared_ptr getTSExecuteStatementResp(TSStatusCode::TSStatusCode tsStatusCode); - - static std::shared_ptr - getTSExecuteStatementResp(TSStatusCode::TSStatusCode tsStatusCode, const std::string &message); - - static std::shared_ptr getTSExecuteStatementResp(const TSStatus &status); - - static std::shared_ptr getTSFetchResultsResp(TSStatusCode::TSStatusCode tsStatusCode); - - static std::shared_ptr - getTSFetchResultsResp(TSStatusCode::TSStatusCode tsStatusCode, const std::string &appendMessage); - - static std::shared_ptr getTSFetchResultsResp(const TSStatus &status); -}; - -// Simulate the ByteBuffer class in Java -class MyStringBuffer { -public: - MyStringBuffer() : pos(0) { - checkBigEndian(); - } - - explicit MyStringBuffer(const std::string& str) : str(str), pos(0) { - checkBigEndian(); - } - - void reserve(size_t n) { - str.reserve(n); - } - - void clear() { - str.clear(); - pos = 0; - } - - bool hasRemaining() { - return pos < str.size(); - } - - int getInt() { - return *(int *) getOrderedByte(4); - } - - int64_t getInt64() { -#ifdef ARCH32 - const char *buf_addr = getOrderedByte(8); - if (reinterpret_cast(buf_addr) % 4 == 0) { - return *(int64_t *)buf_addr; - } else { - char tmp_buf[8]; - memcpy(tmp_buf, buf_addr, 8); - return *(int64_t*)tmp_buf; - } -#else - return *(int64_t *) getOrderedByte(8); -#endif - } - - float getFloat() { - return *(float *) getOrderedByte(4); - } - - double getDouble() { -#ifdef ARCH32 - const char *buf_addr = getOrderedByte(8); - if (reinterpret_cast(buf_addr) % 4 == 0) { - return *(double*)buf_addr; - } else { - char tmp_buf[8]; - memcpy(tmp_buf, buf_addr, 8); - return *(double*)tmp_buf; - } -#else - return *(double *) getOrderedByte(8); -#endif - } - - char getChar() { - return str[pos++]; - } - - bool getBool() { - return getChar() == 1; - } - - std::string getString() { - size_t len = getInt(); - size_t tmpPos = pos; - pos += len; - return str.substr(tmpPos, len); - } - - void putInt(int ins) { - putOrderedByte((char *) &ins, 4); - } - - void putInt64(int64_t ins) { - putOrderedByte((char *) &ins, 8); - } - - void putFloat(float ins) { - putOrderedByte((char *) &ins, 4); - } - - void putDouble(double ins) { - putOrderedByte((char *) &ins, 8); - } - - void putChar(char ins) { - str += ins; - } - - void putBool(bool ins) { - char tmp = ins ? 
1 : 0; - str += tmp; - } - - void putString(const std::string &ins) { - putInt((int)(ins.size())); - str += ins; - } - - void concat(const std::string &ins) { - str.append(ins); - } - -public: - std::string str; - size_t pos; - -private: - void checkBigEndian() { - static int chk = 0x0201; //used to distinguish CPU's type (BigEndian or LittleEndian) - isBigEndian = (0x01 != *(char *) (&chk)); - } - - const char *getOrderedByte(size_t len) { - const char *p = nullptr; - if (isBigEndian) { - p = str.c_str() + pos; - } else { - const char *tmp = str.c_str(); - for (size_t i = pos; i < pos + len; i++) { - numericBuf[pos + len - 1 - i] = tmp[i]; - } - p = numericBuf; - } - pos += len; - return p; - } - - void putOrderedByte(char *buf, int len) { - if (isBigEndian) { - str.assign(buf, len); - } else { - for (int i = len - 1; i > -1; i--) { - str += buf[i]; - } - } - } - -private: - bool isBigEndian{}; - char numericBuf[8]{}; //only be used by int, long, float, double etc. -}; - -class BitMap { -public: - /** Initialize a BitMap with given size. */ - explicit BitMap(size_t size = 0) { - resize(size); +template +void safe_cast(const T& value, Target& target) { + /* + Target Allowed Source Types + BOOLEAN BOOLEAN + INT32 INT32 + INT64 INT32 INT64 + FLOAT INT32 FLOAT + DOUBLE INT32 INT64 FLOAT DOUBLE + TEXT TEXT + */ + if (std::is_same::value) { + target = *(Target*)&value; } - - /** change the size */ - void resize(size_t size) { - this->size = size; - this->bits.resize((size >> 3) + 1); // equal to "size/8 + 1" - reset(); + else if (std::is_same::value && std::is_array::value && std::is_same< + char, typename std::remove_extent::type>::value) { + string tmp((const char*)&value); + target = *(Target*)&tmp; } - - /** mark as 1 at the given bit position. */ - bool mark(size_t position) { - if (position >= size) - return false; - - bits[position >> 3] |= (char) 1 << (position % 8); - return true; + else if (std::is_same::value && std::is_same::value) { + int64_t tmp = *(int32_t*)&value; + target = *(Target*)&tmp; } - - /** mark as 0 at the given bit position. */ - bool unmark(size_t position) { - if (position >= size) - return false; - - bits[position >> 3] &= ~((char) 1 << (position % 8)); - return true; + else if (std::is_same::value && std::is_same::value) { + float tmp = *(int32_t*)&value; + target = *(Target*)&tmp; } - - /** mark as 1 at all positions. */ - void markAll() { - std::fill(bits.begin(), bits.end(), (char) 0XFF); + else if (std::is_same::value && std::is_same::value) { + double tmp = *(int32_t*)&value; + target = *(Target*)&tmp; } - - /** mark as 0 at all positions. */ - void reset() { - std::fill(bits.begin(), bits.end(), (char) 0); + else if (std::is_same::value && std::is_same::value) { + double tmp = *(int64_t*)&value; + target = *(Target*)&tmp; } - - /** returns the value of the bit with the specified index. 
*/ - bool isMarked(size_t position) const { - if (position >= size) - return false; - - return (bits[position >> 3] & ((char) 1 << (position % 8))) != 0; + else if (std::is_same::value && std::is_same::value) { + double tmp = *(float*)&value; + target = *(Target*)&tmp; } - - /** whether all bits are zero, i.e., no Null value */ - bool isAllUnmarked() const { - size_t j; - for (j = 0; j < size >> 3; j++) { - if (bits[j] != (char) 0) { - return false; - } - } - for (j = 0; j < size % 8; j++) { - if ((bits[size >> 3] & ((char) 1 << j)) != 0) { - return false; - } - } - return true; + else { + throw UnSupportedDataTypeException("Error: Parameter type " + + std::string(typeid(T).name()) + " cannot be converted to DataType" + + std::string(typeid(Target).name())); } - - /** whether all bits are one, i.e., all are Null */ - bool isAllMarked() const { - size_t j; - for (j = 0; j < size >> 3; j++) { - if (bits[j] != (char) 0XFF) { - return false; - } - } - for (j = 0; j < size % 8; j++) { - if ((bits[size >> 3] & ((char) 1 << j)) == 0) { - return false; - } - } - return true; - } - - const std::vector& getByteArray() const { - return this->bits; - } - - size_t getSize() const { - return this->size; - } - -private: - size_t size; - std::vector bits; -}; - -class Field { -public: - TSDataType::TSDataType dataType; - bool boolV; - int intV; - int64_t longV; - float floatV; - double doubleV; - std::string stringV; - - explicit Field(TSDataType::TSDataType a) { - dataType = a; - } - - Field() = default; -}; +} /* * A tablet data of one device, the tablet contains multiple measurements of this device that share @@ -559,13 +132,17 @@ class Tablet { public: std::string deviceId; // deviceId of this tablet - std::vector> schemas; // the list of measurement schemas for creating the tablet - std::vector timestamps; // timestamps in this tablet + std::vector> schemas; + // the list of measurement schemas for creating the tablet + std::map schemaNameIndex; // the map of schema name to index + std::vector columnTypes; // the list of column types (used in table model) + std::vector timestamps; // timestamps in this tablet std::vector values; // each object is a primitive type array, which represents values of one measurement std::vector bitMaps; // each bitmap represents the existence of each value in the current column - size_t rowSize; //the number of rows to include in this tablet - size_t maxRowNumber; // the maximum number of rows for this tablet - bool isAligned; // whether this tablet store data of aligned timeseries or not + size_t rowSize; //the number of rows to include in this tablet + size_t maxRowNumber; // the maximum number of rows for this tablet + bool isAligned; // whether this tablet store data of aligned timeseries or not + std::vector idColumnIndexes; Tablet() = default; @@ -576,9 +153,16 @@ class Tablet { * @param deviceId the name of the device specified to be written in * @param timeseries the list of measurement schemas for creating the tablet */ - Tablet(const std::string &deviceId, - const std::vector> ×eries) - : Tablet(deviceId, timeseries, DEFAULT_ROW_SIZE) {} + Tablet(const std::string& deviceId, + const std::vector>& timeseries) + : Tablet(deviceId, timeseries, DEFAULT_ROW_SIZE) { + } + + Tablet(const std::string& deviceId, + const std::vector>& timeseries, + const std::vector& columnTypes) + : Tablet(deviceId, timeseries, columnTypes, DEFAULT_ROW_SIZE) { + } /** * Return a tablet with the specified number of rows (maxBatchSize). 
Only @@ -588,242 +172,234 @@ class Tablet { * @param deviceId the name of the device specified to be written in * @param schemas the list of measurement schemas for creating the row * batch + * @param columnTypes the list of column types (used in table model) * @param maxRowNumber the maximum number of rows for this tablet */ - Tablet(const std::string &deviceId, const std::vector> &schemas, + Tablet(const std::string& deviceId, + const std::vector>& schemas, + int maxRowNumber) + : Tablet(deviceId, schemas, std::vector(schemas.size(), ColumnCategory::FIELD), maxRowNumber) { + } + + Tablet(const std::string& deviceId, const std::vector>& schemas, + const std::vector columnTypes, size_t maxRowNumber, bool _isAligned = false) : deviceId(deviceId), schemas(schemas), - maxRowNumber(maxRowNumber), isAligned(_isAligned) { + columnTypes(columnTypes), + maxRowNumber(maxRowNumber), isAligned(_isAligned) { // create timestamp column timestamps.resize(maxRowNumber); // create value columns values.resize(schemas.size()); createColumns(); + // init idColumnIndexs + for (size_t i = 0; i < this->columnTypes.size(); i++) { + if (this->columnTypes[i] == ColumnCategory::TAG) { + idColumnIndexes.push_back(i); + } + } // create bitMaps bitMaps.resize(schemas.size()); for (size_t i = 0; i < schemas.size(); i++) { bitMaps[i].resize(maxRowNumber); } + // create schemaNameIndex + for (size_t i = 0; i < schemas.size(); i++) { + schemaNameIndex[schemas[i].first] = i; + } this->rowSize = 0; } + Tablet(const Tablet& other) + : deviceId(other.deviceId), + schemas(other.schemas), + schemaNameIndex(other.schemaNameIndex), + columnTypes(other.columnTypes), + timestamps(other.timestamps), + maxRowNumber(other.maxRowNumber), + bitMaps(other.bitMaps), + rowSize(other.rowSize), + isAligned(other.isAligned), + idColumnIndexes(other.idColumnIndexes) { + values.resize(other.values.size()); + for (size_t i = 0; i < other.values.size(); ++i) { + if (!other.values[i]) continue; + TSDataType::TSDataType type = schemas[i].second; + deepCopyTabletColValue(&(other.values[i]), &values[i], type, maxRowNumber); + } + } + + Tablet& operator=(const Tablet& other) { + if (this != &other) { + deleteColumns(); + deviceId = other.deviceId; + schemas = other.schemas; + schemaNameIndex = other.schemaNameIndex; + columnTypes = other.columnTypes; + timestamps = other.timestamps; + maxRowNumber = other.maxRowNumber; + rowSize = other.rowSize; + isAligned = other.isAligned; + idColumnIndexes = other.idColumnIndexes; + bitMaps = other.bitMaps; + values.resize(other.values.size()); + for (size_t i = 0; i < other.values.size(); ++i) { + if (!other.values[i]) continue; + TSDataType::TSDataType type = schemas[i].second; + deepCopyTabletColValue(&(other.values[i]), &values[i], type, maxRowNumber); + } + } + return *this; + } + ~Tablet() { try { deleteColumns(); - } catch (exception &e) { + } + catch (exception& e) { log_debug(string("Tablet::~Tablet(), ") + e.what()); } } - void addValue(size_t schemaId, size_t rowIndex, void *value); - - void reset(); // Reset Tablet to the default state - set the rowSize to 0 - - size_t getTimeBytesSize(); - - size_t getValueByteSize(); // total byte size that values occupies - - void setAligned(bool isAligned); -}; - -class SessionUtils { -public: - static std::string getTime(const Tablet &tablet); - - static std::string getValue(const Tablet &tablet); -}; - -class RowRecord { -public: - int64_t timestamp; - std::vector fields; - - explicit RowRecord(int64_t timestamp) { - this->timestamp = timestamp; - } - - 
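// A usage sketch of the constructors above in the tree model (every column
// defaults to ColumnCategory::FIELD); device and measurement names are
// illustrative. addTimestamp must precede addValue for a given row, because
// addValue bounds-checks rowIndex against the rowSize that addTimestamp grows:
std::vector<std::pair<std::string, TSDataType::TSDataType>> schemas;
schemas.emplace_back("temperature", TSDataType::FLOAT);
schemas.emplace_back("status", TSDataType::BOOLEAN);
Tablet tablet("root.sg1.d1", schemas, /*maxRowNumber=*/100);
tablet.addTimestamp(0, 1700000000000LL);   // row 0, epoch millis
tablet.addValue("temperature", 0, 25.5f);  // resolved through schemaNameIndex
tablet.addValue("status", 0, true);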
RowRecord(int64_t timestamp, const std::vector &fields) - : timestamp(timestamp), fields(fields) { + void addTimestamp(size_t rowIndex, int64_t timestamp) { + timestamps[rowIndex] = timestamp; + rowSize = max(rowSize, rowIndex + 1); } - explicit RowRecord(const std::vector &fields) - : timestamp(-1), fields(fields) { - } + static void deepCopyTabletColValue(void* const* srcPtr, void** destPtr, + TSDataType::TSDataType type, int maxRowNumber); - RowRecord() { - this->timestamp = -1; - } + template + void addValue(size_t schemaId, size_t rowIndex, const T& value) { + if (schemaId >= schemas.size()) { + char tmpStr[100]; + sprintf(tmpStr, "Tablet::addValue(), schemaId >= schemas.size(). schemaId=%ld, schemas.size()=%ld.", + schemaId, schemas.size()); + throw std::out_of_range(tmpStr); + } - void addField(const Field &f) { - this->fields.push_back(f); - } + if (rowIndex >= rowSize) { + char tmpStr[100]; + sprintf(tmpStr, "Tablet::addValue(), rowIndex >= rowSize. rowIndex=%ld, rowSize.size()=%ld.", rowIndex, + rowSize); + throw std::out_of_range(tmpStr); + } - std::string toString() { - std::string ret; - if (this->timestamp != -1) { - ret.append(std::to_string(timestamp)); - ret.append("\t"); + TSDataType::TSDataType dataType = schemas[schemaId].second; + switch (dataType) { + case TSDataType::BOOLEAN: { + safe_cast(value, ((bool*)values[schemaId])[rowIndex]); + break; } - for (size_t i = 0; i < fields.size(); i++) { - if (i != 0) { - ret.append("\t"); - } - TSDataType::TSDataType dataType = fields[i].dataType; - switch (dataType) { - case TSDataType::BOOLEAN: - ret.append(fields[i].boolV ? "true" : "false"); - break; - case TSDataType::INT32: - ret.append(std::to_string(fields[i].intV)); - break; - case TSDataType::INT64: - ret.append(std::to_string(fields[i].longV)); - break; - case TSDataType::FLOAT: - ret.append(std::to_string(fields[i].floatV)); - break; - case TSDataType::DOUBLE: - ret.append(std::to_string(fields[i].doubleV)); - break; - case TSDataType::TEXT: - ret.append(fields[i].stringV); - break; - case TSDataType::NULLTYPE: - ret.append("NULL"); - break; - default: - break; - } + case TSDataType::INT32: { + safe_cast(value, ((int*)values[schemaId])[rowIndex]); + break; } - ret.append("\n"); - return ret; - } -}; - -class SessionDataSet { -private: - const string TIMESTAMP_STR = "Time"; - bool hasCachedRecord = false; - std::string sql; - int64_t queryId; - int64_t statementId; - int64_t sessionId; - std::shared_ptr client; - int fetchSize = 1024; - std::vector columnNameList; - std::vector columnTypeList; - // duplicated column index -> origin index - std::unordered_map duplicateLocation; - // column name -> column location - std::unordered_map columnMap; - // column size - int columnSize = 0; - int columnFieldStartIndex = 0; //Except Timestamp column, 1st field's pos in columnNameList - bool isIgnoreTimeStamp = false; - - int rowsIndex = 0; // used to record the row index in current TSQueryDataSet - std::shared_ptr tsQueryDataSet; - MyStringBuffer tsQueryDataSetTimeBuffer; - std::vector> valueBuffers; - std::vector> bitmapBuffers; - RowRecord rowRecord; - char *currentBitmap = nullptr; // used to cache the current bitmap for every column - static const int flag = 0x80; // used to do `or` operation with bitmap to judge whether the value is null - - bool operationIsOpen = false; - -public: - SessionDataSet(const std::string &sql, - const std::vector &columnNameList, - const std::vector &columnTypeList, - std::map &columnNameIndexMap, - bool isIgnoreTimeStamp, - int64_t queryId, 
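// Side note on the bounds checks above: formatting size_t with "%ld" via
// sprintf is undefined on LLP64 platforms (64-bit Windows, where long is
// 32 bits). A portable equivalent using the dedicated size_t specifier:
char tmpStr[100];
snprintf(tmpStr, sizeof(tmpStr),
         "Tablet::addValue(), schemaId >= schemas.size(). schemaId=%zu, schemas.size()=%zu.",
         schemaId, schemas.size());
throw std::out_of_range(tmpStr);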
int64_t statementId, - std::shared_ptr client, int64_t sessionId, - const std::shared_ptr &queryDataSet) : tsQueryDataSetTimeBuffer(queryDataSet->time) { - this->sessionId = sessionId; - this->sql = sql; - this->queryId = queryId; - this->statementId = statementId; - this->client = client; - this->currentBitmap = new char[columnNameList.size()]; - this->isIgnoreTimeStamp = isIgnoreTimeStamp; - if (!isIgnoreTimeStamp) { - columnFieldStartIndex = 1; - this->columnNameList.push_back(TIMESTAMP_STR); - this->columnTypeList.push_back("INT64"); + case TSDataType::DATE: { + safe_cast(value, ((boost::gregorian::date*)values[schemaId])[rowIndex]); + break; } - this->columnNameList.insert(this->columnNameList.end(), columnNameList.begin(), columnNameList.end()); - this->columnTypeList.insert(this->columnTypeList.end(), columnTypeList.begin(), columnTypeList.end()); - - valueBuffers.reserve(queryDataSet->valueList.size()); - bitmapBuffers.reserve(queryDataSet->bitmapList.size()); - int deduplicateIdx = 0; - std::unordered_map columnToFirstIndexMap; - for (size_t i = columnFieldStartIndex; i < this->columnNameList.size(); i++) { - std::string name = this->columnNameList[i]; - if (this->columnMap.find(name) != this->columnMap.end()) { - duplicateLocation[i] = columnToFirstIndexMap[name]; - } else { - columnToFirstIndexMap[name] = i; - if (!columnNameIndexMap.empty()) { - int valueIndex = columnNameIndexMap[name]; - this->columnMap[name] = valueIndex; - this->valueBuffers.emplace_back(new MyStringBuffer(queryDataSet->valueList[valueIndex])); - this->bitmapBuffers.emplace_back(new MyStringBuffer(queryDataSet->bitmapList[valueIndex])); - } else { - this->columnMap[name] = deduplicateIdx; - this->valueBuffers.emplace_back(new MyStringBuffer(queryDataSet->valueList[deduplicateIdx])); - this->bitmapBuffers.emplace_back(new MyStringBuffer(queryDataSet->bitmapList[deduplicateIdx])); - } - deduplicateIdx++; - } + case TSDataType::TIMESTAMP: + case TSDataType::INT64: { + safe_cast(value, ((int64_t*)values[schemaId])[rowIndex]); + break; } - this->tsQueryDataSet = queryDataSet; + case TSDataType::FLOAT: { + safe_cast(value, ((float*)values[schemaId])[rowIndex]); + break; + } + case TSDataType::DOUBLE: { + safe_cast(value, ((double*)values[schemaId])[rowIndex]); + break; + } + case TSDataType::BLOB: + case TSDataType::STRING: + case TSDataType::TEXT: { + safe_cast(value, ((string*)values[schemaId])[rowIndex]); + break; + } + default: + throw UnSupportedDataTypeException(string("Data type ") + to_string(dataType) + " is not supported."); + } + } - operationIsOpen = true; + template + void addValue(const string& schemaName, size_t rowIndex, const T& value) { + if (schemaNameIndex.find(schemaName) == schemaNameIndex.end()) { + throw SchemaNotFoundException(string("Schema ") + schemaName + " not found."); + } + size_t schemaId = schemaNameIndex[schemaName]; + addValue(schemaId, rowIndex, value); } - ~SessionDataSet() { - try { - closeOperationHandle(); - } catch (exception &e) { - log_debug(string("SessionDataSet::~SessionDataSet(), ") + e.what()); + + void* getValue(size_t schemaId, size_t rowIndex, TSDataType::TSDataType dataType) { + if (schemaId >= schemas.size()) { + throw std::out_of_range("Tablet::getValue schemaId out of range: " + + std::to_string(schemaId)); + } + if (rowIndex >= rowSize) { + throw std::out_of_range("Tablet::getValue rowIndex out of range: " + + std::to_string(rowIndex)); } - if (currentBitmap != nullptr) { - delete[] currentBitmap; - currentBitmap = nullptr; + switch (dataType) { + case 
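// Note: the DATE branch above materializes the column as boost::gregorian::date,
// so callers must pass exactly that type (it is matched by safe_cast's
// identical-type branch), e.g.:
//   tablet.addValue("day", 0, boost::gregorian::date(2024, 1, 1));
// ("day" is an illustrative DATE measurement; requires
// <boost/date_time/gregorian/gregorian.hpp>).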
TSDataType::BOOLEAN: + return &(reinterpret_cast(values[schemaId])[rowIndex]); + case TSDataType::INT32: + return &(reinterpret_cast(values[schemaId])[rowIndex]); + case TSDataType::DATE: + return &(reinterpret_cast(values[schemaId])[rowIndex]); + case TSDataType::TIMESTAMP: + case TSDataType::INT64: + return &(reinterpret_cast(values[schemaId])[rowIndex]); + case TSDataType::FLOAT: + return &(reinterpret_cast(values[schemaId])[rowIndex]); + case TSDataType::DOUBLE: + return &(reinterpret_cast(values[schemaId])[rowIndex]); + case TSDataType::BLOB: + case TSDataType::STRING: + case TSDataType::TEXT: + return &(reinterpret_cast(values[schemaId])[rowIndex]); + default: + throw UnSupportedDataTypeException("Unsupported data type: " + + std::to_string(dataType)); } } - int getFetchSize(); + std::shared_ptr getDeviceID(int i); - void setFetchSize(int fetchSize); + std::vector> getSchemas() const { + return schemas; + } - std::vector getColumnNames(); + void reset(); // Reset Tablet to the default state - set the rowSize to 0 - std::vector getColumnTypeList(); + size_t getTimeBytesSize(); - bool hasNext(); + size_t getValueByteSize(); // total byte size that values occupies - void constructOneRow(); + void setAligned(bool isAligned); +}; - bool isNull(int index, int rowNum); +class SessionUtils { +public: + static std::string getTime(const Tablet& tablet); - RowRecord *next(); + static std::string getValue(const Tablet& tablet); - void closeOperationHandle(bool forceClose = false); + static bool isTabletContainsSingleDevice(Tablet tablet); }; class TemplateNode { public: - explicit TemplateNode(const std::string &name) : name_(name) {} + explicit TemplateNode(const std::string& name) : name_(name) { + } - const std::string &getName() const { + const std::string& getName() const { return name_; } - virtual const std::unordered_map> &getChildren() const { + virtual const std::unordered_map>& getChildren() const { throw BatchExecutionException("Should call exact sub class!"); } @@ -843,8 +419,7 @@ class TemplateNode { class MeasurementNode : public TemplateNode { public: - - MeasurementNode(const std::string &name_, TSDataType::TSDataType data_type_, TSEncoding::TSEncoding encoding_, + MeasurementNode(const std::string& name_, TSDataType::TSDataType data_type_, TSEncoding::TSEncoding encoding_, CompressionType::CompressionType compression_type_) : TemplateNode(name_) { this->data_type_ = data_type_; this->encoding_ = encoding_; @@ -877,28 +452,28 @@ class MeasurementNode : public TemplateNode { class InternalNode : public TemplateNode { public: + InternalNode(const std::string& name, bool is_aligned) : TemplateNode(name), is_aligned_(is_aligned) { + } - InternalNode(const std::string &name, bool is_aligned) : TemplateNode(name), is_aligned_(is_aligned) {} - - void addChild(const InternalNode &node) { + void addChild(const InternalNode& node) { if (this->children_.count(node.getName())) { throw BatchExecutionException("Duplicated child of node in template."); } this->children_[node.getName()] = std::make_shared(node); } - void addChild(const MeasurementNode &node) { + void addChild(const MeasurementNode& node) { if (this->children_.count(node.getName())) { throw BatchExecutionException("Duplicated child of node in template."); } this->children_[node.getName()] = std::make_shared(node); } - void deleteChild(const TemplateNode &node) { + void deleteChild(const TemplateNode& node) { this->children_.erase(node.getName()); } - const std::unordered_map> &getChildren() const override { + const 
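// getValue returns a type-erased pointer into the column buffer; the caller
// casts it according to the schema's declared data type. Continuing the
// earlier tablet sketch:
void* raw = tablet.getValue(/*schemaId=*/0, /*rowIndex=*/0, TSDataType::FLOAT);
float temperature = *static_cast<float*>(raw);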
std::unordered_map>& getChildren() const override { return children_; } @@ -916,17 +491,17 @@ class InternalNode : public TemplateNode { }; namespace TemplateQueryType { - enum TemplateQueryType { - COUNT_MEASUREMENTS, IS_MEASUREMENT, PATH_EXIST, SHOW_MEASUREMENTS - }; +enum TemplateQueryType { + COUNT_MEASUREMENTS, IS_MEASUREMENT, PATH_EXIST, SHOW_MEASUREMENTS +}; } class Template { public: + Template(const std::string& name, bool is_aligned) : name_(name), is_aligned_(is_aligned) { + } - Template(const std::string &name, bool is_aligned) : name_(name), is_aligned_(is_aligned) {} - - const std::string &getName() const { + const std::string& getName() const { return name_; } @@ -934,14 +509,14 @@ class Template { return is_aligned_; } - void addToTemplate(const InternalNode &child) { + void addToTemplate(const InternalNode& child) { if (this->children_.count(child.getName())) { throw BatchExecutionException("Duplicated child of node in template."); } this->children_[child.getName()] = std::make_shared(child); } - void addToTemplate(const MeasurementNode &child) { + void addToTemplate(const MeasurementNode& child) { if (this->children_.count(child.getName())) { throw BatchExecutionException("Duplicated child of node in template."); } @@ -958,43 +533,70 @@ class Template { class Session { private: - std::string host; - int rpcPort; - std::string username; - std::string password; - const TSProtocolVersion::type protocolVersion = TSProtocolVersion::IOTDB_SERVICE_PROTOCOL_V3; - std::shared_ptr client; - std::shared_ptr transport; - bool isClosed = true; - int64_t sessionId; - int64_t statementId; - std::string zoneId; - int fetchSize; + std::string host_; + int rpcPort_; + std::string username_; + std::string password_; + const TSProtocolVersion::type protocolVersion_ = TSProtocolVersion::IOTDB_SERVICE_PROTOCOL_V3; + bool isClosed_ = true; + std::string zoneId_; + int fetchSize_; const static int DEFAULT_FETCH_SIZE = 10000; const static int DEFAULT_TIMEOUT_MS = 0; Version::Version version; + std::string sqlDialect_ = "tree"; // default sql dialect + std::string database_; + bool enableAutoFetch_ = true; + bool enableRedirection_ = true; + std::shared_ptr nodesSupplier_; + friend class SessionConnection; + friend class TableSession; + std::shared_ptr defaultSessionConnection_; + + TEndPoint defaultEndPoint_; + + struct TEndPointHash { + size_t operator()(const TEndPoint& endpoint) const { + return std::hash()(endpoint.ip) ^ std::hash()(endpoint.port); + } + }; + + struct TEndPointEqual { + bool operator()(const TEndPoint& lhs, const TEndPoint& rhs) const { + return lhs.ip == rhs.ip && lhs.port == rhs.port; + } + }; + + using EndPointSessionMap = std::unordered_map< + TEndPoint, shared_ptr, TEndPointHash, TEndPointEqual>; + EndPointSessionMap endPointToSessionConnection; + std::unordered_map deviceIdToEndpoint; + std::unordered_map, TEndPoint> tableModelDeviceIdToEndpoint; private: - static bool checkSorted(const Tablet &tablet); + void removeBrokenSessionConnection(shared_ptr sessionConnection); - static bool checkSorted(const std::vector ×); + static bool checkSorted(const Tablet& tablet); - static void sortTablet(Tablet &tablet); + static bool checkSorted(const std::vector& times); - static void sortIndexByTimestamp(int *index, std::vector ×tamps, int length); + static void sortTablet(Tablet& tablet); - void appendValues(std::string &buffer, const char *value, int size); + static void sortIndexByTimestamp(int* index, std::vector& timestamps, int length); + + void appendValues(std::string& 
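// NOTE: the std::hash specializations above lost their template arguments in
// extraction. Assuming TEndPoint carries a string ip and an int port (as the
// equality functor suggests), the combiner was presumably:
//   return std::hash<std::string>()(endpoint.ip) ^ std::hash<int>()(endpoint.port);
// and the map alias:
//   std::unordered_map<TEndPoint, std::shared_ptr<SessionConnection>,
//                      TEndPointHash, TEndPointEqual>;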
buffer, const char* value, int size); void - putValuesIntoBuffer(const std::vector &types, const std::vector &values, - std::string &buf); + putValuesIntoBuffer(const std::vector& types, const std::vector& values, + std::string& buf); int8_t getDataTypeNumber(TSDataType::TSDataType type); struct TsCompare { - std::vector ×tamps; + std::vector& timestamps; - explicit TsCompare(std::vector &inTimestamps) : timestamps(inTimestamps) {}; + explicit TsCompare(std::vector& inTimestamps) : timestamps(inTimestamps) { + }; bool operator()(int i, int j) { return (timestamps[i] < timestamps[j]); }; }; @@ -1003,51 +605,121 @@ class Session { void initZoneId(); + void initNodesSupplier(); + + void initDefaultSessionConnection(); + + template + void insertByGroup(std::unordered_map, T>& insertGroup, + InsertConsumer insertConsumer); + + template + void insertOnce(std::unordered_map, T>& insertGroup, + InsertConsumer insertConsumer); + + void insertStringRecordsWithLeaderCache(vector deviceIds, vector times, + vector> measurementsList, vector> valuesList, + bool isAligned); + + void insertRecordsWithLeaderCache(vector deviceIds, vector times, + vector> measurementsList, + const vector>& typesList, + vector> valuesList, bool isAligned); + + void insertTabletsWithLeaderCache(unordered_map tablets, bool sorted, bool isAligned); + + shared_ptr getQuerySessionConnection(); + + shared_ptr getSessionConnection(std::string deviceId); + + shared_ptr getSessionConnection(std::shared_ptr deviceId); + + void handleQueryRedirection(TEndPoint endPoint); + + void handleRedirection(const std::string& deviceId, TEndPoint endPoint); + + void handleRedirection(const std::shared_ptr& deviceId, TEndPoint endPoint); + + void setSqlDialect(const std::string& dialect) { + this->sqlDialect_ = dialect; + } + + void setDatabase(const std::string& database) { + this->database_ = database; + } + + string getDatabase() { + return database_; + } + + void changeDatabase(string database) { + this->database_ = database; + } + public: - Session(const std::string &host, int rpcPort) : username("user"), password("password"), version(Version::V_1_0) { - this->host = host; - this->rpcPort = rpcPort; + Session(const std::string& host, int rpcPort) : username_("root"), password_("root"), version(Version::V_1_0) { + this->host_ = host; + this->rpcPort_ = rpcPort; initZoneId(); + initNodesSupplier(); } - Session(const std::string &host, int rpcPort, const std::string &username, const std::string &password) - : fetchSize(DEFAULT_FETCH_SIZE) { - this->host = host; - this->rpcPort = rpcPort; - this->username = username; - this->password = password; + Session(const std::string& host, int rpcPort, const std::string& username, const std::string& password) + : fetchSize_(DEFAULT_FETCH_SIZE) { + this->host_ = host; + this->rpcPort_ = rpcPort; + this->username_ = username; + this->password_ = password; this->version = Version::V_1_0; initZoneId(); + initNodesSupplier(); } - Session(const std::string &host, int rpcPort, const std::string &username, const std::string &password, - const std::string &zoneId, int fetchSize = DEFAULT_FETCH_SIZE) { - this->host = host; - this->rpcPort = rpcPort; - this->username = username; - this->password = password; - this->zoneId = zoneId; - this->fetchSize = fetchSize; + Session(const std::string& host, int rpcPort, const std::string& username, const std::string& password, + const std::string& zoneId, int fetchSize = DEFAULT_FETCH_SIZE) { + this->host_ = host; + this->rpcPort_ = rpcPort; + this->username_ = username; + 
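// TsCompare (whose member is presumably std::vector<int64_t>&) orders row
// indexes by their timestamps. A sketch of how sortIndexByTimestamp likely
// applies it, so sortTablet can permute columns into time order; shown here
// with an equivalent lambda (requires <algorithm>):
void sortIndexByTimestampSketch(int* index, std::vector<int64_t>& timestamps, int length) {
    // sort the permutation array, not the timestamps themselves
    std::sort(index, index + length,
              [&](int i, int j) { return timestamps[i] < timestamps[j]; });
}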
this->password_ = password; + this->zoneId_ = zoneId; + this->fetchSize_ = fetchSize; + this->version = Version::V_1_0; + initZoneId(); + initNodesSupplier(); + } + + Session(const std::string& host, const std::string& rpcPort, const std::string& username = "user", + const std::string& password = "password", const std::string& zoneId = "", + int fetchSize = DEFAULT_FETCH_SIZE) { + this->host_ = host; + this->rpcPort_ = stoi(rpcPort); + this->username_ = username; + this->password_ = password; + this->zoneId_ = zoneId; + this->fetchSize_ = fetchSize; this->version = Version::V_1_0; initZoneId(); + initNodesSupplier(); } - Session(const std::string &host, const std::string &rpcPort, const std::string &username = "user", - const std::string &password = "password", const std::string &zoneId="", int fetchSize = DEFAULT_FETCH_SIZE) { - this->host = host; - this->rpcPort = stoi(rpcPort); - this->username = username; - this->password = password; - this->zoneId = zoneId; - this->fetchSize = fetchSize; + Session(AbstractSessionBuilder* builder) { + this->host_ = builder->host; + this->rpcPort_ = builder->rpcPort; + this->username_ = builder->username; + this->password_ = builder->password; + this->zoneId_ = builder->zoneId; + this->fetchSize_ = builder->fetchSize; this->version = Version::V_1_0; + this->sqlDialect_ = builder->sqlDialect; + this->database_ = builder->database; + this->enableAutoFetch_ = builder->enableAutoFetch; + this->enableRedirection_ = builder->enableRedirections; initZoneId(); + initNodesSupplier(); } ~Session(); - int64_t getSessionId(); - void open(); void open(bool enableRPCCompression); @@ -1056,196 +728,287 @@ class Session { void close(); - void setTimeZone(const std::string &zoneId); + void setTimeZone(const std::string& zoneId); std::string getTimeZone(); - void insertRecord(const std::string &deviceId, int64_t time, const std::vector &measurements, - const std::vector &values); - - void insertRecord(const std::string &deviceId, int64_t time, const std::vector &measurements, - const std::vector &types, const std::vector &values); - - void insertAlignedRecord(const std::string &deviceId, int64_t time, const std::vector &measurements, - const std::vector &values); - - void insertAlignedRecord(const std::string &deviceId, int64_t time, const std::vector &measurements, - const std::vector &types, const std::vector &values); - - void insertRecords(const std::vector &deviceIds, - const std::vector ×, - const std::vector> &measurementsList, - const std::vector> &valuesList); - - void insertRecords(const std::vector &deviceIds, - const std::vector ×, - const std::vector> &measurementsList, - const std::vector> &typesList, - const std::vector> &valuesList); - - void insertAlignedRecords(const std::vector &deviceIds, - const std::vector ×, - const std::vector> &measurementsList, - const std::vector> &valuesList); - - void insertAlignedRecords(const std::vector &deviceIds, - const std::vector ×, - const std::vector> &measurementsList, - const std::vector> &typesList, - const std::vector> &valuesList); - - void insertRecordsOfOneDevice(const std::string &deviceId, - std::vector ×, - std::vector> &measurementsList, - std::vector> &typesList, - std::vector> &valuesList); - - void insertRecordsOfOneDevice(const std::string &deviceId, - std::vector ×, - std::vector> &measurementsList, - std::vector> &typesList, - std::vector> &valuesList, + void insertRecord(const std::string& deviceId, int64_t time, const std::vector& measurements, + const std::vector& values); + + void 
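// A usage sketch of the builder-based constructor, assuming
// AbstractSessionBuilder is default-constructible and exposes the public
// fields read above (host, rpcPort, username, password, zoneId, fetchSize,
// sqlDialect, database, enableAutoFetch, enableRedirections):
AbstractSessionBuilder builder;
builder.host = "127.0.0.1";
builder.rpcPort = 6667;
builder.username = "root";
builder.password = "root";
builder.sqlDialect = "tree";   // the default dialect noted above
Session session(&builder);
session.open(/*enableRPCCompression=*/false);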
insertRecord(const std::string& deviceId, int64_t time, const std::vector& measurements, + const std::vector& types, const std::vector& values); + + void insertAlignedRecord(const std::string& deviceId, int64_t time, const std::vector& measurements, + const std::vector& values); + + void insertAlignedRecord(const std::string& deviceId, int64_t time, const std::vector& measurements, + const std::vector& types, const std::vector& values); + + void insertRecords(const std::vector& deviceIds, + const std::vector& times, + const std::vector>& measurementsList, + const std::vector>& valuesList); + + void insertRecords(const std::vector& deviceIds, + const std::vector& times, + const std::vector>& measurementsList, + const std::vector>& typesList, + const std::vector>& valuesList); + + void insertAlignedRecords(const std::vector& deviceIds, + const std::vector& times, + const std::vector>& measurementsList, + const std::vector>& valuesList); + + void insertAlignedRecords(const std::vector& deviceIds, + const std::vector& times, + const std::vector>& measurementsList, + const std::vector>& typesList, + const std::vector>& valuesList); + + void insertRecordsOfOneDevice(const std::string& deviceId, + std::vector& times, + std::vector>& measurementsList, + std::vector>& typesList, + std::vector>& valuesList); + + void insertRecordsOfOneDevice(const std::string& deviceId, + std::vector& times, + std::vector>& measurementsList, + std::vector>& typesList, + std::vector>& valuesList, bool sorted); - void insertAlignedRecordsOfOneDevice(const std::string &deviceId, - std::vector ×, - std::vector> &measurementsList, - std::vector> &typesList, - std::vector> &valuesList); - - void insertAlignedRecordsOfOneDevice(const std::string &deviceId, - std::vector ×, - std::vector> &measurementsList, - std::vector> &typesList, - std::vector> &valuesList, + void insertAlignedRecordsOfOneDevice(const std::string& deviceId, + std::vector& times, + std::vector>& measurementsList, + std::vector>& typesList, + std::vector>& valuesList); + + void insertAlignedRecordsOfOneDevice(const std::string& deviceId, + std::vector& times, + std::vector>& measurementsList, + std::vector>& typesList, + std::vector>& valuesList, bool sorted); - void insertTablet(Tablet &tablet); + void insertTablet(Tablet& tablet); + + void insertTablet(Tablet& tablet, bool sorted); - void insertTablet(Tablet &tablet, bool sorted); + static void buildInsertTabletReq(TSInsertTabletReq& request, Tablet& tablet, bool sorted); - static void buildInsertTabletReq(TSInsertTabletReq &request, int64_t sessionId, Tablet &tablet, bool sorted); + void insertTablet(TSInsertTabletReq request); - void insertTablet(const TSInsertTabletReq &request); + void insertAlignedTablet(Tablet& tablet); - void insertAlignedTablet(Tablet &tablet); + void insertAlignedTablet(Tablet& tablet, bool sorted); - void insertAlignedTablet(Tablet &tablet, bool sorted); + void insertTablets(std::unordered_map& tablets); - void insertTablets(std::unordered_map &tablets); + void insertTablets(std::unordered_map& tablets, bool sorted); - void insertTablets(std::unordered_map &tablets, bool sorted); + void insertAlignedTablets(std::unordered_map& tablets, bool sorted = false); - void insertAlignedTablets(std::unordered_map &tablets, bool sorted = false); + void testInsertRecord(const std::string& deviceId, int64_t time, + const std::vector& measurements, + const std::vector& values); - void testInsertRecord(const std::string &deviceId, int64_t time, - const std::vector &measurements, - const 
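// A usage sketch of the untyped insertRecord overload above (string values,
// presumably parsed server-side via the insertStringRecord RPC):
std::vector<std::string> measurements{"temperature", "status"};
std::vector<std::string> values{"25.5", "true"};
session.insertRecord("root.sg1.d1", 1700000000000LL, measurements, values);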
std::vector &values); + void testInsertTablet(const Tablet& tablet); - void testInsertTablet(const Tablet &tablet); + void testInsertRecords(const std::vector& deviceIds, + const std::vector& times, + const std::vector>& measurementsList, + const std::vector>& valuesList); - void testInsertRecords(const std::vector &deviceIds, - const std::vector ×, - const std::vector> &measurementsList, - const std::vector> &valuesList); + void deleteTimeseries(const std::string& path); - void deleteTimeseries(const std::string &path); + void deleteTimeseries(const std::vector& paths); - void deleteTimeseries(const std::vector &paths); + void deleteData(const std::string& path, int64_t endTime); - void deleteData(const std::string &path, int64_t endTime); + void deleteData(const std::vector& paths, int64_t endTime); - void deleteData(const std::vector &paths, int64_t endTime); + void deleteData(const std::vector& paths, int64_t startTime, int64_t endTime); - void deleteData(const std::vector &paths, int64_t startTime, int64_t endTime); + void setStorageGroup(const std::string& storageGroupId); - void setStorageGroup(const std::string &storageGroupId); + void deleteStorageGroup(const std::string& storageGroup); - void deleteStorageGroup(const std::string &storageGroup); + void deleteStorageGroups(const std::vector& storageGroups); - void deleteStorageGroups(const std::vector &storageGroups); + void createDatabase(const std::string& database); - void createTimeseries(const std::string &path, TSDataType::TSDataType dataType, TSEncoding::TSEncoding encoding, + void deleteDatabase(const std::string& database); + + void deleteDatabases(const std::vector& databases); + + void createTimeseries(const std::string& path, TSDataType::TSDataType dataType, TSEncoding::TSEncoding encoding, CompressionType::CompressionType compressor); - void createTimeseries(const std::string &path, TSDataType::TSDataType dataType, TSEncoding::TSEncoding encoding, + void createTimeseries(const std::string& path, TSDataType::TSDataType dataType, TSEncoding::TSEncoding encoding, CompressionType::CompressionType compressor, - std::map *props, std::map *tags, - std::map *attributes, - const std::string &measurementAlias); + std::map* props, std::map* tags, + std::map* attributes, + const std::string& measurementAlias); + + void createMultiTimeseries(const std::vector& paths, + const std::vector& dataTypes, + const std::vector& encodings, + const std::vector& compressors, + std::vector>* propsList, + std::vector>* tagsList, + std::vector>* attributesList, + std::vector* measurementAliasList); - void createMultiTimeseries(const std::vector &paths, - const std::vector &dataTypes, - const std::vector &encodings, - const std::vector &compressors, - std::vector> *propsList, - std::vector> *tagsList, - std::vector> *attributesList, - std::vector *measurementAliasList); + void createAlignedTimeseries(const std::string& deviceId, + const std::vector& measurements, + const std::vector& dataTypes, + const std::vector& encodings, + const std::vector& compressors); - void createAlignedTimeseries(const std::string &deviceId, - const std::vector &measurements, - const std::vector &dataTypes, - const std::vector &encodings, - const std::vector &compressors); + bool checkTimeseriesExists(const std::string& path); - bool checkTimeseriesExists(const std::string &path); + std::unique_ptr executeQueryStatement(const std::string& sql); - std::unique_ptr executeQueryStatement(const std::string &sql) ; + std::unique_ptr executeQueryStatement(const std::string& 
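// A usage sketch of the management calls above (assuming the usual
// TSEncoding / CompressionType enumerators; paths are illustrative):
session.setStorageGroup("root.sg1");
session.createTimeseries("root.sg1.d1.temperature", TSDataType::FLOAT,
                         TSEncoding::GORILLA, CompressionType::SNAPPY);
if (session.checkTimeseriesExists("root.sg1.d1.temperature")) {
    session.deleteData({"root.sg1.d1.temperature"}, /*endTime=*/1700000000000LL);
}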
sql, int64_t timeoutInMs); - std::unique_ptr executeQueryStatement(const std::string &sql, int64_t timeoutInMs) ; + std::unique_ptr executeQueryStatementMayRedirect(const std::string& sql, int64_t timeoutInMs); - void executeNonQueryStatement(const std::string &sql); + void executeNonQueryStatement(const std::string& sql); - std::unique_ptr executeRawDataQuery(const std::vector &paths, int64_t startTime, int64_t endTime); + std::unique_ptr executeRawDataQuery(const std::vector& paths, int64_t startTime, + int64_t endTime); - std::unique_ptr executeLastDataQuery(const std::vector &paths); + std::unique_ptr executeLastDataQuery(const std::vector& paths); - std::unique_ptr executeLastDataQuery(const std::vector &paths, int64_t lastTime); + std::unique_ptr executeLastDataQuery(const std::vector& paths, int64_t lastTime); - void createSchemaTemplate(const Template &templ); + void createSchemaTemplate(const Template& templ); - void setSchemaTemplate(const std::string &template_name, const std::string &prefix_path); + void setSchemaTemplate(const std::string& template_name, const std::string& prefix_path); - void unsetSchemaTemplate(const std::string &prefix_path, const std::string &template_name); + void unsetSchemaTemplate(const std::string& prefix_path, const std::string& template_name); - void addAlignedMeasurementsInTemplate(const std::string &template_name, - const std::vector &measurements, - const std::vector &dataTypes, - const std::vector &encodings, - const std::vector &compressors); + void addAlignedMeasurementsInTemplate(const std::string& template_name, + const std::vector& measurements, + const std::vector& dataTypes, + const std::vector& encodings, + const std::vector& compressors); - void addAlignedMeasurementsInTemplate(const std::string &template_name, - const std::string &measurement, + void addAlignedMeasurementsInTemplate(const std::string& template_name, + const std::string& measurement, TSDataType::TSDataType dataType, TSEncoding::TSEncoding encoding, CompressionType::CompressionType compressor); - void addUnalignedMeasurementsInTemplate(const std::string &template_name, - const std::vector &measurements, - const std::vector &dataTypes, - const std::vector &encodings, - const std::vector &compressors); + void addUnalignedMeasurementsInTemplate(const std::string& template_name, + const std::vector& measurements, + const std::vector& dataTypes, + const std::vector& encodings, + const std::vector& compressors); - void addUnalignedMeasurementsInTemplate(const std::string &template_name, - const std::string &measurement, + void addUnalignedMeasurementsInTemplate(const std::string& template_name, + const std::string& measurement, TSDataType::TSDataType dataType, TSEncoding::TSEncoding encoding, CompressionType::CompressionType compressor); - void deleteNodeInTemplate(const std::string &template_name, const std::string &path); + void deleteNodeInTemplate(const std::string& template_name, const std::string& path); - int countMeasurementsInTemplate(const std::string &template_name); + int countMeasurementsInTemplate(const std::string& template_name); - bool isMeasurementInTemplate(const std::string &template_name, const std::string &path); + bool isMeasurementInTemplate(const std::string& template_name, const std::string& path); - bool isPathExistInTemplate(const std::string &template_name, const std::string &path); + bool isPathExistInTemplate(const std::string& template_name, const std::string& path); - std::vector showMeasurementsInTemplate(const std::string &template_name); + 
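// A sketch tying the Template / MeasurementNode API to the schema-template
// calls above (names and enumerators are illustrative):
Template tpl("t1", /*is_aligned=*/true);
MeasurementNode m("temperature", TSDataType::FLOAT, TSEncoding::GORILLA,
                  CompressionType::SNAPPY);
tpl.addToTemplate(m);
session.createSchemaTemplate(tpl);
session.setSchemaTemplate("t1", "root.sg1.d1");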
std::vector showMeasurementsInTemplate(const std::string& template_name); - std::vector showMeasurementsInTemplate(const std::string &template_name, const std::string &pattern); + std::vector showMeasurementsInTemplate(const std::string& template_name, const std::string& pattern); - bool checkTemplateExists(const std::string &template_name); + bool checkTemplateExists(const std::string& template_name); }; +template +void Session::insertByGroup(std::unordered_map, T>& insertGroup, + InsertConsumer insertConsumer) { + std::vector> futures; + + for (auto& entry : insertGroup) { + auto connection = entry.first; + auto& req = entry.second; + futures.emplace_back(std::async(std::launch::async, [=, &req]() mutable { + try { + insertConsumer(connection, req); + } + catch (const RedirectException& e) { + for (const auto& deviceEndPoint : e.deviceEndPointMap) { + handleRedirection(deviceEndPoint.first, deviceEndPoint.second); + } + } catch (const IoTDBConnectionException& e) { + if (endPointToSessionConnection.size() > 1) { + removeBrokenSessionConnection(connection); + try { + insertConsumer(defaultSessionConnection_, req); + } + catch (const RedirectException&) { + } + } + else { + throw; + } + } catch (const std::exception& e) { + log_debug(e.what()); + throw IoTDBException(e.what()); + } + })); + } + + std::string errorMessages; + for (auto& f : futures) { + try { + f.get(); + } + catch (const IoTDBConnectionException& e) { + throw; + } catch (const std::exception& e) { + if (!errorMessages.empty()) { + errorMessages += ";"; + } + errorMessages += e.what(); + } + } + + if (!errorMessages.empty()) { + throw StatementExecutionException(errorMessages); + } +} + +template +void Session::insertOnce(std::unordered_map, T>& insertGroup, + InsertConsumer insertConsumer) { + auto connection = insertGroup.begin()->first; + auto req = insertGroup.begin()->second; + try { + insertConsumer(connection, req); + } + catch (RedirectException e) { + for (const auto& deviceEndPoint : e.deviceEndPointMap) { + handleRedirection(deviceEndPoint.first, deviceEndPoint.second); + } + } catch (IoTDBConnectionException e) { + if (endPointToSessionConnection.size() > 1) { + removeBrokenSessionConnection(connection); + try { + insertConsumer(defaultSessionConnection_, req); + } + catch (RedirectException e) { + } + } + else { + throw e; + } + } +} + #endif // IOTDB_SESSION_H diff --git a/iotdb-client/client-cpp/src/main/SessionConnection.cpp b/iotdb-client/client-cpp/src/main/SessionConnection.cpp new file mode 100644 index 0000000000000..e52ccd768a989 --- /dev/null +++ b/iotdb-client/client-cpp/src/main/SessionConnection.cpp @@ -0,0 +1,673 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +#include "SessionConnection.h" +#include "Session.h" +#include "common_types.h" +#include + +#include + +#include "SessionDataSet.h" + +using namespace apache::thrift; +using namespace apache::thrift::protocol; +using namespace apache::thrift::transport; + +SessionConnection::SessionConnection(Session* session_ptr, const TEndPoint& endpoint, + const std::string& zoneId, + std::shared_ptr nodeSupplier, + int fetchSize, + int maxRetries, + int64_t retryInterval, + std::string dialect, + std::string db) + : session(session_ptr), + zoneId(zoneId), + endPoint(endpoint), + availableNodes(std::move(nodeSupplier)), + fetchSize(fetchSize), + maxRetryCount(maxRetries), + retryIntervalMs(retryInterval), + sqlDialect(std::move(dialect)), + database(std::move(db)) { + this->zoneId = zoneId.empty() ? getSystemDefaultZoneId() : zoneId; + endPointList.push_back(endpoint); + init(endPoint); +} + +void SessionConnection::close() { + bool needThrowException = false; + string errMsg; + session = nullptr; + try { + TSCloseSessionReq req; + req.__set_sessionId(sessionId); + TSStatus tsStatus; + client->closeSession(tsStatus, req); + } + catch (const TTransportException& e) { + log_debug(e.what()); + throw IoTDBConnectionException(e.what()); + } catch (const exception& e) { + log_debug(e.what()); + errMsg = errMsg + "Session::close() client->closeSession() error, maybe remote server is down. " + e.what() + + "\n"; + needThrowException = true; + } + + try { + if (transport->isOpen()) { + transport->close(); + } + } + catch (const exception& e) { + log_debug(e.what()); + errMsg = errMsg + "Session::close() transport->close() error. " + e.what() + "\n"; + needThrowException = true; + } + + if (needThrowException) { + throw IoTDBException(errMsg); + } +} + +SessionConnection::~SessionConnection() { + try { + close(); + } + catch (const exception& e) { + log_debug(e.what()); + } +} + +void SessionConnection::init(const TEndPoint& endpoint) { + shared_ptr socket(new TSocket(endpoint.ip, endpoint.port)); + transport = std::make_shared(socket); + socket->setConnTimeout(connectionTimeoutInMs); + if (!transport->isOpen()) { + try { + transport->open(); + } + catch (TTransportException& e) { + log_debug(e.what()); + throw IoTDBConnectionException(e.what()); + } + } + if (enableRPCCompression) { + shared_ptr protocol(new TCompactProtocol(transport)); + client = std::make_shared(protocol); + } + else { + shared_ptr protocol(new TBinaryProtocol(transport)); + client = std::make_shared(protocol); + } + + std::map configuration; + configuration["version"] = session->getVersionString(session->version); + configuration["sql_dialect"] = sqlDialect; + if (database != "") { + configuration["db"] = database; + } + TSOpenSessionReq openReq; + openReq.__set_username(session->username_); + openReq.__set_password(session->password_); + openReq.__set_zoneId(zoneId); + openReq.__set_configuration(configuration); + try { + TSOpenSessionResp openResp; + client->openSession(openResp, openReq); + RpcUtils::verifySuccess(openResp.status); + if (session->protocolVersion_ != openResp.serverProtocolVersion) { + if (openResp.serverProtocolVersion == 0) { + // less than 0.10 + throw logic_error(string("Protocol not supported, Client version is ") + + to_string(session->protocolVersion_) + + ", but Server version is " + to_string(openResp.serverProtocolVersion)); + } + } + + sessionId = openResp.sessionId; + statementId = client->requestStatementId(sessionId); + + if (!zoneId.empty()) { + setTimeZone(zoneId); + } + } + catch (const 
TTransportException& e) { + log_debug(e.what()); + transport->close(); + throw IoTDBConnectionException(e.what()); + } catch (const IoTDBException& e) { + log_debug(e.what()); + transport->close(); + throw; + } catch (const exception& e) { + log_debug(e.what()); + transport->close(); + throw; + } +} + +std::unique_ptr SessionConnection::executeQueryStatement(const std::string& sql, int64_t timeoutInMs) { + TSExecuteStatementReq req; + req.__set_sessionId(sessionId); + req.__set_statementId(statementId); + req.__set_statement(sql); + req.__set_timeout(timeoutInMs); + req.__set_enableRedirectQuery(true); + + auto result = callWithRetryAndReconnect( + [this, &req]() { + TSExecuteStatementResp resp; + client->executeQueryStatementV2(resp, req); + return resp; + }, + [](const TSExecuteStatementResp& resp) { + return resp.status; + } + ); + TSExecuteStatementResp resp = result.getResult(); + if (result.getRetryAttempts() == 0) { + RpcUtils::verifySuccessWithRedirection(resp.status); + } + else { + RpcUtils::verifySuccess(resp.status); + } + + return std::unique_ptr(new SessionDataSet(sql, resp.columns, resp.dataTypeList, + resp.columnNameIndexMap, resp.queryId, statementId, + client, sessionId, resp.queryResult, resp.ignoreTimeStamp, + connectionTimeoutInMs, resp.moreData, fetchSize, zoneId)); +} + +std::unique_ptr SessionConnection::executeRawDataQuery(const std::vector& paths, + int64_t startTime, int64_t endTime) { + TSRawDataQueryReq req; + req.__set_sessionId(sessionId); + req.__set_statementId(statementId); + req.__set_fetchSize(fetchSize); + req.__set_paths(paths); + req.__set_startTime(startTime); + req.__set_endTime(endTime); + auto result = callWithRetryAndReconnect( + [this, &req]() { + TSExecuteStatementResp resp; + client->executeRawDataQueryV2(resp, req); + return resp; + }, + [](const TSExecuteStatementResp& resp) { + return resp.status; + } + ); + TSExecuteStatementResp resp = result.getResult(); + if (result.getRetryAttempts() == 0) { + RpcUtils::verifySuccessWithRedirection(resp.status); + } + else { + RpcUtils::verifySuccess(resp.status); + } + return std::unique_ptr(new SessionDataSet("", resp.columns, resp.dataTypeList, + resp.columnNameIndexMap, resp.queryId, statementId, + client, sessionId, resp.queryResult, resp.ignoreTimeStamp, + connectionTimeoutInMs, resp.moreData, fetchSize, zoneId)); +} + +std::unique_ptr SessionConnection::executeLastDataQuery(const std::vector& paths, + int64_t lastTime) { + TSLastDataQueryReq req; + req.__set_sessionId(sessionId); + req.__set_statementId(statementId); + req.__set_fetchSize(fetchSize); + req.__set_paths(paths); + req.__set_time(lastTime); + + auto result = callWithRetryAndReconnect( + [this, &req]() { + TSExecuteStatementResp resp; + client->executeLastDataQuery(resp, req); + return resp; + }, + [](const TSExecuteStatementResp& resp) { + return resp.status; + } + ); + TSExecuteStatementResp resp = result.getResult(); + if (result.getRetryAttempts() == 0) { + RpcUtils::verifySuccessWithRedirection(resp.status); + } + else { + RpcUtils::verifySuccess(resp.status); + } + return std::unique_ptr(new SessionDataSet("", resp.columns, resp.dataTypeList, + resp.columnNameIndexMap, resp.queryId, statementId, + client, sessionId, resp.queryResult, resp.ignoreTimeStamp, + connectionTimeoutInMs, resp.moreData, fetchSize, zoneId)); +} + +void SessionConnection::executeNonQueryStatement(const string& sql) { + TSExecuteStatementReq req; + req.__set_sessionId(sessionId); + req.__set_statementId(statementId); + req.__set_statement(sql); + 
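// Note on the pattern above: verifySuccessWithRedirection (which presumably
// raises RedirectException so the caller can move to the advised node) is
// applied only when the call succeeded without retries; after a retry the
// connection may already point at a different node, so plain verifySuccess
// is used instead.
//
// Related side note on Session::insertOnce earlier: it catches
// RedirectException / IoTDBConnectionException by value and rethrows with
// `throw e`, which copies and can slice the exception; catching by const
// reference and rethrowing with a bare `throw;`, as insertByGroup already
// does, is the safer form.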
req.__set_timeout(0); //0 means no timeout. This value keep consistent to JAVA SDK. + TSExecuteStatementResp resp; + try { + client->executeUpdateStatementV2(resp, req); + RpcUtils::verifySuccess(resp.status); + } + catch (const TTransportException& e) { + log_debug(e.what()); + throw IoTDBConnectionException(e.what()); + } catch (const IoTDBException& e) { + log_debug(e.what()); + throw; + } catch (const exception& e) { + throw IoTDBException(e.what()); + } +} + +const TEndPoint& SessionConnection::getEndPoint() { + return endPoint; +} + +void SessionConnection::setTimeZone(const std::string& newZoneId) { + TSSetTimeZoneReq req; + req.__set_sessionId(sessionId); + req.__set_timeZone(newZoneId); + + try { + TSStatus tsStatus; + client->setTimeZone(tsStatus, req); + zoneId = newZoneId; + } + catch (const TException& e) { + throw IoTDBConnectionException(e.what()); + } +} + +std::string SessionConnection::getSystemDefaultZoneId() { + time_t ts = 0; + struct tm tmv{}; +#if defined(_WIN64) || defined (WIN32) || defined (_WIN32) + localtime_s(&tmv, &ts); +#else + localtime_r(&ts, &tmv); +#endif + char zoneStr[32]; + strftime(zoneStr, sizeof(zoneStr), "%z", &tmv); + return zoneStr; +} + +bool SessionConnection::reconnect() { + bool reconnect = false; + for (int i = 1; i <= 3; i++) { + if (transport != nullptr) { + transport->close(); + endPointList = std::move(availableNodes->getEndPointList()); + int currHostIndex = rand() % endPointList.size(); + int tryHostNum = 0; + for (int j = currHostIndex; j < endPointList.size(); j++) { + if (tryHostNum == endPointList.size()) { + break; + } + this->endPoint = endPointList[j]; + if (j == endPointList.size() - 1) { + j = -1; + } + tryHostNum++; + try { + init(this->endPoint); + reconnect = true; + } + catch (const IoTDBConnectionException& e) { + log_warn("The current node may have been down, connection exception: %s", e.what()); + continue; + } catch (exception& e) { + log_warn("login in failed, because %s", e.what()); + } + break; + } + } + if (reconnect) { + session->removeBrokenSessionConnection(shared_from_this()); + session->defaultEndPoint_ = this->endPoint; + session->defaultSessionConnection_ = shared_from_this(); + session->endPointToSessionConnection.insert(make_pair(this->endPoint, shared_from_this())); + } + } + return reconnect; +} + +void SessionConnection::insertStringRecord(const TSInsertStringRecordReq& request) { + auto rpc = [this, request]() { + return this->insertStringRecordInternal(request); + }; + callWithRetryAndVerifyWithRedirection(rpc); +} + +void SessionConnection::insertRecord(const TSInsertRecordReq& request) { + auto rpc = [this, request]() { + return this->insertRecordInternal(request); + }; + callWithRetryAndVerifyWithRedirection(rpc); +} + +void SessionConnection::insertStringRecords(const TSInsertStringRecordsReq& request) { + auto rpc = [this, request]() { + return this->insertStringRecordsInternal(request); + }; + callWithRetryAndVerifyWithRedirection(rpc); +} + +void SessionConnection::insertRecords(const TSInsertRecordsReq& request) { + auto rpc = [this, request]() { + return this->insertRecordsInternal(request); + }; + callWithRetryAndVerifyWithRedirection(rpc); +} + +void SessionConnection::insertRecordsOfOneDevice(TSInsertRecordsOfOneDeviceReq request) { + auto rpc = [this, request]() { + return this->insertRecordsOfOneDeviceInternal(request); + }; + callWithRetryAndVerifyWithRedirection(rpc); +} + +void SessionConnection::insertStringRecordsOfOneDevice(TSInsertStringRecordsOfOneDeviceReq request) { + auto rpc 
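// Note on reconnect() above: it refreshes the endpoint list from the nodes
// supplier, starts at a random position (rand() % endPointList.size()), and
// walks the list with wrap-around (resetting j to -1 at the tail) so every
// node is tried at most once per pass; on success the fresh connection is
// re-registered as the session's default connection.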
= [this, request]() { + return this->insertStringRecordsOfOneDeviceInternal(request); + }; + callWithRetryAndVerifyWithRedirection(rpc); +} + +void SessionConnection::insertTablet(TSInsertTabletReq request) { + auto rpc = [this, request]() { + return this->insertTabletInternal(request); + }; + callWithRetryAndVerifyWithRedirection(rpc); +} + +void SessionConnection::insertTablets(TSInsertTabletsReq request) { + auto rpc = [this, request]() { + return this->insertTabletsInternal(request); + }; + callWithRetryAndVerifyWithRedirection(rpc); +} + +void SessionConnection::testInsertStringRecord(TSInsertStringRecordReq& request) { + auto rpc = [this, &request]() { + request.sessionId = sessionId; + TSStatus ret; + client->testInsertStringRecord(ret, request); + return ret; + }; + auto status = callWithRetryAndReconnect(rpc).getResult(); + RpcUtils::verifySuccess(status); +} + +void SessionConnection::testInsertTablet(TSInsertTabletReq& request) { + auto rpc = [this, &request]() { + request.sessionId = sessionId; + TSStatus ret; + client->testInsertTablet(ret, request); + return ret; + }; + auto status = callWithRetryAndReconnect(rpc).getResult(); + RpcUtils::verifySuccess(status); +} + +void SessionConnection::testInsertRecords(TSInsertRecordsReq& request) { + auto rpc = [this, &request]() { + request.sessionId = sessionId; + TSStatus ret; + client->testInsertRecords(ret, request); + return ret; + }; + auto status = callWithRetryAndReconnect(rpc).getResult(); + RpcUtils::verifySuccess(status); +} + +void SessionConnection::deleteTimeseries(const vector& paths) { + auto rpc = [this, &paths]() { + TSStatus ret; + client->deleteTimeseries(ret, sessionId, paths); + return ret; + }; + callWithRetryAndVerify(rpc); +} + +void SessionConnection::deleteData(const TSDeleteDataReq& request) { + auto rpc = [this, request]() { + return this->deleteDataInternal(request); + }; + callWithRetryAndVerify(rpc); +} + +void SessionConnection::setStorageGroup(const string& storageGroupId) { + auto rpc = [this, &storageGroupId]() { + TSStatus ret; + client->setStorageGroup(ret, sessionId, storageGroupId); + return ret; + }; + auto ret = callWithRetryAndReconnect(rpc); + RpcUtils::verifySuccess(ret.getResult()); +} + +void SessionConnection::deleteStorageGroups(const vector& storageGroups) { + auto rpc = [this, &storageGroups]() { + TSStatus ret; + client->deleteStorageGroups(ret, sessionId, storageGroups); + return ret; + }; + auto ret = callWithRetryAndReconnect(rpc); + RpcUtils::verifySuccess(ret.getResult()); +} + +void SessionConnection::createTimeseries(TSCreateTimeseriesReq& req) { + auto rpc = [this, &req]() { + TSStatus ret; + req.sessionId = sessionId; + client->createTimeseries(ret, req); + return ret; + }; + auto ret = callWithRetryAndReconnect(rpc); + RpcUtils::verifySuccess(ret.getResult()); +} + +void SessionConnection::createMultiTimeseries(TSCreateMultiTimeseriesReq& req) { + auto rpc = [this, &req]() { + TSStatus ret; + req.sessionId = sessionId; + client->createMultiTimeseries(ret, req); + return ret; + }; + auto ret = callWithRetryAndReconnect(rpc); + RpcUtils::verifySuccess(ret.getResult()); +} + +void SessionConnection::createAlignedTimeseries(TSCreateAlignedTimeseriesReq& req) { + auto rpc = [this, &req]() { + TSStatus ret; + req.sessionId = sessionId; + client->createAlignedTimeseries(ret, req); + return ret; + }; + auto ret = callWithRetryAndReconnect(rpc); + RpcUtils::verifySuccess(ret.getResult()); +} + +TSGetTimeZoneResp SessionConnection::getTimeZone() { + auto rpc = [this]() { + 
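// The RPC wrappers above and below all follow one idiom: wrap the thrift
// call in a lambda that stamps the current sessionId into the request, run
// it through callWithRetryAndReconnect (so a broken connection is
// transparently re-established and the call replayed), then verify the
// resulting TSStatus with RpcUtils::verifySuccess.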
TSGetTimeZoneResp resp; + client->getTimeZone(resp, sessionId); + zoneId = resp.timeZone; + return resp; + }; + auto ret = callWithRetryAndReconnect(rpc, + [](const TSGetTimeZoneResp& resp) { + return resp.status; + }); + RpcUtils::verifySuccess(ret.getResult().status); + return ret.result; +} + +void SessionConnection::setTimeZone(TSSetTimeZoneReq& req) { + auto rpc = [this, &req]() { + TSStatus ret; + req.sessionId = sessionId; + client->setTimeZone(ret, req); + zoneId = req.timeZone; + return ret; + }; + auto ret = callWithRetryAndReconnect(rpc); + RpcUtils::verifySuccess(ret.getResult()); +} + +void SessionConnection::createSchemaTemplate(TSCreateSchemaTemplateReq req) { + auto rpc = [this, &req]() { + TSStatus ret; + req.sessionId = sessionId; + client->createSchemaTemplate(ret, req); + return ret; + }; + auto ret = callWithRetryAndReconnect(rpc); + RpcUtils::verifySuccess(ret.getResult()); +} + +void SessionConnection::setSchemaTemplate(TSSetSchemaTemplateReq req) { + auto rpc = [this, &req]() { + TSStatus ret; + req.sessionId = sessionId; + client->setSchemaTemplate(ret, req); + return ret; + }; + auto ret = callWithRetryAndReconnect(rpc); + RpcUtils::verifySuccess(ret.getResult()); +} + +void SessionConnection::unsetSchemaTemplate(TSUnsetSchemaTemplateReq req) { + auto rpc = [this, &req]() { + TSStatus ret; + req.sessionId = sessionId; + client->unsetSchemaTemplate(ret, req); + return ret; + }; + auto ret = callWithRetryAndReconnect(rpc); + RpcUtils::verifySuccess(ret.getResult()); +} + +void SessionConnection::appendSchemaTemplate(TSAppendSchemaTemplateReq req) { + auto rpc = [this, &req]() { + TSStatus ret; + req.sessionId = sessionId; + client->appendSchemaTemplate(ret, req); + return ret; + }; + auto ret = callWithRetryAndReconnect(rpc); + RpcUtils::verifySuccess(ret.getResult()); +} + +void SessionConnection::pruneSchemaTemplate(TSPruneSchemaTemplateReq req) { + auto rpc = [this, &req]() { + TSStatus ret; + req.sessionId = sessionId; + client->pruneSchemaTemplate(ret, req); + return ret; + }; + auto ret = callWithRetryAndReconnect(rpc); + RpcUtils::verifySuccess(ret.getResult()); +} + +TSQueryTemplateResp SessionConnection::querySchemaTemplate(TSQueryTemplateReq req) { + auto rpc = [this, &req]() { + TSQueryTemplateResp ret; + req.sessionId = sessionId; + client->querySchemaTemplate(ret, req); + return ret; + }; + auto ret = callWithRetryAndReconnect(rpc, + [](const TSQueryTemplateResp& resp) { + return resp.status; + }); + RpcUtils::verifySuccess(ret.getResult().status); + return ret.getResult(); +} + +TSStatus SessionConnection::insertStringRecordInternal(TSInsertStringRecordReq request) { + request.sessionId = sessionId; + TSStatus ret; + client->insertStringRecord(ret, request); + return ret; +} + +TSStatus SessionConnection::insertRecordInternal(TSInsertRecordReq request) { + request.sessionId = sessionId; + TSStatus ret; + client->insertRecord(ret, request); + return ret; +} + +TSStatus SessionConnection::insertStringRecordsInternal(TSInsertStringRecordsReq request) { + request.sessionId = sessionId; + TSStatus ret; + client->insertStringRecords(ret, request); + return ret; +} + +TSStatus SessionConnection::insertRecordsInternal(TSInsertRecordsReq request) { + request.sessionId = sessionId; + TSStatus ret; + client->insertRecords(ret, request); + return ret; +} + +TSStatus SessionConnection::insertRecordsOfOneDeviceInternal(TSInsertRecordsOfOneDeviceReq request) { + request.sessionId = sessionId; + TSStatus ret; + client->insertRecordsOfOneDevice(ret, request); + return 
ret; +} + +TSStatus SessionConnection::insertStringRecordsOfOneDeviceInternal(TSInsertStringRecordsOfOneDeviceReq request) { + request.sessionId = sessionId; + TSStatus ret; + client->insertStringRecordsOfOneDevice(ret, request); + return ret; +} + +TSStatus SessionConnection::insertTabletInternal(TSInsertTabletReq request) { + request.sessionId = sessionId; + TSStatus ret; + client->insertTablet(ret, request); + return ret; +} + +TSStatus SessionConnection::insertTabletsInternal(TSInsertTabletsReq request) { + request.sessionId = sessionId; + TSStatus ret; + client->insertTablets(ret, request); + return ret; +} + +TSStatus SessionConnection::deleteDataInternal(TSDeleteDataReq request) { + request.sessionId = sessionId; + TSStatus ret; + client->deleteData(ret, request); + return ret; +} diff --git a/iotdb-client/client-cpp/src/main/SessionConnection.h b/iotdb-client/client-cpp/src/main/SessionConnection.h new file mode 100644 index 0000000000000..093c48aae452d --- /dev/null +++ b/iotdb-client/client-cpp/src/main/SessionConnection.h @@ -0,0 +1,364 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+#ifndef IOTDB_SESSIONCONNECTION_H
+#define IOTDB_SESSIONCONNECTION_H
+
+#include <algorithm>
+#include <chrono>
+#include <functional>
+#include <memory>
+#include <string>
+#include <thread>
+#include <utility>
+#include <vector>
+#include "IClientRPCService.h"
+#include "common_types.h"
+#include "NodesSupplier.h"
+#include "Common.h"
+
+class SessionDataSet;
+class Session;
+
+class SessionConnection : public std::enable_shared_from_this<SessionConnection> {
+public:
+    SessionConnection(Session* session_ptr, const TEndPoint& endpoint,
+                      const std::string& zoneId,
+                      std::shared_ptr<NodesSupplier> nodeSupplier,
+                      int fetchSize = 10000,
+                      int maxRetries = 60,
+                      int64_t retryInterval = 500,
+                      std::string dialect = "tree",
+                      std::string db = "");
+
+    ~SessionConnection();
+
+    void setTimeZone(const std::string& newZoneId);
+
+    const TEndPoint& getEndPoint();
+
+    void init(const TEndPoint& endpoint);
+
+    void insertStringRecord(const TSInsertStringRecordReq& request);
+
+    void insertRecord(const TSInsertRecordReq& request);
+
+    void insertStringRecords(const TSInsertStringRecordsReq& request);
+
+    void insertRecords(const TSInsertRecordsReq& request);
+
+    void insertRecordsOfOneDevice(TSInsertRecordsOfOneDeviceReq request);
+
+    void insertStringRecordsOfOneDevice(TSInsertStringRecordsOfOneDeviceReq request);
+
+    void insertTablet(TSInsertTabletReq request);
+
+    void insertTablets(TSInsertTabletsReq request);
+
+    void testInsertStringRecord(TSInsertStringRecordReq& request);
+
+    void testInsertTablet(TSInsertTabletReq& request);
+
+    void testInsertRecords(TSInsertRecordsReq& request);
+
+    void deleteTimeseries(const vector<string>& paths);
+
+    void deleteData(const TSDeleteDataReq& request);
+
+    void setStorageGroup(const string& storageGroupId);
+
+    void deleteStorageGroups(const vector<string>& storageGroups);
+
+    void createTimeseries(TSCreateTimeseriesReq& req);
+
+    void createMultiTimeseries(TSCreateMultiTimeseriesReq& req);
+
+    void createAlignedTimeseries(TSCreateAlignedTimeseriesReq& req);
+
+    TSGetTimeZoneResp getTimeZone();
+
+    void setTimeZone(TSSetTimeZoneReq& req);
+
+    void createSchemaTemplate(TSCreateSchemaTemplateReq req);
+
+    void setSchemaTemplate(TSSetSchemaTemplateReq req);
+
+    void unsetSchemaTemplate(TSUnsetSchemaTemplateReq req);
+
+    void appendSchemaTemplate(TSAppendSchemaTemplateReq req);
+
+    void pruneSchemaTemplate(TSPruneSchemaTemplateReq req);
+
+    TSQueryTemplateResp querySchemaTemplate(TSQueryTemplateReq req);
+
+    std::unique_ptr<SessionDataSet> executeRawDataQuery(const std::vector<std::string>& paths, int64_t startTime,
+                                                        int64_t endTime);
+
+    std::unique_ptr<SessionDataSet> executeLastDataQuery(const std::vector<std::string>& paths, int64_t lastTime);
+
+    void executeNonQueryStatement(const std::string& sql);
+
+    std::unique_ptr<SessionDataSet> executeQueryStatement(const std::string& sql, int64_t timeoutInMs = -1);
+
+    std::shared_ptr<IClientRPCServiceClient> getSessionClient() {
+        return client;
+    }
+
+    friend class Session;
+
+private:
+    void close();
+    std::string getSystemDefaultZoneId();
+    bool reconnect();
+
+    template <typename T>
+    struct RetryResult {
+        T result;
+        std::exception_ptr exception;
+        int retryAttempts;
+
+        RetryResult(T r, std::exception_ptr e, int a)
+            : result(r), exception(e), retryAttempts(a) {
+        }
+
+        int getRetryAttempts() const { return retryAttempts; }
+        T getResult() const { return result; }
+        std::exception_ptr getException() const { return exception; }
+    };
+
+    template <typename T>
+    void callWithRetryAndVerifyWithRedirection(std::function<T()> rpc);
+
+    template <typename T>
+    void callWithRetryAndVerifyWithRedirectionForMultipleDevices(
+        std::function<T()> rpc, const vector<string>& deviceIds);
+
+    template <typename T>
+    RetryResult<T> callWithRetryAndVerify(std::function<T()> rpc);
+
+    template <typename T>
+    RetryResult<T> callWithRetry(std::function<T()> rpc);
+
+    template <typename RpcFunc, typename T = decltype(std::declval<RpcFunc>()())>
+    RetryResult<T> callWithRetryAndReconnect(RpcFunc rpc);
+
+    template <typename RpcFunc, typename StatusGetter, typename T = decltype(std::declval<RpcFunc>()())>
+    RetryResult<T> callWithRetryAndReconnect(RpcFunc rpc, StatusGetter statusGetter);
+
+    template <typename RpcFunc, typename ShouldRetry, typename ForceReconnect, typename T = decltype(std::declval<RpcFunc>()())>
+    RetryResult<T> callWithRetryAndReconnect(RpcFunc rpc, ShouldRetry shouldRetry, ForceReconnect forceReconnect);
+
+    TSStatus insertStringRecordInternal(TSInsertStringRecordReq request);
+
+    TSStatus insertRecordInternal(TSInsertRecordReq request);
+
+    TSStatus insertStringRecordsInternal(TSInsertStringRecordsReq request);
+
+    TSStatus insertRecordsInternal(TSInsertRecordsReq request);
+
+    TSStatus insertRecordsOfOneDeviceInternal(TSInsertRecordsOfOneDeviceReq request);
+
+    TSStatus insertStringRecordsOfOneDeviceInternal(TSInsertStringRecordsOfOneDeviceReq request);
+
+    TSStatus insertTabletInternal(TSInsertTabletReq request);
+
+    TSStatus insertTabletsInternal(TSInsertTabletsReq request);
+
+    TSStatus deleteDataInternal(TSDeleteDataReq request);
+
+    std::shared_ptr<TTransport> transport;
+    std::shared_ptr<IClientRPCServiceClient> client;
+    Session* session;
+    int64_t sessionId{};
+    int64_t statementId{};
+    int64_t connectionTimeoutInMs{};
+    bool enableRPCCompression = false;
+    std::string zoneId;
+    TEndPoint endPoint;
+    std::vector<TEndPoint> endPointList;
+    std::shared_ptr<NodesSupplier> availableNodes;
+    int fetchSize;
+    int maxRetryCount;
+    int64_t retryIntervalMs;
+    std::string sqlDialect;
+    std::string database;
+    int timeFactor = 1000;
+};
+
+template <typename T>
+SessionConnection::RetryResult<T> SessionConnection::callWithRetry(std::function<T()> rpc) {
+    std::exception_ptr lastException = nullptr;
+    TSStatus status;
+    int i;
+    for (i = 0; i <= maxRetryCount; i++) {
+        if (i > 0) {
+            lastException = nullptr;
+            status = TSStatus();
+            try {
+                std::this_thread::sleep_for(
+                    std::chrono::milliseconds(retryIntervalMs));
+            }
+            catch (const std::exception& e) {
+                break;
+            }
+            if (!reconnect()) {
+                continue;
+            }
+        }
+
+        try {
+            status = rpc();
+            if (status.__isset.needRetry && status.needRetry) {
+                continue;
+            }
+            break;
+        }
+        catch (...) {
+            lastException = std::current_exception();
+        }
+    }
+    return {status, lastException, i};
+}
+
+template <typename T>
+void SessionConnection::callWithRetryAndVerifyWithRedirection(std::function<T()> rpc) {
+    auto result = callWithRetry(rpc);
+
+    auto status = result.getResult();
+    if (result.getRetryAttempts() == 0) {
+        RpcUtils::verifySuccessWithRedirection(status);
+    }
+    else {
+        RpcUtils::verifySuccess(status);
+    }
+
+    if (result.getException()) {
+        try {
+            std::rethrow_exception(result.getException());
+        }
+        catch (const std::exception& e) {
+            throw IoTDBConnectionException(e.what());
+        }
+    }
+}
+
+template <typename T>
+void SessionConnection::callWithRetryAndVerifyWithRedirectionForMultipleDevices(
+    std::function<T()> rpc, const vector<string>& deviceIds) {
+    auto result = callWithRetry(rpc);
+    auto status = result.getResult();
+    if (result.getRetryAttempts() == 0) {
+        RpcUtils::verifySuccessWithRedirectionForMultiDevices(status, deviceIds);
+    }
+    else {
+        RpcUtils::verifySuccess(status);
+    }
+    if (result.getException()) {
+        try {
+            std::rethrow_exception(result.getException());
+        }
+        catch (const std::exception& e) {
+            throw IoTDBConnectionException(e.what());
+        }
+    }
+}
+
+template <typename T>
+SessionConnection::RetryResult<T> SessionConnection::callWithRetryAndVerify(std::function<T()> rpc) {
+    auto result = callWithRetry(rpc);
+    RpcUtils::verifySuccess(result.getResult());
+    if (result.getException()) {
+        try {
+            std::rethrow_exception(result.getException());
+        }
+        catch (const std::exception& e) {
+            throw IoTDBConnectionException(e.what());
+        }
+    }
+    return result;
+}
+
+template <typename RpcFunc, typename T>
+SessionConnection::RetryResult<T> SessionConnection::callWithRetryAndReconnect(RpcFunc rpc) {
+    return callWithRetryAndReconnect(rpc,
+                                     [](const TSStatus& status) {
+                                         return status.__isset.needRetry && status.needRetry;
+                                     },
+                                     [](const TSStatus& status) {
+                                         return status.code == TSStatusCode::PLAN_FAILED_NETWORK_PARTITION;
+                                     }
+    );
+}
+
+template <typename RpcFunc, typename StatusGetter, typename T>
+SessionConnection::RetryResult<T> SessionConnection::callWithRetryAndReconnect(RpcFunc rpc, StatusGetter statusGetter) {
+    auto shouldRetry = [&statusGetter](const T& t) {
+        auto status = statusGetter(t);
+        return status.__isset.needRetry && status.needRetry;
+    };
+    auto forceReconnect = [&statusGetter](const T& t) {
+        auto status = statusGetter(t);
+        return status.code == TSStatusCode::PLAN_FAILED_NETWORK_PARTITION;
+    };
+    return callWithRetryAndReconnect(rpc, shouldRetry, forceReconnect);
+}
+
+template <typename RpcFunc, typename ShouldRetry, typename ForceReconnect, typename T>
+SessionConnection::RetryResult<T> SessionConnection::callWithRetryAndReconnect(RpcFunc rpc,
+                                                                               ShouldRetry shouldRetry,
+                                                                               ForceReconnect forceReconnect) {
+    std::exception_ptr lastException = nullptr;
+    T result;
+    int retryAttempt;
+    for (retryAttempt = 0; retryAttempt <= maxRetryCount; retryAttempt++) {
+        try {
+            result = rpc();
+            lastException = nullptr;
+        }
+        catch (...) {
+            result = T();
+            lastException = std::current_exception();
+        }
+
+        if (!shouldRetry(result)) {
+            return {result, lastException, retryAttempt};
+        }
+
+        if (lastException != nullptr ||
+            std::find(availableNodes->getEndPointList().begin(), availableNodes->getEndPointList().end(),
+                      this->endPoint) == availableNodes->getEndPointList().end() ||
+            forceReconnect(result)) {
+            reconnect();
+        }
+
+        try {
+            std::this_thread::sleep_for(std::chrono::milliseconds(retryIntervalMs));
+        }
+        catch (const std::exception& e) {
+            log_debug("Thread was interrupted during retry " +
+                      std::to_string(retryAttempt) +
+                      " with wait time " +
+                      std::to_string(retryIntervalMs) +
+                      " ms. Exiting retry loop.");
+            break;
+        }
+    }
+
+    return {result, lastException, retryAttempt};
+}
+
+#endif
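The retry templates above encode a single policy: repeat an RPC while the server sets needRetry, and re-establish the connection before the next attempt when the last call threw, when the current endpoint has dropped out of the node supplier's list, or when the server reports PLAN_FAILED_NETWORK_PARTITION. A minimal standalone sketch of that decision flow, with stub types in place of the Thrift-generated ones (Status, kPlanFailedNetworkPartition, and the two callbacks are illustrative, not the client's API):

#include <functional>

// Stub standing in for TSStatus; only the fields the policy inspects.
struct Status {
    int code = 0;
    bool needRetry = false;
};

// Stand-in for TSStatusCode::PLAN_FAILED_NETWORK_PARTITION.
constexpr int kPlanFailedNetworkPartition = 1;

template <typename RpcFunc>
Status retryAndReconnect(RpcFunc rpc, int maxRetryCount,
                         const std::function<bool()>& endpointStillKnown,
                         const std::function<void()>& reconnect) {
    Status result;
    for (int attempt = 0; attempt <= maxRetryCount; attempt++) {
        bool threw = false;
        try {
            result = rpc();
        } catch (...) {
            threw = true;
            result = Status();  // default status is not retryable, so a throw exits below
        }
        if (!result.needRetry) {
            return result;      // success, a terminal error, or the post-throw default
        }
        if (threw || !endpointStillKnown() ||
            result.code == kPlanFailedNetworkPartition) {
            reconnect();        // only pay for a reconnect when the endpoint looks unhealthy
        }
        // Sleep between attempts elided; the real code waits retryIntervalMs.
    }
    return result;
}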
Exiting retry loop."); + break; + } + } + + return {result, lastException, retryAttempt}; +} + +#endif diff --git a/iotdb-client/client-cpp/src/main/SessionDataSet.cpp b/iotdb-client/client-cpp/src/main/SessionDataSet.cpp new file mode 100644 index 0000000000000..30e11fa1efb22 --- /dev/null +++ b/iotdb-client/client-cpp/src/main/SessionDataSet.cpp @@ -0,0 +1,252 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#include "SessionDataSet.h" +#include +#include + +RowRecord::RowRecord(int64_t timestamp) { + this->timestamp = timestamp; +} + +RowRecord::RowRecord(int64_t timestamp, const std::vector& fields) + : timestamp(timestamp), fields(fields) { +} + +RowRecord::RowRecord(const std::vector& fields) + : timestamp(-1), fields(fields) { +} + +RowRecord::RowRecord() { + this->timestamp = -1; +} + +void RowRecord::addField(const Field& f) { + this->fields.push_back(f); +} + +std::string RowRecord::toString() { + std::string ret; + if (this->timestamp != -1) { + ret.append(std::to_string(timestamp)); + ret.append("\t"); + } + for (size_t i = 0; i < fields.size(); i++) { + if (i != 0) { + ret.append("\t"); + } + TSDataType::TSDataType dataType = fields[i].dataType; + switch (dataType) { + case TSDataType::BOOLEAN: + ret.append(fields[i].boolV ? 
"true" : "false"); + break; + case TSDataType::INT32: + ret.append(std::to_string(fields[i].intV)); + break; + case TSDataType::DATE: + ret.append(boost::gregorian::to_iso_extended_string(fields[i].dateV)); + break; + case TSDataType::TIMESTAMP: + case TSDataType::INT64: + ret.append(std::to_string(fields[i].longV)); + break; + case TSDataType::FLOAT: + ret.append(std::to_string(fields[i].floatV)); + break; + case TSDataType::DOUBLE: + ret.append(std::to_string(fields[i].doubleV)); + break; + case TSDataType::BLOB: + case TSDataType::STRING: + case TSDataType::TEXT: + ret.append(fields[i].stringV); + break; + default: + break; + } + } + ret.append("\n"); + return ret; +} + +bool SessionDataSet::hasNext() { + if (iotdbRpcDataSet_->hasCachedRecord()) { + return true; + } + return iotdbRpcDataSet_->next(); +} + +shared_ptr SessionDataSet::next() { + if (!iotdbRpcDataSet_->hasCachedRecord() && !hasNext()) { + return nullptr; + } + iotdbRpcDataSet_->setHasCachedRecord(false); + + return constructRowRecordFromValueArray(); +} + +int SessionDataSet::getFetchSize() { + return iotdbRpcDataSet_->getFetchSize(); +} + +void SessionDataSet::setFetchSize(int fetchSize) { + return iotdbRpcDataSet_->setFetchSize(fetchSize); +} + +const std::vector& SessionDataSet::getColumnNames() const { + return iotdbRpcDataSet_->getColumnNameList(); +} + +const std::vector& SessionDataSet::getColumnTypeList() const { + return iotdbRpcDataSet_->getColumnTypeList(); +} + +void SessionDataSet::closeOperationHandle(bool forceClose) { + iotdbRpcDataSet_->close(forceClose); +} + +bool SessionDataSet::DataIterator::next() { + return iotdbRpcDataSet_->next(); +} + +bool SessionDataSet::DataIterator::isNull(const std::string& columnName) { + return iotdbRpcDataSet_->isNullByColumnName(columnName); +} + +bool SessionDataSet::DataIterator::isNullByIndex(int32_t columnIndex) { + return iotdbRpcDataSet_->isNullByIndex(columnIndex); +} + +bool SessionDataSet::DataIterator::getBooleanByIndex(int32_t columnIndex) { + return iotdbRpcDataSet_->getBooleanByIndex(columnIndex); +} + +bool SessionDataSet::DataIterator::getBoolean(const std::string& columnName) { + return iotdbRpcDataSet_->getBoolean(columnName); +} + +double SessionDataSet::DataIterator::getDoubleByIndex(int32_t columnIndex) { + return iotdbRpcDataSet_->getDoubleByIndex(columnIndex); +} + +double SessionDataSet::DataIterator::getDouble(const std::string& columnName) { + return iotdbRpcDataSet_->getDouble(columnName); +} + +float SessionDataSet::DataIterator::getFloatByIndex(int32_t columnIndex) { + return iotdbRpcDataSet_->getFloatByIndex(columnIndex); +} + +float SessionDataSet::DataIterator::getFloat(const std::string& columnName) { + return iotdbRpcDataSet_->getFloat(columnName); +} + +int32_t SessionDataSet::DataIterator::getIntByIndex(int32_t columnIndex) { + return iotdbRpcDataSet_->getIntByIndex(columnIndex); +} + +int32_t SessionDataSet::DataIterator::getInt(const std::string& columnName) { + return iotdbRpcDataSet_->getInt(columnName); +} + +int64_t SessionDataSet::DataIterator::getLongByIndex(int32_t columnIndex) { + return iotdbRpcDataSet_->getLongByIndex(columnIndex); +} + +int64_t SessionDataSet::DataIterator::getLong(const std::string& columnName) { + return iotdbRpcDataSet_->getLong(columnName); +} + +std::string SessionDataSet::DataIterator::getStringByIndex(int32_t columnIndex) { + return iotdbRpcDataSet_->getStringByIndex(columnIndex); +} + +std::string SessionDataSet::DataIterator::getString(const std::string& columnName) { + return 
+
+int64_t SessionDataSet::DataIterator::getTimestampByIndex(int32_t columnIndex) {
+    return iotdbRpcDataSet_->getTimestampByIndex(columnIndex);
+}
+
+int64_t SessionDataSet::DataIterator::getTimestamp(const std::string& columnName) {
+    return iotdbRpcDataSet_->getTimestamp(columnName);
+}
+
+boost::gregorian::date SessionDataSet::DataIterator::getDateByIndex(int32_t columnIndex) {
+    return iotdbRpcDataSet_->getDateByIndex(columnIndex);
+}
+
+boost::gregorian::date SessionDataSet::DataIterator::getDate(const std::string& columnName) {
+    return iotdbRpcDataSet_->getDate(columnName);
+}
+
+int32_t SessionDataSet::DataIterator::findColumn(const std::string& columnName) {
+    return iotdbRpcDataSet_->findColumn(columnName);
+}
+
+const std::vector<std::string>& SessionDataSet::DataIterator::getColumnNames() const {
+    return iotdbRpcDataSet_->getColumnNameList();
+}
+
+const std::vector<std::string>& SessionDataSet::DataIterator::getColumnTypeList() const {
+    return iotdbRpcDataSet_->getColumnTypeList();
+}
+
+shared_ptr<RowRecord> SessionDataSet::constructRowRecordFromValueArray() {
+    std::vector<Field> outFields;
+    for (int i = iotdbRpcDataSet_->getValueColumnStartIndex(); i < iotdbRpcDataSet_->getColumnSize(); i++) {
+        Field field;
+        std::string columnName = iotdbRpcDataSet_->getColumnNameList().at(i);
+        if (!iotdbRpcDataSet_->isNullByColumnName(columnName)) {
+            TSDataType::TSDataType dataType = iotdbRpcDataSet_->getDataType(columnName);
+            field.dataType = dataType;
+            switch (dataType) {
+            case TSDataType::BOOLEAN:
+                field.boolV = iotdbRpcDataSet_->getBoolean(columnName);
+                break;
+            case TSDataType::INT32:
+                field.intV = iotdbRpcDataSet_->getInt(columnName);
+                break;
+            case TSDataType::DATE:
+                field.dateV = parseIntToDate(iotdbRpcDataSet_->getInt(columnName));
+                break;
+            case TSDataType::INT64:
+            case TSDataType::TIMESTAMP:
+                field.longV = iotdbRpcDataSet_->getLong(columnName);
+                break;
+            case TSDataType::FLOAT:
+                field.floatV = iotdbRpcDataSet_->getFloat(columnName);
+                break;
+            case TSDataType::DOUBLE:
+                field.doubleV = iotdbRpcDataSet_->getDouble(columnName);
+                break;
+            case TSDataType::TEXT:
+            case TSDataType::BLOB:
+            case TSDataType::STRING:
+                field.stringV = iotdbRpcDataSet_->getBinary(columnName)->getStringValue();
+                break;
+            default:
+                throw UnSupportedDataTypeException("Data type " + std::to_string(dataType) + " is not supported.");
+            }
+        }
+        outFields.emplace_back(field);
+    }
+    return std::make_shared<RowRecord>(iotdbRpcDataSet_->getCurrentRowTime(), outFields);
+}
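For orientation, a typical consumer of the class above drives the cursor with hasNext()/next() and reads either the formatted row or the individual Field values. A short usage sketch against the API shown here (the query string and the already-connected session are illustrative):

std::unique_ptr<SessionDataSet> dataSet =
    session->executeQueryStatement("select s1, s2 from root.demo.d1");
dataSet->setFetchSize(1024);
while (dataSet->hasNext()) {
    std::shared_ptr<RowRecord> row = dataSet->next();  // nullptr once exhausted
    std::cout << row->toString();                      // "<timestamp>\t<field>\t...\n"
}
dataSet->closeOperationHandle();                       // releases the server-side query handle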
diff --git a/iotdb-client/client-cpp/src/main/SessionDataSet.h b/iotdb-client/client-cpp/src/main/SessionDataSet.h
new file mode 100644
index 0000000000000..d6178d97fbafa
--- /dev/null
+++ b/iotdb-client/client-cpp/src/main/SessionDataSet.h
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef IOTDB_SESSION_DATA_SET_H
+#define IOTDB_SESSION_DATA_SET_H
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+#include <boost/date_time/gregorian/gregorian.hpp>
+#include "IoTDBRpcDataSet.h"
+#include "Column.h"
+
+class RowRecord {
+public:
+    int64_t timestamp;
+    std::vector<Field> fields;
+
+    explicit RowRecord(int64_t timestamp);
+    RowRecord(int64_t timestamp, const std::vector<Field>& fields);
+    explicit RowRecord(const std::vector<Field>& fields);
+    RowRecord();
+
+    void addField(const Field& f);
+    std::string toString();
+};
+
+class SessionDataSet {
+public:
+    SessionDataSet(const std::string& sql,
+                   const std::vector<std::string>& columnNameList,
+                   const std::vector<std::string>& columnTypeList,
+                   const std::map<std::string, int32_t>& columnNameIndex,
+                   int64_t queryId,
+                   int64_t statementId,
+                   std::shared_ptr<IClientRPCServiceClient> client,
+                   int64_t sessionId,
+                   const std::vector<std::string>& queryResult,
+                   bool ignoreTimestamp,
+                   int64_t timeout,
+                   bool moreData,
+                   int32_t fetchSize,
+                   const std::string& zoneId) {
+        iotdbRpcDataSet_ = std::make_shared<IoTDBRpcDataSet>(sql,
+                                                             columnNameList,
+                                                             columnTypeList,
+                                                             columnNameIndex,
+                                                             ignoreTimestamp,
+                                                             moreData,
+                                                             queryId,
+                                                             statementId,
+                                                             client,
+                                                             sessionId,
+                                                             queryResult,
+                                                             fetchSize,
+                                                             timeout,
+                                                             zoneId,
+                                                             IoTDBRpcDataSet::DEFAULT_TIME_FORMAT);
+    }
+
+    ~SessionDataSet() = default;
+
+    bool hasNext();
+    shared_ptr<RowRecord> next();
+
+    int getFetchSize();
+    void setFetchSize(int fetchSize);
+
+    const std::vector<std::string>& getColumnNames() const;
+    const std::vector<std::string>& getColumnTypeList() const;
+    void closeOperationHandle(bool forceClose = false);
+
+    class DataIterator {
+        std::shared_ptr<IoTDBRpcDataSet> iotdbRpcDataSet_;
+
+    public:
+        DataIterator(std::shared_ptr<IoTDBRpcDataSet> iotdbRpcDataSet) :
+            iotdbRpcDataSet_(iotdbRpcDataSet) {
+        }
+
+        bool next();
+
+        bool isNull(const std::string& columnName);
+        bool isNullByIndex(int32_t columnIndex);
+
+        bool getBooleanByIndex(int32_t columnIndex);
+        bool getBoolean(const std::string& columnName);
+
+        double getDoubleByIndex(int32_t columnIndex);
+        double getDouble(const std::string& columnName);
+
+        float getFloatByIndex(int32_t columnIndex);
+        float getFloat(const std::string& columnName);
+
+        int32_t getIntByIndex(int32_t columnIndex);
+        int32_t getInt(const std::string& columnName);
+
+        int64_t getLongByIndex(int32_t columnIndex);
+        int64_t getLong(const std::string& columnName);
+
+        std::string getStringByIndex(int32_t columnIndex);
+        std::string getString(const std::string& columnName);
+
+        int64_t getTimestampByIndex(int32_t columnIndex);
+        int64_t getTimestamp(const std::string& columnName);
+
+        boost::gregorian::date getDateByIndex(int32_t columnIndex);
+        boost::gregorian::date getDate(const std::string& columnName);
+
+        int32_t findColumn(const std::string& columnName);
+        const std::vector<std::string>& getColumnNames() const;
+        const std::vector<std::string>& getColumnTypeList() const;
+    };
+
+    DataIterator getIterator() {
+        return {iotdbRpcDataSet_};
+    }
+
+private:
+    std::shared_ptr<RowRecord> constructRowRecordFromValueArray();
+
+    std::shared_ptr<IoTDBRpcDataSet> iotdbRpcDataSet_;
+};
+
+#endif
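The nested DataIterator gives a second way to consume the same result set: typed, column-addressed getters straight off the RPC buffers, without building a RowRecord and its Field vector for every row. A sketch under the same assumptions as the previous example (the column names are illustrative):

auto dataSet = session->executeQueryStatement("select s1 from root.demo.d1");
auto it = dataSet->getIterator();
while (it.next()) {
    if (!it.isNull("root.demo.d1.s1")) {
        int64_t v = it.getLong("root.demo.d1.s1");  // typed read, no Field copy
        // ... use v ...
    }
}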
diff --git a/iotdb-client/client-cpp/src/main/ThriftConnection.cpp b/iotdb-client/client-cpp/src/main/ThriftConnection.cpp
new file mode 100644
index 0000000000000..8cee0c6f0140d
--- /dev/null
+++ b/iotdb-client/client-cpp/src/main/ThriftConnection.cpp
@@ -0,0 +1,170 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include "ThriftConnection.h"
+#include <thrift/protocol/TBinaryProtocol.h>
+#include <thrift/protocol/TCompactProtocol.h>
+#include <thrift/transport/TSocket.h>
+#include <thrift/transport/TBufferTransports.h>
+
+#include "Session.h"
+#include "SessionDataSet.h"
+
+const int ThriftConnection::THRIFT_DEFAULT_BUFFER_SIZE = 4096;
+const int ThriftConnection::THRIFT_MAX_FRAME_SIZE = 1048576;
+const int ThriftConnection::CONNECTION_TIMEOUT_IN_MS = 1000;
+const int ThriftConnection::DEFAULT_FETCH_SIZE = 10000;
+
+ThriftConnection::ThriftConnection(const TEndPoint& endPoint,
+                                   int thriftDefaultBufferSize,
+                                   int thriftMaxFrameSize,
+                                   int connectionTimeoutInMs,
+                                   int fetchSize)
+    : endPoint_(endPoint),
+      thriftDefaultBufferSize_(thriftDefaultBufferSize),
+      thriftMaxFrameSize_(thriftMaxFrameSize),
+      connectionTimeoutInMs_(connectionTimeoutInMs),
+      fetchSize_(fetchSize) {
+}
+
+ThriftConnection::~ThriftConnection() = default;
+
+void ThriftConnection::initZoneId() {
+    if (!zoneId_.empty()) {
+        return;
+    }
+
+    time_t ts = 0;
+    struct tm tmv{};
+#if defined(_WIN64) || defined (WIN32) || defined (_WIN32)
+    localtime_s(&tmv, &ts);
+#else
+    localtime_r(&ts, &tmv);
+#endif
+
+    char zoneStr[32];
+    strftime(zoneStr, sizeof(zoneStr), "%z", &tmv);
+    zoneId_ = zoneStr;
+}
+
+void ThriftConnection::init(const std::string& username,
+                            const std::string& password,
+                            bool enableRPCCompression,
+                            const std::string& zoneId,
+                            const std::string& version) {
+    std::shared_ptr<TSocket> socket(new TSocket(endPoint_.ip, endPoint_.port));
+    socket->setConnTimeout(connectionTimeoutInMs_);
+    transport_ = std::make_shared<TFramedTransport>(socket);
+    if (!transport_->isOpen()) {
+        try {
+            transport_->open();
+        }
+        catch (TTransportException& e) {
+            throw IoTDBConnectionException(e.what());
+        }
+    }
+    if (zoneId.empty()) {
+        initZoneId();
+    }
+    else {
+        this->zoneId_ = zoneId;
+    }
+
+    if (enableRPCCompression) {
+        std::shared_ptr<TProtocol> protocol(new TCompactProtocol(transport_));
+        client_ = std::make_shared<IClientRPCServiceClient>(protocol);
+    }
+    else {
+        std::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport_));
+        client_ = std::make_shared<IClientRPCServiceClient>(protocol);
+    }
+
+    std::map<std::string, std::string> configuration;
+    configuration["version"] = version;
+    TSOpenSessionReq openReq;
+    openReq.__set_username(username);
+    openReq.__set_password(password);
+    openReq.__set_zoneId(this->zoneId_);
+    openReq.__set_configuration(configuration);
+    try {
+        TSOpenSessionResp openResp;
+        client_->openSession(openResp, openReq);
+        RpcUtils::verifySuccess(openResp.status);
+        sessionId_ = openResp.sessionId;
+        statementId_ = client_->requestStatementId(sessionId_);
+    }
+    catch (const TTransportException& e) {
+        transport_->close();
+        throw IoTDBConnectionException(e.what());
+    } catch (const IoTDBException& e) {
+        transport_->close();
+        throw IoTDBException(e.what());
+    } catch (const std::exception& e) {
+        transport_->close();
+        throw IoTDBException(e.what());
+    }
+}
+
+std::unique_ptr<SessionDataSet> ThriftConnection::executeQueryStatement(const std::string& sql, int64_t timeoutInMs) {
+    TSExecuteStatementReq req;
+    req.__set_sessionId(sessionId_);
+    req.__set_statementId(statementId_);
+    req.__set_statement(sql);
+    req.__set_timeout(timeoutInMs);
+    TSExecuteStatementResp resp;
+    try {
+        client_->executeQueryStatementV2(resp, req);
+        RpcUtils::verifySuccess(resp.status);
+    }
+    catch (const TTransportException& e) {
+        throw IoTDBConnectionException(e.what());
+    } catch (const IoTDBException& e) {
+        throw IoTDBConnectionException(e.what());
+    } catch (const std::exception& e) {
+        throw IoTDBException(e.what());
+    }
+    std::shared_ptr<TSQueryDataSet> queryDataSet(new TSQueryDataSet(resp.queryDataSet));
+    return std::unique_ptr<SessionDataSet>(new SessionDataSet("", resp.columns, resp.dataTypeList,
+                                                              resp.columnNameIndexMap, resp.queryId, statementId_,
+                                                              client_, sessionId_, resp.queryResult, resp.ignoreTimeStamp,
+                                                              connectionTimeoutInMs_, resp.moreData, fetchSize_, zoneId_));
+}
+
+void ThriftConnection::close() {
+    try {
+        if (client_) {
+            TSCloseSessionReq req;
+            req.__set_sessionId(sessionId_);
+            TSStatus tsStatus;
+            client_->closeSession(tsStatus, req);
+        }
+    }
+    catch (const TTransportException& e) {
+        throw IoTDBConnectionException(e.what());
+    } catch (const std::exception& e) {
+        throw IoTDBConnectionException(e.what());
+    }
+
+    try {
+        if (transport_->isOpen()) {
+            transport_->close();
+        }
+    }
+    catch (const std::exception& e) {
+        throw IoTDBConnectionException(e.what());
+    }
+}
diff --git a/iotdb-client/client-cpp/src/main/ThriftConnection.h b/iotdb-client/client-cpp/src/main/ThriftConnection.h
new file mode 100644
index 0000000000000..d578e37b1b705
--- /dev/null
+++ b/iotdb-client/client-cpp/src/main/ThriftConnection.h
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#ifndef IOTDB_THRIFTCONNECTION_H
+#define IOTDB_THRIFTCONNECTION_H
+
+#include <memory>
+#include <string>
+#include "IClientRPCService.h"
+
+class SessionDataSet;
+
+class ThriftConnection {
+public:
+    static const int THRIFT_DEFAULT_BUFFER_SIZE;
+    static const int THRIFT_MAX_FRAME_SIZE;
+    static const int CONNECTION_TIMEOUT_IN_MS;
+    static const int DEFAULT_FETCH_SIZE;
+
+    explicit ThriftConnection(const TEndPoint& endPoint,
+                              int thriftDefaultBufferSize = THRIFT_DEFAULT_BUFFER_SIZE,
+                              int thriftMaxFrameSize = THRIFT_MAX_FRAME_SIZE,
+                              int connectionTimeoutInMs = CONNECTION_TIMEOUT_IN_MS,
+                              int fetchSize = DEFAULT_FETCH_SIZE);
+
+    ~ThriftConnection();
+
+    void init(const std::string& username,
+              const std::string& password,
+              bool enableRPCCompression = false,
+              const std::string& zoneId = std::string(),
+              const std::string& version = "V_1_0");
+
+    std::unique_ptr<SessionDataSet> executeQueryStatement(const std::string& sql, int64_t timeoutInMs = -1);
+
+    void close();
+
+private:
+    TEndPoint endPoint_;
+
+    int thriftDefaultBufferSize_;
+    int thriftMaxFrameSize_;
+    int connectionTimeoutInMs_;
+    int fetchSize_;
+
+    std::shared_ptr<TTransport> transport_;
+    std::shared_ptr<IClientRPCServiceClient> client_;
+    int64_t sessionId_{};
+    int64_t statementId_{};
+    std::string zoneId_;
+    int timeFactor_{};
+
+    void initZoneId();
+};
+
+#endif
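ThriftConnection is the minimal single-endpoint path: one socket, one openSession handshake, no retry or redirection (those live in SessionConnection). A lifecycle sketch, with an illustrative endpoint and credentials:

TEndPoint endPoint;
endPoint.ip = "127.0.0.1";
endPoint.port = 6667;

ThriftConnection conn(endPoint);   // buffer/frame/timeout defaults from the header
conn.init("root", "root");         // opens the framed transport, then the session
auto dataSet = conn.executeQueryStatement("show databases");
while (dataSet->hasNext()) {
    std::cout << dataSet->next()->toString();
}
conn.close();                      // closes the session, then the transport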
diff --git a/iotdb-client/client-cpp/src/main/TsBlock.cpp b/iotdb-client/client-cpp/src/main/TsBlock.cpp
new file mode 100644
index 0000000000000..7c2bac272a601
--- /dev/null
+++ b/iotdb-client/client-cpp/src/main/TsBlock.cpp
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include <stdexcept>
+#include <utility>
+#include "TsBlock.h"
+
+std::shared_ptr<TsBlock> TsBlock::create(int32_t positionCount,
+                                         std::shared_ptr<Column> timeColumn,
+                                         std::vector<std::shared_ptr<Column>> valueColumns) {
+    if (valueColumns.empty()) {
+        throw std::invalid_argument("valueColumns cannot be empty");
+    }
+    return std::shared_ptr<TsBlock>(new TsBlock(positionCount, std::move(timeColumn), std::move(valueColumns)));
+}
+
+std::shared_ptr<TsBlock> TsBlock::deserialize(const std::string& data) {
+    MyStringBuffer buffer(data);
+
+    // Read value column count
+    int32_t valueColumnCount = buffer.getInt();
+
+    // Read value column data types
+    std::vector<TSDataType::TSDataType> valueColumnDataTypes(valueColumnCount);
+    for (int32_t i = 0; i < valueColumnCount; i++) {
+        valueColumnDataTypes[i] = static_cast<TSDataType::TSDataType>(buffer.getChar());
+    }
+
+    // Read position count
+    int32_t positionCount = buffer.getInt();
+
+    // Read column encodings
+    std::vector<ColumnEncoding> columnEncodings(valueColumnCount + 1);
+    for (int32_t i = 0; i < valueColumnCount + 1; i++) {
+        columnEncodings[i] = static_cast<ColumnEncoding>(buffer.getChar());
+    }
+
+    // Read time column
+    auto timeColumnDecoder = getColumnDecoder(columnEncodings[0]);
+    auto timeColumn = timeColumnDecoder->readColumn(buffer, TSDataType::INT64, positionCount);
+
+    // Read value columns
+    std::vector<std::shared_ptr<Column>> valueColumns(valueColumnCount);
+    for (int32_t i = 0; i < valueColumnCount; i++) {
+        auto valueColumnDecoder = getColumnDecoder(columnEncodings[i + 1]);
+        valueColumns[i] = valueColumnDecoder->readColumn(buffer, valueColumnDataTypes[i], positionCount);
+    }
+
+    return create(positionCount, std::move(timeColumn), std::move(valueColumns));
+}
+
+TsBlock::TsBlock(int32_t positionCount,
+                 std::shared_ptr<Column> timeColumn,
+                 std::vector<std::shared_ptr<Column>> valueColumns)
+    : positionCount_(positionCount),
+      timeColumn_(std::move(timeColumn)),
+      valueColumns_(std::move(valueColumns)) {
+}
+
+int32_t TsBlock::getPositionCount() const {
+    return positionCount_;
+}
+
+int64_t TsBlock::getStartTime() const {
+    return timeColumn_->getLong(0);
+}
+
+int64_t TsBlock::getEndTime() const {
+    return timeColumn_->getLong(positionCount_ - 1);
+}
+
+bool TsBlock::isEmpty() const {
+    return positionCount_ == 0;
+}
+
+int64_t TsBlock::getTimeByIndex(int32_t index) const {
+    return timeColumn_->getLong(index);
+}
+
+int32_t TsBlock::getValueColumnCount() const {
+    return static_cast<int32_t>(valueColumns_.size());
+}
+
+const std::shared_ptr<Column> TsBlock::getTimeColumn() const {
+    return timeColumn_;
+}
+
+const std::vector<std::shared_ptr<Column>>& TsBlock::getValueColumns() const {
+    return valueColumns_;
+}
+
+const std::shared_ptr<Column> TsBlock::getColumn(int32_t columnIndex) const {
+    return valueColumns_[columnIndex];
+}
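TsBlock::deserialize above fixes the wire layout of a serialized block. Spelled out for a hypothetical block with one INT32 value column and two rows (the column payloads themselves depend on the negotiated column encodings and are elided):

// Byte stream consumed by TsBlock::deserialize, in order:
//
//   int32  valueColumnCount      = 1
//   byte   valueColumnType[0]    = TSDataType::INT32
//   int32  positionCount         = 2
//   byte   columnEncoding[0]       (time column)
//   byte   columnEncoding[1]       (value column 0)
//   ...    time column payload,    decoded via getColumnDecoder(columnEncoding[0])
//   ...    value column 0 payload, decoded via getColumnDecoder(columnEncoding[1])
//
// The time column is always decoded as TSDataType::INT64; only the value
// columns carry their own data-type bytes in the header.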
diff --git a/iotdb-client/client-cpp/src/main/TsBlock.h b/iotdb-client/client-cpp/src/main/TsBlock.h
new file mode 100644
index 0000000000000..2ca6b0197445d
--- /dev/null
+++ b/iotdb-client/client-cpp/src/main/TsBlock.h
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef IOTDB_TS_BLOCK_H
+#define IOTDB_TS_BLOCK_H
+
+#include <memory>
+#include <vector>
+#include "Column.h"
+
+class TsBlock {
+public:
+    static std::shared_ptr<TsBlock> create(int32_t positionCount,
+                                           std::shared_ptr<Column> timeColumn,
+                                           std::vector<std::shared_ptr<Column>> valueColumns);
+
+    static std::shared_ptr<TsBlock> deserialize(const std::string& data);
+
+    int32_t getPositionCount() const;
+    int64_t getStartTime() const;
+    int64_t getEndTime() const;
+    bool isEmpty() const;
+    int64_t getTimeByIndex(int32_t index) const;
+    int32_t getValueColumnCount() const;
+    const std::shared_ptr<Column> getTimeColumn() const;
+    const std::vector<std::shared_ptr<Column>>& getValueColumns() const;
+    const std::shared_ptr<Column> getColumn(int32_t columnIndex) const;
+
+private:
+    TsBlock(int32_t positionCount,
+            std::shared_ptr<Column> timeColumn,
+            std::vector<std::shared_ptr<Column>> valueColumns);
+
+    std::shared_ptr<Column> timeColumn_;
+    std::vector<std::shared_ptr<Column>> valueColumns_;
+    int32_t positionCount_;
+};
+
+#endif
diff --git a/iotdb-client/client-cpp/src/test/cpp/sessionIT.cpp b/iotdb-client/client-cpp/src/test/cpp/sessionIT.cpp
index 8016a4e37b320..09154b868e47c 100644
--- a/iotdb-client/client-cpp/src/test/cpp/sessionIT.cpp
+++ b/iotdb-client/client-cpp/src/test/cpp/sessionIT.cpp
@@ -22,7 +22,7 @@
 
 using namespace std;
 
-extern Session *session;
+extern std::shared_ptr<Session> session;
 
 static vector<string> testTimeseries = {"root.test.d1.s1", "root.test.d1.s2", "root.test.d1.s3"};
 
@@ -85,18 +85,27 @@ TEST_CASE("Test insertRecord by string", "[testInsertRecord]") {
     session->executeNonQueryStatement("insert into root.test.d1(timestamp,s1, s2, s3) values(100, 1,2,3)");
 
-    unique_ptr<SessionDataSet> sessionDataSet = session->executeQueryStatement("select s1,s2,s3 from root.test.d1");
+
+    unique_ptr<SessionDataSet> sessionDataSet = session->executeQueryStatement("select * from root.ln.设备");
+
     sessionDataSet->setFetchSize(1024);
-    int count = 0;
     while (sessionDataSet->hasNext()) {
-        long index = 1;
-        count++;
-        for (const Field &f: sessionDataSet->next()->fields) {
-            REQUIRE(f.longV == index);
-            index++;
-        }
-    }
-    REQUIRE(count == 101);
+        auto record = sessionDataSet->next();
+        std::cout << record->toString() << std::endl;
+    }
+
+    // unique_ptr<SessionDataSet> sessionDataSet = session->executeQueryStatement("select s1,s2,s3 from root.test.d1");
+    // sessionDataSet->setFetchSize(1024);
+    // int count = 0;
+    // while (sessionDataSet->hasNext()) {
+    //     long index = 1;
+    //     count++;
+    //     for (const Field &f: sessionDataSet->next()->fields) {
+    //         REQUIRE(f.longV == index);
+    //         index++;
+    //     }
+    // }
+    // REQUIRE(count == 101);
 }
 
 TEST_CASE("Test insertRecords ", "[testInsertRecords]") {
@@ -176,6 +185,50 @@ TEST_CASE("Test insertRecord with types ", "[testTypedInsertRecord]") {
     REQUIRE(count == 100);
 }
 
+
+TEST_CASE("Test insertRecord with new datatypes ", "[testTypedInsertRecordNewDatatype]") {
+    CaseReporter cr("testTypedInsertRecordNewDatatype");
+    vector<string> timeseries = {"root.test.d1.s1", "root.test.d1.s2", "root.test.d1.s3", "root.test.d1.s4"};
+    std::vector<TSDataType::TSDataType> types = {TSDataType::TIMESTAMP,
+                                                 TSDataType::DATE, TSDataType::BLOB, TSDataType::STRING};
+
+    for (size_t i = 0; i < timeseries.size(); i++) {
+        if (session->checkTimeseriesExists(timeseries[i])) {
+            session->deleteTimeseries(timeseries[i]);
+        }
+        session->createTimeseries(timeseries[i], types[i], TSEncoding::PLAIN, CompressionType::SNAPPY);
+    }
+    string deviceId = "root.test.d1";
+    vector<string> measurements = {"s1", "s2", "s3", "s4"};
+    int64_t value1 = 20250507;
+    boost::gregorian::date value2 = boost::gregorian::date(2025, 5, 7);
+    string value3 = "20250507";
+    string value4 = "20250507";
+
+    for (long time = 0; time < 100; time++) {
+        vector<char*> values = {(char *) (&value1), (char *) (&value2),
+                                const_cast<char*>(value3.c_str()), const_cast<char*>(value4.c_str())};
+        session->insertRecord(deviceId, time, measurements, types, values);
+    }
+
+    unique_ptr<SessionDataSet> sessionDataSet = session->executeQueryStatement("select s1,s2,s3,s4 from root.test.d1");
+    sessionDataSet->setFetchSize(1024);
+    long count = 0;
+    while (sessionDataSet->hasNext()) {
+        auto record = sessionDataSet->next();
+        REQUIRE(record->fields.size() == 4);
+        for (int i = 0; i < 4; i++) {
+            REQUIRE(types[i] == record->fields[i].dataType);
+        }
+        REQUIRE(record->fields[0].longV == value1);
+        REQUIRE(record->fields[1].dateV == value2);
+        REQUIRE(record->fields[2].stringV == value3);
+        REQUIRE(record->fields[3].stringV == value4);
+        count++;
+    }
+    REQUIRE(count == 100);
+}
+
 TEST_CASE("Test insertRecords with types ", "[testTypedInsertRecords]") {
     CaseReporter cr("testTypedInsertRecords");
     vector<string> timeseries = {"root.test.d1.s1", "root.test.d1.s2", "root.test.d1.s3"};
@@ -274,7 +327,7 @@ TEST_CASE("Test insertTablet ", "[testInsertTablet]") {
         int row = tablet.rowSize++;
         tablet.timestamps[row] = time;
         for (int64_t i = 0; i < 3; i++) {
-            tablet.addValue(i, row, &i);
+            tablet.addValue(i, row, i);
         }
         if (tablet.rowSize == tablet.maxRowNumber) {
             session->insertTablet(tablet);
@@ -300,6 +353,116 @@
     REQUIRE(count == 100);
 }
 
+TEST_CASE("Test insertTablets ", "[testInsertTablets]") {
+    CaseReporter cr("testInsertTablets");
+    vector<string> testTimeseries = {"root.test.d1.s1", "root.test.d1.s2", "root.test.d1.s3",
+                                     "root.test.d2.s1", "root.test.d2.s2", "root.test.d2.s3"};
+    for (const string &timeseries: testTimeseries) {
+        if (session->checkTimeseriesExists(timeseries)) {
+            session->deleteTimeseries(timeseries);
+        }
+        session->createTimeseries(timeseries, TSDataType::INT64, TSEncoding::RLE, CompressionType::SNAPPY);
+    }
+    vector<pair<string, TSDataType::TSDataType>> schemaList;
+    schemaList.emplace_back("s1", TSDataType::INT64);
+    schemaList.emplace_back("s2", TSDataType::INT64);
+    schemaList.emplace_back("s3", TSDataType::INT64);
+
+    int maxRowNumber = 100;
+    vector<string> deviceIds = {"root.test.d1", "root.test.d2"};
+    vector<Tablet> tablets;
+    for (const auto& deviceId: deviceIds) {
+        tablets.emplace_back(deviceId, schemaList, maxRowNumber);
+    }
+    for (auto& tablet : tablets) {
+        for (int64_t time = 0; time < maxRowNumber; time++) {
+            int row = tablet.rowSize++;
+            tablet.timestamps[row] = time;
+            for (int64_t i = 0; i < 3; i++) {
+                tablet.addValue(i, row, i);
+            }
+        }
+    }
+    unordered_map<string, Tablet*> tabletsMap;
+    for (auto& tablet : tablets) {
+        tabletsMap[tablet.deviceId] = &tablet;
+    }
+    session->insertTablets(tabletsMap);
+
+    unique_ptr<SessionDataSet> sessionDataSet = session->executeQueryStatement("select s1,s2,s3 from root.test.d2");
+    sessionDataSet->setFetchSize(1024);
+    int count = 0;
+    while (sessionDataSet->hasNext()) {
+        long index = 0;
+        count++;
+        for (const Field& f: sessionDataSet->next()->fields) {
+            REQUIRE(f.longV == index);
+            index++;
+        }
+    }
+    REQUIRE(count == 100);
+}
+
+TEST_CASE("Test insertTablet new datatype", "[testInsertTabletNewDatatype]") {
+    CaseReporter cr("testInsertTabletNewDatatype");
+    string deviceId = "root.test.d2";
+    vector<pair<string, TSDataType::TSDataType>> schemaList;
+    std::vector<string> measurements = {"s1", "s2", "s3", "s4"};
+    std::vector<TSDataType::TSDataType> dataTypes = {TSDataType::TIMESTAMP,
+                                                     TSDataType::DATE, TSDataType::BLOB, TSDataType::STRING};
+    for (int i = 0; i < 4; i++) {
+        schemaList.emplace_back(measurements[i], dataTypes[i]);
+    }
+
+    for (int i = 0; i < 4; i++) {
+        auto timeseries = deviceId + "." + measurements[i];
+        if (session->checkTimeseriesExists(timeseries)) {
+            session->deleteTimeseries(timeseries);
+        }
+        session->createTimeseries(timeseries, dataTypes[i], TSEncoding::PLAIN, CompressionType::UNCOMPRESSED);
+    }
+
+    int64_t s1Value = 20250507;
+    boost::gregorian::date s2Value(2025, 5, 7);
+    std::string s3Value("20250507");
+    std::string s4Value("20250507");
+
+    Tablet tablet(deviceId, schemaList, 100);
+    for (int64_t time = 0; time < 100; time++) {
+        int row = tablet.rowSize++;
+        tablet.timestamps[row] = time;
+        tablet.addValue(0, row, s1Value);
+        tablet.addValue(1, row, s2Value);
+        tablet.addValue(2, row, s3Value);
+        tablet.addValue(3, row, s4Value);
+        if (tablet.rowSize == tablet.maxRowNumber) {
+            session->insertTablet(tablet);
+            tablet.reset();
+        }
+    }
+
+    if (tablet.rowSize != 0) {
+        session->insertTablet(tablet);
+        tablet.reset();
+    }
+    unique_ptr<SessionDataSet> sessionDataSet = session->executeQueryStatement("select s1,s2,s3,s4 from root.test.d2");
+    sessionDataSet->setFetchSize(1024);
+    int count = 0;
+    while (sessionDataSet->hasNext()) {
+        auto record = sessionDataSet->next();
+        REQUIRE(record->fields.size() == 4);
+        for (int i = 0; i < 4; i++) {
+            REQUIRE(dataTypes[i] == record->fields[i].dataType);
+        }
+        REQUIRE(record->fields[0].longV == s1Value);
+        REQUIRE(record->fields[1].dateV == s2Value);
+        REQUIRE(record->fields[2].stringV == s3Value);
+        REQUIRE(record->fields[3].stringV == s4Value);
+        count++;
+    }
+    REQUIRE(count == 100);
+}
+
 TEST_CASE("Test Last query ", "[testLastQuery]") {
     CaseReporter cr("testLastQuery");
     prepareTimeseries();
@@ -349,12 +512,11 @@ TEST_CASE("Test Huge query ", "[testHugeQuery]") {
     unique_ptr<SessionDataSet> sessionDataSet = session->executeQueryStatement("select s1,s2,s3 from root.test.d1");
     sessionDataSet->setFetchSize(1024);
-    RowRecord* rowRecord;
     int count = 0;
     print_count = 0;
     std::cout << "\n\niterating " << total_count << " rows:" << std::endl;
     while (sessionDataSet->hasNext()) {
-        rowRecord = sessionDataSet->next();
+        auto rowRecord = sessionDataSet->next();
         REQUIRE(rowRecord->timestamp == count);
         REQUIRE(rowRecord->fields[0].longV== 1);
         REQUIRE(rowRecord->fields[1].longV == 2);
@@ -426,7 +588,7 @@ TEST_CASE("Test executeRawDataQuery ", "[executeRawDataQuery]") {
 
     int ts = startTs;
     while (sessionDataSet->hasNext()) {
-        RowRecord *rowRecordPtr = sessionDataSet->next();
+        auto rowRecordPtr = sessionDataSet->next();
         //cout << rowRecordPtr->toString();
         vector<Field> fields = rowRecordPtr->fields;
@@ -458,14 +620,14 @@
     REQUIRE(columns[3] == paths[2]);
     ts = startTs;
     while (sessionDataSet->hasNext()) {
-        RowRecord *rowRecordPtr = sessionDataSet->next();
+        auto rowRecordPtr = sessionDataSet->next();
         cout << rowRecordPtr->toString();
         vector<Field> fields = rowRecordPtr->fields;
         REQUIRE(rowRecordPtr->timestamp == ts);
         REQUIRE(fields[0].dataType == TSDataType::INT64);
         REQUIRE(fields[0].longV == 9);
-        REQUIRE(fields[1].dataType == TSDataType::NULLTYPE);
+        REQUIRE(fields[1].dataType == TSDataType::UNKNOWN);
         REQUIRE(fields[2].dataType == TSDataType::INT64);
         REQUIRE(fields[2].longV == 999);
     }
@@ -528,7 +690,7 @@
 
     int index = 0;
     while (sessionDataSet->hasNext()) {
-        RowRecord *rowRecordPtr = sessionDataSet->next();
+        auto rowRecordPtr = sessionDataSet->next();
         cout << rowRecordPtr->toString();
         vector<Field> fields = rowRecordPtr->fields;
@@ -550,7 +712,7 @@ TEST_CASE("Test executeLastDataQuery ", "[testExecuteLastDataQuery]") {
"[testExecuteLastDataQuery]") { index = 0; while (sessionDataSet->hasNext()) { - RowRecord *rowRecordPtr = sessionDataSet->next(); + auto rowRecordPtr = sessionDataSet->next(); cout << rowRecordPtr->toString(); vector fields = rowRecordPtr->fields; diff --git a/iotdb-client/client-cpp/src/test/main.cpp b/iotdb-client/client-cpp/src/test/main.cpp index 9476343384c5a..82b3e60ddb593 100644 --- a/iotdb-client/client-cpp/src/test/main.cpp +++ b/iotdb-client/client-cpp/src/test/main.cpp @@ -22,7 +22,7 @@ #include #include "Session.h" -Session *session = new Session("127.0.0.1", 6667, "root", "root"); +std::shared_ptr session = std::make_shared("127.0.0.1", 6667, "root", "root"); struct SessionListener : Catch::TestEventListenerBase { diff --git a/iotdb-client/client-py/SessionExample.py b/iotdb-client/client-py/SessionExample.py index c489fe7d03d14..ca610de9a0c24 100644 --- a/iotdb-client/client-py/SessionExample.py +++ b/iotdb-client/client-py/SessionExample.py @@ -18,7 +18,7 @@ # Uncomment the following line to use apache-iotdb module installed by pip3 import numpy as np - +from datetime import date from iotdb.Session import Session from iotdb.utils.BitMap import BitMap from iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor @@ -360,6 +360,57 @@ while session_data_set.has_next(): print(session_data_set.next()) +# insert tablet with new data types +measurements_new_type = ["s_01", "s_02", "s_03", "s_04"] +data_types_new_type = [ + TSDataType.DATE, + TSDataType.TIMESTAMP, + TSDataType.BLOB, + TSDataType.STRING, +] +values_new_type = [ + [date(2024, 1, 1), 1, b"\x12\x34", "test01"], + [date(2024, 1, 2), 2, b"\x12\x34", "test02"], + [date(2024, 1, 3), 3, b"\x12\x34", "test03"], + [date(2024, 1, 4), 4, b"\x12\x34", "test04"], +] +timestamps_new_type = [1, 2, 3, 4] +tablet_new_type = Tablet( + "root.sg_test_01.d_04", + measurements_new_type, + data_types_new_type, + values_new_type, + timestamps_new_type, +) +session.insert_tablet(tablet_new_type) +np_values_new_type = [ + np.array([date(2024, 2, 4), date(2024, 3, 4), date(2024, 4, 4), date(2024, 5, 4)]), + np.array([5, 6, 7, 8], TSDataType.INT64.np_dtype()), + np.array([b"\x12\x34", b"\x12\x34", b"\x12\x34", b"\x12\x34"]), + np.array(["test01", "test02", "test03", "test04"]), +] +np_timestamps_new_type = np.array([5, 6, 7, 8], TSDataType.INT64.np_dtype()) +np_tablet_new_type = NumpyTablet( + "root.sg_test_01.d_04", + measurements_new_type, + data_types_new_type, + np_values_new_type, + np_timestamps_new_type, +) +session.insert_tablet(np_tablet_new_type) +with session.execute_query_statement( + "select s_01,s_02,s_03,s_04 from root.sg_test_01.d_04" +) as dataset: + print(dataset.get_column_names()) + while dataset.has_next(): + print(dataset.next()) + +with session.execute_query_statement( + "select s_01,s_02,s_03,s_04 from root.sg_test_01.d_04" +) as dataset: + df = dataset.todf() + print(df.to_string()) + # delete database session.delete_storage_group("root.sg_test_01") diff --git a/iotdb-client/client-py/iotdb/Session.py b/iotdb-client/client-py/iotdb/Session.py index 5aca06c2781df..cece2ae474ee6 100644 --- a/iotdb-client/client-py/iotdb/Session.py +++ b/iotdb-client/client-py/iotdb/Session.py @@ -18,6 +18,7 @@ import logging import random +import sys import struct import time import warnings @@ -58,6 +59,7 @@ TSLastDataQueryReq, TSInsertStringRecordsOfOneDeviceReq, ) +from .tsfile.utils.DateUtils import parse_date_to_int from .utils.IoTDBConnectionException import IoTDBConnectionException logger = logging.getLogger("IoTDB") @@ 
-83,6 +85,9 @@ def __init__( fetch_size=DEFAULT_FETCH_SIZE, zone_id=DEFAULT_ZONE_ID, enable_redirection=True, + use_ssl=False, + ca_certs=None, + connection_timeout_in_ms=None, ): self.__host = host self.__port = port @@ -103,6 +108,9 @@ def __init__( self.__enable_redirection = enable_redirection self.__device_id_to_endpoint = None self.__endpoint_to_connection = None + self.__use_ssl = use_ssl + self.__ca_certs = ca_certs + self.__connection_timeout_in_ms = connection_timeout_in_ms @classmethod def init_from_node_urls( @@ -113,11 +121,23 @@ def init_from_node_urls( fetch_size=DEFAULT_FETCH_SIZE, zone_id=DEFAULT_ZONE_ID, enable_redirection=True, + use_ssl=False, + ca_certs=None, + connection_timeout_in_ms=None, ): if node_urls is None: raise RuntimeError("node urls is empty") session = Session( - None, None, user, password, fetch_size, zone_id, enable_redirection + None, + None, + user, + password, + fetch_size, + zone_id, + enable_redirection, + use_ssl=use_ssl, + ca_certs=ca_certs, + connection_timeout_in_ms=connection_timeout_in_ms, ) session.__hosts = [] session.__ports = [] @@ -165,16 +185,7 @@ def open(self, enable_rpc_compression=False): } def init_connection(self, endpoint): - transport = TTransport.TFramedTransport( - TSocket.TSocket(endpoint.ip, endpoint.port) - ) - - if not transport.isOpen(): - try: - transport.open() - except TTransport.TTransportException as e: - raise IoTDBConnectionException(e) from None - + transport = self.__get_transport(endpoint) if self.__enable_rpc_compression: client = Client(TCompactProtocol.TCompactProtocolAccelerated(transport)) else: @@ -221,6 +232,33 @@ def init_connection(self, endpoint): self.__zone_id = self.get_time_zone() return SessionConnection(client, transport, session_id, statement_id) + def __get_transport(self, endpoint): + if self.__use_ssl: + import ssl + from thrift.transport import TSSLSocket + + if sys.version_info >= (3, 10): + context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) + else: + context = ssl.SSLContext(ssl.PROTOCOL_TLS) + context.verify_mode = ssl.CERT_REQUIRED + context.check_hostname = True + context.load_verify_locations(cafile=self.__ca_certs) + socket = TSSLSocket.TSSLSocket( + host=endpoint.ip, port=endpoint.port, ssl_context=context + ) + else: + socket = TSocket.TSocket(endpoint.ip, endpoint.port) + socket.setTimeout(self.__connection_timeout_in_ms) + transport = TTransport.TFramedTransport(socket) + + if not transport.isOpen(): + try: + transport.open() + except TTransport.TTransportException as e: + raise IoTDBConnectionException(e) from None + return transport + def is_open(self): return not self.__is_close @@ -1442,6 +1480,36 @@ def value_to_bytes(data_types, values): values_tobe_packed.append(b"\x05") values_tobe_packed.append(len(value_bytes)) values_tobe_packed.append(value_bytes) + # TIMESTAMP + elif data_type == 8: + format_str_list.append("cq") + values_tobe_packed.append(b"\x08") + values_tobe_packed.append(value) + # DATE + elif data_type == 9: + format_str_list.append("ci") + values_tobe_packed.append(b"\x09") + values_tobe_packed.append(parse_date_to_int(value)) + # BLOB + elif data_type == 10: + format_str_list.append("ci") + format_str_list.append(str(len(value))) + format_str_list.append("s") + values_tobe_packed.append(b"\x0a") + values_tobe_packed.append(len(value)) + values_tobe_packed.append(value) + # STRING + elif data_type == 11: + if isinstance(value, str): + value_bytes = bytes(value, "utf-8") + else: + value_bytes = value + format_str_list.append("ci") + 
format_str_list.append(str(len(value_bytes))) + format_str_list.append("s") + values_tobe_packed.append(b"\x0b") + values_tobe_packed.append(len(value_bytes)) + values_tobe_packed.append(value_bytes) else: raise RuntimeError("Unsupported data type:" + str(data_type)) format_str = "".join(format_str_list) diff --git a/iotdb-client/client-py/iotdb/SessionPool.py b/iotdb-client/client-py/iotdb/SessionPool.py index 6f1d758079691..85cd17946d6d9 100644 --- a/iotdb-client/client-py/iotdb/SessionPool.py +++ b/iotdb-client/client-py/iotdb/SessionPool.py @@ -42,6 +42,9 @@ def __init__( time_zone: str = DEFAULT_TIME_ZONE, max_retry: int = DEFAULT_MAX_RETRY, enable_compression: bool = False, + use_ssl: bool = False, + ca_certs: str = None, + connection_timeout_in_ms: int = None, ): self.host = host self.port = port @@ -58,6 +61,9 @@ def __init__( self.time_zone = time_zone self.max_retry = max_retry self.enable_compression = enable_compression + self.use_ssl = use_ssl + self.ca_certs = ca_certs + self.connection_timeout_in_ms = connection_timeout_in_ms class SessionPool(object): @@ -80,6 +86,9 @@ def __construct_session(self) -> Session: self.__pool_config.password, self.__pool_config.fetch_size, self.__pool_config.time_zone, + use_ssl=self.__pool_config.use_ssl, + ca_certs=self.__pool_config.ca_certs, + connection_timeout_in_ms=self.__pool_config.connection_timeout_in_ms, ) else: @@ -90,6 +99,9 @@ def __construct_session(self) -> Session: self.__pool_config.password, self.__pool_config.fetch_size, self.__pool_config.time_zone, + use_ssl=self.__pool_config.use_ssl, + ca_certs=self.__pool_config.ca_certs, + connection_timeout_in_ms=self.__pool_config.connection_timeout_in_ms, ) session.open(self.__pool_config.enable_compression) @@ -102,7 +114,6 @@ def __poll_session(self): return q def get_session(self) -> Session: - if self.__closed: raise ConnectionError("SessionPool has already been closed.") @@ -137,7 +148,6 @@ def get_session(self) -> Session: return session def put_back(self, session: Session): - if self.__closed: raise ConnectionError( "SessionPool has already been closed, please close the session manually." @@ -150,7 +160,6 @@ def put_back(self, session: Session): self.__pool_size -= 1 def close(self): - with self.__lock: while not self.__queue.empty(): session = self.__queue.get(block=False) diff --git a/iotdb-client/client-py/iotdb/tsfile/utils/DateUtils.py b/iotdb-client/client-py/iotdb/tsfile/utils/DateUtils.py new file mode 100644 index 0000000000000..4cafedea6183b --- /dev/null +++ b/iotdb-client/client-py/iotdb/tsfile/utils/DateUtils.py @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +from datetime import date + + +class DateTimeParseException(Exception): + pass + + +def parse_int_to_date(date_int: int) -> date: + try: + year = date_int // 10000 + month = (date_int // 100) % 100 + day = date_int % 100 + return date(year, month, day) + except ValueError as e: + raise DateTimeParseException("Invalid date format.") from e + + +def parse_date_to_int(local_date: date) -> int: + if local_date is None: + raise DateTimeParseException("Date expression is none or empty.") + if local_date.year < 1000: + raise DateTimeParseException("Year must be between 1000 and 9999.") + return local_date.year * 10000 + local_date.month * 100 + local_date.day diff --git a/iotdb-client/client-py/iotdb/utils/Field.py b/iotdb-client/client-py/iotdb/utils/Field.py index 913c7594bf59c..b27313f5a9080 100644 --- a/iotdb-client/client-py/iotdb/utils/Field.py +++ b/iotdb-client/client-py/iotdb/utils/Field.py @@ -17,7 +17,8 @@ # # for package -from .IoTDBConstants import TSDataType +from iotdb.utils.IoTDBConstants import TSDataType +from iotdb.tsfile.utils.DateUtils import parse_int_to_date import numpy as np import pandas as pd @@ -36,15 +37,25 @@ def copy(field): if output.get_data_type() is not None: if output.get_data_type() == TSDataType.BOOLEAN: output.set_bool_value(field.get_bool_value()) - elif output.get_data_type() == TSDataType.INT32: + elif ( + output.get_data_type() == TSDataType.INT32 + or output.get_data_type() == TSDataType.DATE + ): output.set_int_value(field.get_int_value()) - elif output.get_data_type() == TSDataType.INT64: + elif ( + output.get_data_type() == TSDataType.INT64 + or output.get_data_type() == TSDataType.TIMESTAMP + ): output.set_long_value(field.get_long_value()) elif output.get_data_type() == TSDataType.FLOAT: output.set_float_value(field.get_float_value()) elif output.get_data_type() == TSDataType.DOUBLE: output.set_double_value(field.get_double_value()) - elif output.get_data_type() == TSDataType.TEXT: + elif ( + output.get_data_type() == TSDataType.TEXT + or output.get_data_type() == TSDataType.STRING + or output.get_data_type() == TSDataType.BLOB + ): output.set_binary_value(field.get_binary_value()) else: raise Exception( @@ -64,6 +75,7 @@ def set_bool_value(self, value: bool): def get_bool_value(self): if self.__data_type is None: raise Exception("Null Field Exception!") + if ( self.__data_type != TSDataType.BOOLEAN or self.value is None @@ -80,6 +92,7 @@ def get_int_value(self): raise Exception("Null Field Exception!") if ( self.__data_type != TSDataType.INT32 + and self.__data_type != TSDataType.DATE or self.value is None or self.value is pd.NA ): @@ -94,6 +107,7 @@ def get_long_value(self): raise Exception("Null Field Exception!") if ( self.__data_type != TSDataType.INT64 + and self.__data_type != TSDataType.TIMESTAMP or self.value is None or self.value is pd.NA ): @@ -136,17 +150,34 @@ def get_binary_value(self): raise Exception("Null Field Exception!") if ( self.__data_type != TSDataType.TEXT + and self.__data_type != TSDataType.STRING + and self.__data_type != TSDataType.BLOB or self.value is None or self.value is pd.NA ): return None return self.value + def get_date_value(self): + if self.__data_type is None: + raise Exception("Null Field Exception!") + if ( + self.__data_type != TSDataType.DATE + or self.value is None + or self.value is pd.NA + ): + return None + return parse_int_to_date(self.value) + def get_string_value(self): if self.__data_type is None or self.value is None or self.value is pd.NA: return "None" - elif self.__data_type == 5: + # 
TEXT, STRING + elif self.__data_type == 5 or self.__data_type == 11: return self.value.decode("utf-8") + # BLOB + elif self.__data_type == 10: + return str(hex(int.from_bytes(self.value, byteorder="big"))) else: return str(self.get_object_value(self.__data_type)) @@ -163,13 +194,18 @@ def get_object_value(self, data_type): return bool(self.value) elif data_type == 1: return np.int32(self.value) - elif data_type == 2: + elif data_type == 2 or data_type == 8: return np.int64(self.value) elif data_type == 3: return np.float32(self.value) elif data_type == 4: return np.float64(self.value) - return self.value + elif data_type == 9: + return parse_int_to_date(self.value) + elif data_type == 5 or data_type == 10 or data_type == 11: + return self.value + else: + raise RuntimeError("Unsupported data type:" + str(data_type)) @staticmethod def get_field(value, data_type): diff --git a/iotdb-client/client-py/iotdb/utils/IoTDBConstants.py b/iotdb-client/client-py/iotdb/utils/IoTDBConstants.py index 9b671663ff4fa..4b9082b5353fd 100644 --- a/iotdb-client/client-py/iotdb/utils/IoTDBConstants.py +++ b/iotdb-client/client-py/iotdb/utils/IoTDBConstants.py @@ -15,6 +15,7 @@ # specific language governing permissions and limitations # under the License. # +from datetime import date from enum import unique, IntEnum import numpy as np @@ -27,6 +28,10 @@ class TSDataType(IntEnum): FLOAT = 3 DOUBLE = 4 TEXT = 5 + TIMESTAMP = 8 + DATE = 9 + BLOB = 10 + STRING = 11 def np_dtype(self): return { @@ -35,7 +40,11 @@ def np_dtype(self): TSDataType.DOUBLE: np.dtype(">f8"), TSDataType.INT32: np.dtype(">i4"), TSDataType.INT64: np.dtype(">i8"), - TSDataType.TEXT: np.dtype("str"), + TSDataType.TEXT: str, + TSDataType.TIMESTAMP: np.dtype(">i8"), + TSDataType.DATE: date, + TSDataType.BLOB: bytes, + TSDataType.STRING: str, }[self] diff --git a/iotdb-client/client-py/iotdb/utils/IoTDBRpcDataSet.py b/iotdb-client/client-py/iotdb/utils/IoTDBRpcDataSet.py index 3151b2acfd8c5..2249e1c7c0d46 100644 --- a/iotdb-client/client-py/iotdb/utils/IoTDBRpcDataSet.py +++ b/iotdb-client/client-py/iotdb/utils/IoTDBRpcDataSet.py @@ -24,6 +24,8 @@ import pandas as pd from thrift.transport import TTransport from iotdb.thrift.rpc.IClientRPCService import TSFetchResultsReq, TSCloseOperationReq +from iotdb.tsfile.utils.DateUtils import parse_int_to_date +from iotdb.utils.IoTDBConnectionException import IoTDBConnectionException from iotdb.utils.IoTDBConstants import TSDataType logger = logging.getLogger("IoTDB") @@ -168,25 +170,31 @@ def construct_one_data_frame(self): data_type = self.column_type_deduplicated_list[location] value_buffer = self.__query_data_set.valueList[location] value_buffer_len = len(value_buffer) + # DOUBLE if data_type == 4: data_array = np.frombuffer( value_buffer, np.dtype(np.double).newbyteorder(">") ) + # FLOAT elif data_type == 3: data_array = np.frombuffer( value_buffer, np.dtype(np.float32).newbyteorder(">") ) + # BOOLEAN elif data_type == 0: data_array = np.frombuffer(value_buffer, np.dtype("?")) - elif data_type == 1: + # INT32, DATE + elif data_type == 1 or data_type == 9: data_array = np.frombuffer( value_buffer, np.dtype(np.int32).newbyteorder(">") ) - elif data_type == 2: + # INT64, TIMESTAMP + elif data_type == 2 or data_type == 8: data_array = np.frombuffer( value_buffer, np.dtype(np.int64).newbyteorder(">") ) - elif data_type == 5: + # TEXT, STRING, BLOB + elif data_type == 5 or data_type == 11 or data_type == 10: j = 0 offset = 0 data_array = [] @@ -208,13 +216,8 @@ def construct_one_data_frame(self): data_array = 
data_array.byteswap().view( data_array.dtype.newbyteorder("<") ) - # self.__query_data_set.valueList[location] = None if len(data_array) < total_length: - # INT32 or INT64 or boolean - if data_type == 0 or data_type == 1 or data_type == 2: - tmp_array = np.full(total_length, np.nan, np.float32) - else: - tmp_array = np.full(total_length, None, dtype=object) + tmp_array = np.full(total_length, None, dtype=object) bitmap_buffer = self.__query_data_set.bitmapList[location] buffer = _to_bitbuffer(bitmap_buffer) @@ -223,10 +226,13 @@ def construct_one_data_frame(self): bit_mask = bit_mask[:total_length] tmp_array[bit_mask] = data_array - if data_type == 1: + # INT32, DATE + if data_type == 1 or data_type == 9: tmp_array = pd.Series(tmp_array, dtype="Int32") - elif data_type == 2: + # INT64, TIMESTAMP + elif data_type == 2 or data_type == 8: tmp_array = pd.Series(tmp_array, dtype="Int64") + # BOOLEAN elif data_type == 0: tmp_array = pd.Series(tmp_array, dtype="boolean") data_array = tmp_array @@ -251,7 +257,7 @@ def _has_next_result_set(self): return True return False - def resultset_to_pandas(self): + def result_set_to_pandas(self): result = {} for column_name in self.__column_name_list: result[column_name] = [] @@ -283,25 +289,31 @@ def resultset_to_pandas(self): data_type = self.column_type_deduplicated_list[location] value_buffer = self.__query_data_set.valueList[location] value_buffer_len = len(value_buffer) + # DOUBLE if data_type == 4: data_array = np.frombuffer( value_buffer, np.dtype(np.double).newbyteorder(">") ) + # FLOAT elif data_type == 3: data_array = np.frombuffer( value_buffer, np.dtype(np.float32).newbyteorder(">") ) + # BOOLEAN elif data_type == 0: data_array = np.frombuffer(value_buffer, np.dtype("?")) + # INT32 elif data_type == 1: data_array = np.frombuffer( value_buffer, np.dtype(np.int32).newbyteorder(">") ) - elif data_type == 2: + # INT64, TIMESTAMP + elif data_type == 2 or data_type == 8: data_array = np.frombuffer( value_buffer, np.dtype(np.int64).newbyteorder(">") ) - elif data_type == 5: + # TEXT, STRING + elif data_type == 5 or data_type == 11: j = 0 offset = 0 data_array = [] @@ -317,7 +329,30 @@ def resultset_to_pandas(self): data_array.append(value) j += 1 offset += length - data_array = np.array(data_array, dtype=object) + data_array = pd.Series(data_array).astype(str) + # BLOB + elif data_type == 10: + j = 0 + offset = 0 + data_array = [] + while offset < value_buffer_len: + length = int.from_bytes( + value_buffer[offset : offset + 4], + byteorder="big", + signed=False, + ) + offset += 4 + value = value_buffer[offset : offset + length] + data_array.append(value) + j += 1 + offset += length + data_array = pd.Series(data_array) + # DATE + elif data_type == 9: + data_array = np.frombuffer( + value_buffer, np.dtype(np.int32).newbyteorder(">") + ) + data_array = pd.Series(data_array).apply(parse_int_to_date) else: raise RuntimeError("unsupported data type {}.".format(data_type)) if data_array.dtype.byteorder == ">": @@ -327,13 +362,26 @@ def resultset_to_pandas(self): self.__query_data_set.valueList[location] = None tmp_array = [] if len(data_array) < total_length: - if data_type == 1 or data_type == 2: - tmp_array = np.full(total_length, np.nan, np.float32) + # BOOLEAN, INT32, INT64, TIMESTAMP + if ( + data_type == 0 + or data_type == 1 + or data_type == 2 + or data_type == 8 + ): + tmp_array = np.full(total_length, np.nan, dtype=np.float32) + # FLOAT, DOUBLE elif data_type == 3 or data_type == 4: - tmp_array = np.full(total_length, np.nan, data_array.dtype) - 
elif data_type == 0: - tmp_array = np.full(total_length, np.nan, np.float32) - elif data_type == 5: + tmp_array = np.full( + total_length, np.nan, dtype=data_array.dtype + ) + # TEXT, STRING, BLOB, DATE + elif ( + data_type == 5 + or data_type == 11 + or data_type == 10 + or data_type == 9 + ): tmp_array = np.full(total_length, None, dtype=data_array.dtype) bitmap_buffer = self.__query_data_set.bitmapList[location] @@ -370,6 +418,8 @@ def resultset_to_pandas(self): return df def fetch_results(self): + if self.__is_closed: + raise IoTDBConnectionException("This DataSet is already closed") self.__rows_index = 0 request = TSFetchResultsReq( self.__session_id, diff --git a/iotdb-client/client-py/iotdb/utils/NumpyTablet.py b/iotdb-client/client-py/iotdb/utils/NumpyTablet.py index 4577f7f880c42..8602ed41e2369 100644 --- a/iotdb-client/client-py/iotdb/utils/NumpyTablet.py +++ b/iotdb-client/client-py/iotdb/utils/NumpyTablet.py @@ -17,6 +17,10 @@ # import struct + +import numpy as np + +from iotdb.tsfile.utils.DateUtils import parse_date_to_int from iotdb.utils.IoTDBConstants import TSDataType from iotdb.utils.BitMap import BitMap @@ -38,7 +42,7 @@ def __init__( :param measurements: List, sensors :param data_types: TSDataType List, specify value types for sensors :param values: List of numpy array, the values of each column should be the inner numpy array - :param timestamps: Numpy array, the timestamps + :param timestamps: ndarray, the timestamps """ if len(values) > 0 and len(values[0]) != len(timestamps): raise RuntimeError( @@ -102,12 +106,22 @@ def get_binary_values(self): bs_len = 0 bs_list = [] for data_type, value in zip(self.__data_types, self.__values): - # TEXT - if data_type == 5: + # BOOLEAN, INT32, INT64, FLOAT, DOUBLE, TIMESTAMP + if ( + data_type == 0 + or data_type == 1 + or data_type == 2 + or data_type == 3 + or data_type == 4 + or data_type == 8 + ): + bs = value.tobytes() + # TEXT, STRING, BLOB + elif data_type == 5 or data_type == 11 or data_type == 10: format_str_list = [">"] values_tobe_packed = [] for str_list in value: - # Fot TEXT, it's same as the original solution + # For TEXT, it's same as the original solution if isinstance(str_list, str): value_bytes = bytes(str_list, "utf-8") else: @@ -119,11 +133,18 @@ def get_binary_values(self): values_tobe_packed.append(value_bytes) format_str = "".join(format_str_list) bs = struct.pack(format_str, *values_tobe_packed) - # Non-TEXT + # DATE + elif data_type == 9: + bs = ( + np.vectorize(parse_date_to_int)(value) + .astype(np.dtype(">i4")) + .tobytes() + ) else: - bs = value.tobytes() + raise RuntimeError("Unsupported data type:" + str(data_type)) bs_list.append(bs) bs_len += len(bs) + if self.bitmaps is not None: format_str_list = [">"] values_tobe_packed = [] diff --git a/iotdb-client/client-py/iotdb/utils/RowRecord.py b/iotdb-client/client-py/iotdb/utils/RowRecord.py index 16a88f1edf9b1..91bcebf948b0d 100644 --- a/iotdb-client/client-py/iotdb/utils/RowRecord.py +++ b/iotdb-client/client-py/iotdb/utils/RowRecord.py @@ -17,15 +17,11 @@ # # for package -from .Field import Field - -# for debug -# from IoTDBConstants import TSDataType -# from Field import Field +from iotdb.utils.Field import Field class RowRecord(object): - def __init__(self, timestamp, field_list=None): + def __init__(self, timestamp, field_list: list = None): self.__timestamp = timestamp self.__field_list = field_list diff --git a/iotdb-client/client-py/iotdb/utils/SessionDataSet.py b/iotdb-client/client-py/iotdb/utils/SessionDataSet.py index 
a7ce7dfc40cb3..f4d63035efc5e 100644 --- a/iotdb-client/client-py/iotdb/utils/SessionDataSet.py +++ b/iotdb-client/client-py/iotdb/utils/SessionDataSet.py @@ -124,29 +124,33 @@ def construct_row_record_from_data_frame(self): def close_operation_handle(self): self.iotdb_rpc_data_set.close() - def todf(self): - return resultset_to_pandas(self) + def todf(self) -> pd.DataFrame: + return result_set_to_pandas(self) -def resultset_to_pandas(result_set: SessionDataSet) -> pd.DataFrame: +def result_set_to_pandas(result_set: SessionDataSet) -> pd.DataFrame: """ Transforms a SessionDataSet from IoTDB to a Pandas Data Frame Each Field from IoTDB is a column in Pandas :param result_set: :return: """ - return result_set.iotdb_rpc_data_set.resultset_to_pandas() + return result_set.iotdb_rpc_data_set.result_set_to_pandas() def get_typed_point(field: Field, none_value=None): choices = { # In Case of Boolean, cast to 0 / 1 - TSDataType.BOOLEAN: lambda field: 1 if field.get_bool_value() else 0, - TSDataType.TEXT: lambda field: field.get_string_value(), - TSDataType.FLOAT: lambda field: field.get_float_value(), - TSDataType.INT32: lambda field: field.get_int_value(), - TSDataType.DOUBLE: lambda field: field.get_double_value(), - TSDataType.INT64: lambda field: field.get_long_value(), + TSDataType.BOOLEAN: lambda f: 1 if f.get_bool_value() else 0, + TSDataType.TEXT: lambda f: f.get_string_value(), + TSDataType.FLOAT: lambda f: f.get_float_value(), + TSDataType.INT32: lambda f: f.get_int_value(), + TSDataType.DOUBLE: lambda f: f.get_double_value(), + TSDataType.INT64: lambda f: f.get_long_value(), + TSDataType.TIMESTAMP: lambda f: f.get_long_value(), + TSDataType.STRING: lambda f: f.get_string_value(), + TSDataType.DATE: lambda f: f.get_date_value(), + TSDataType.BLOB: lambda f: f.get_binary_value(), } result_next_type: TSDataType = field.get_data_type() diff --git a/iotdb-client/client-py/iotdb/utils/Tablet.py b/iotdb-client/client-py/iotdb/utils/Tablet.py index 508361dc85748..0cba41d4bdd9e 100644 --- a/iotdb-client/client-py/iotdb/utils/Tablet.py +++ b/iotdb-client/client-py/iotdb/utils/Tablet.py @@ -18,6 +18,9 @@ import struct +from typing import List, Union + +from iotdb.tsfile.utils.DateUtils import parse_date_to_int from iotdb.utils.BitMap import BitMap @@ -89,13 +92,14 @@ def get_binary_timestamps(self): def get_binary_values(self): format_str_list = [">"] values_tobe_packed = [] - bitmaps = [] + bitmaps: List[Union[BitMap, None]] = [] has_none = False for i in range(self.__column_number): bitmap = None bitmaps.append(bitmap) - data_type_value = self.__data_types[i] - if data_type_value == 0: + data_type = self.__data_types[i] + # BOOLEAN + if data_type == 0: format_str_list.append(str(self.__row_number)) format_str_list.append("?") for j in range(self.__row_number): @@ -105,8 +109,8 @@ def get_binary_values(self): values_tobe_packed.append(False) self.__mark_none_value(bitmaps, i, j) has_none = True - - elif data_type_value == 1: + # INT32 + elif data_type == 1: format_str_list.append(str(self.__row_number)) format_str_list.append("i") for j in range(self.__row_number): @@ -116,8 +120,8 @@ def get_binary_values(self): values_tobe_packed.append(0) self.__mark_none_value(bitmaps, i, j) has_none = True - - elif data_type_value == 2: + # INT64 or TIMESTAMP + elif data_type == 2 or data_type == 8: format_str_list.append(str(self.__row_number)) format_str_list.append("q") for j in range(self.__row_number): @@ -127,8 +131,8 @@ def get_binary_values(self): values_tobe_packed.append(0) 
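# a None cell is packed as a type-appropriate placeholder (0 here) and its
# position is recorded in the per-column BitMap, so placeholders can be
# told apart from real values when the tablet is decoded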
self.__mark_none_value(bitmaps, i, j) has_none = True - - elif data_type_value == 3: + # FLOAT + elif data_type == 3: format_str_list.append(str(self.__row_number)) format_str_list.append("f") for j in range(self.__row_number): @@ -138,8 +142,8 @@ def get_binary_values(self): values_tobe_packed.append(0) self.__mark_none_value(bitmaps, i, j) has_none = True - - elif data_type_value == 4: + # DOUBLE + elif data_type == 4: format_str_list.append(str(self.__row_number)) format_str_list.append("d") for j in range(self.__row_number): @@ -149,8 +153,8 @@ def get_binary_values(self): values_tobe_packed.append(0) self.__mark_none_value(bitmaps, i, j) has_none = True - - elif data_type_value == 5: + # TEXT, STRING, BLOB + elif data_type == 5 or data_type == 11 or data_type == 10: for j in range(self.__row_number): if self.__values[j][i] is not None: if isinstance(self.__values[j][i], str): @@ -171,7 +175,19 @@ def get_binary_values(self): values_tobe_packed.append(value_bytes) self.__mark_none_value(bitmaps, i, j) has_none = True - + # DATE + elif data_type == 9: + format_str_list.append(str(self.__row_number)) + format_str_list.append("i") + for j in range(self.__row_number): + if self.__values[j][i] is not None: + values_tobe_packed.append( + parse_date_to_int(self.__values[j][i]) + ) + else: + values_tobe_packed.append(0) + self.__mark_none_value(bitmaps, i, j) + has_none = True else: raise RuntimeError("Unsupported data type:" + str(self.__data_types[i])) diff --git a/iotdb-client/client-py/pom.xml b/iotdb-client/client-py/pom.xml index 0d942986ac8ab..84a5ac73d9077 100644 --- a/iotdb-client/client-py/pom.xml +++ b/iotdb-client/client-py/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-client - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT iotdb-python-api IoTDB: Client: Python-API @@ -34,13 +34,13 @@ org.apache.iotdb iotdb-thrift-commons - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT provided org.apache.iotdb iotdb-thrift - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT provided @@ -151,7 +151,7 @@ copy-resources - generate-sources + process-resources utf-8 ${basedir}/ diff --git a/iotdb-client/client-py/release.sh b/iotdb-client/client-py/release.sh index 692047b8b57b6..60e466eea7e7c 100755 --- a/iotdb-client/client-py/release.sh +++ b/iotdb-client/client-py/release.sh @@ -25,6 +25,7 @@ python3 --version rm -Rf build rm -Rf dist rm -Rf iotdb_session.egg_info +rm -f pyproject.toml # (Re-)build generated code (cd ../..; mvn clean package -pl iotdb-client/client-py -am) diff --git a/iotdb-client/client-py/tests/integration/test_new_data_types.py b/iotdb-client/client-py/tests/integration/test_new_data_types.py new file mode 100644 index 0000000000000..ce89b5b831652 --- /dev/null +++ b/iotdb-client/client-py/tests/integration/test_new_data_types.py @@ -0,0 +1,184 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +# +from datetime import date + +import numpy as np + +from iotdb.Session import Session +from iotdb.SessionPool import PoolConfig, create_session_pool +from iotdb.utils.IoTDBConstants import TSDataType +from iotdb.utils.NumpyTablet import NumpyTablet +from iotdb.utils.Tablet import Tablet +from iotdb.IoTDBContainer import IoTDBContainer + + +def test_session(): + session_test() + + +def test_session_pool(): + session_test(True) + + +def session_test(use_session_pool=False): + with IoTDBContainer("iotdb:dev") as db: + db: IoTDBContainer + + if use_session_pool: + pool_config = PoolConfig( + db.get_container_host_ip(), + db.get_exposed_port(6667), + "root", + "root", + None, + 1024, + "Asia/Shanghai", + 3, + ) + session_pool = create_session_pool(pool_config, 1, 3000) + session = session_pool.get_session() + else: + session = Session(db.get_container_host_ip(), db.get_exposed_port(6667)) + session.open(False) + + if not session.is_open(): + print("can't open session") + exit(1) + + device_id = "root.sg_test_01.d_04" + measurements_new_type = ["s_01", "s_02", "s_03", "s_04"] + data_types_new_type = [ + TSDataType.DATE, + TSDataType.TIMESTAMP, + TSDataType.BLOB, + TSDataType.STRING, + ] + values_new_type = [ + [date(2024, 1, 1), 1, b"\x12\x34", "test01"], + [date(2024, 1, 2), 2, b"\x12\x34", "test02"], + [date(2024, 1, 3), 3, b"\x12\x34", "test03"], + [date(2024, 1, 4), 4, b"\x12\x34", "test04"], + ] + timestamps_new_type = [1, 2, 3, 4] + tablet_new_type = Tablet( + device_id, + measurements_new_type, + data_types_new_type, + values_new_type, + timestamps_new_type, + ) + session.insert_tablet(tablet_new_type) + np_values_new_type = [ + np.array( + [date(2024, 1, 5), date(2024, 1, 6), date(2024, 1, 7), date(2024, 1, 8)] + ), + np.array([5, 6, 7, 8], TSDataType.INT64.np_dtype()), + np.array([b"\x12\x34", b"\x12\x34", b"\x12\x34", b"\x12\x34"]), + np.array(["test05", "test06", "test07", "test08"]), + ] + np_timestamps_new_type = np.array([5, 6, 7, 8], TSDataType.INT64.np_dtype()) + np_tablet_new_type = NumpyTablet( + device_id, + measurements_new_type, + data_types_new_type, + np_values_new_type, + np_timestamps_new_type, + ) + session.insert_tablet(np_tablet_new_type) + session.insert_records( + [device_id, device_id], + [9, 10], + [measurements_new_type, measurements_new_type], + [data_types_new_type, data_types_new_type], + [ + [date(2024, 1, 9), 9, b"\x12\x34", "test09"], + [date(2024, 1, 10), 10, b"\x12\x34", "test010"], + ], + ) + + with session.execute_query_statement( + "select s_01,s_02,s_03,s_04 from root.sg_test_01.d_04" + ) as dataset: + print(dataset.get_column_names()) + while dataset.has_next(): + print(dataset.next()) + + with session.execute_query_statement( + "select s_01,s_02,s_03,s_04 from root.sg_test_01.d_04" + ) as dataset: + df = dataset.todf() + print(df.to_string()) + + with session.execute_query_statement( + "select s_01,s_02,s_03,s_04 from root.sg_test_01.d_04" + ) as dataset: + cnt = 0 + while dataset.has_next(): + row_record = dataset.next() + timestamp = row_record.get_timestamp() + assert row_record.get_fields()[0].get_date_value() == date( + 2024, 1, timestamp + ) + assert ( + row_record.get_fields()[1].get_object_value(TSDataType.TIMESTAMP) + == timestamp + ) + assert row_record.get_fields()[2].get_binary_value() == b"\x12\x34" + assert row_record.get_fields()[3].get_string_value() == "test0" + str( + timestamp + ) + cnt += 1 + assert cnt == 10 + + with 
session.execute_query_statement( + "select s_01,s_02,s_03,s_04 from root.sg_test_01.d_04" + ) as dataset: + df = dataset.todf() + rows, columns = df.shape + assert rows == 10 + assert columns == 5 + + session.insert_records( + [device_id, device_id], + [11, 12], + [measurements_new_type, ["s_02", "s_03", "s_04"]], + [ + data_types_new_type, + [ + TSDataType.TIMESTAMP, + TSDataType.BLOB, + TSDataType.STRING, + ], + ], + [ + [date(1971, 1, 1), 11, b"\x12\x34", "test11"], + [12, b"\x12\x34", "test12"], + ], + ) + + with session.execute_query_statement( + "select s_01,s_02,s_03,s_04 from root.sg_test_01.d_04 where time > 10" + ) as dataset: + cnt = 0 + while dataset.has_next(): + cnt += 1 + print(dataset.next()) + assert cnt == 2 + + # close session connection. + session.close() diff --git a/iotdb-client/isession/pom.xml b/iotdb-client/isession/pom.xml index ed15ef7d98feb..9fee5aac7f11a 100644 --- a/iotdb-client/isession/pom.xml +++ b/iotdb-client/isession/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-client - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT isession IoTDB: Client: isession @@ -32,7 +32,7 @@ org.apache.iotdb service-rpc - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.tsfile @@ -47,12 +47,12 @@ org.apache.iotdb iotdb-thrift - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-thrift-commons - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.thrift diff --git a/iotdb-client/isession/src/main/java/org/apache/iotdb/isession/SessionDataSet.java b/iotdb-client/isession/src/main/java/org/apache/iotdb/isession/SessionDataSet.java index 4b3d837c07ed5..822c464ddcadf 100644 --- a/iotdb-client/isession/src/main/java/org/apache/iotdb/isession/SessionDataSet.java +++ b/iotdb-client/isession/src/main/java/org/apache/iotdb/isession/SessionDataSet.java @@ -29,10 +29,12 @@ import org.apache.tsfile.enums.TSDataType; import org.apache.tsfile.read.common.Field; import org.apache.tsfile.read.common.RowRecord; +import org.apache.tsfile.utils.Binary; import org.apache.tsfile.write.UnSupportedDataTypeException; import java.nio.ByteBuffer; import java.sql.Timestamp; +import java.time.LocalDate; import java.time.ZoneId; import java.util.ArrayList; import java.util.List; @@ -347,6 +349,22 @@ public Timestamp getTimestamp(String columnName) throws StatementExecutionExcept return ioTDBRpcDataSet.getTimestamp(columnName); } + public LocalDate getDate(int columnIndex) throws StatementExecutionException { + return ioTDBRpcDataSet.getDate(columnIndex); + } + + public LocalDate getDate(String columnName) throws StatementExecutionException { + return ioTDBRpcDataSet.getDate(columnName); + } + + public Binary getBlob(int columnIndex) throws StatementExecutionException { + return ioTDBRpcDataSet.getBinary(columnIndex); + } + + public Binary getBlob(String columnName) throws StatementExecutionException { + return ioTDBRpcDataSet.getBinary(columnName); + } + public int findColumn(String columnName) { return ioTDBRpcDataSet.findColumn(columnName); } diff --git a/iotdb-client/jdbc/pom.xml b/iotdb-client/jdbc/pom.xml index 34634e26d1d21..83653859d36c7 100644 --- a/iotdb-client/jdbc/pom.xml +++ b/iotdb-client/jdbc/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-client - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT iotdb-jdbc IoTDB: Client: Jdbc @@ -38,12 +38,12 @@ org.apache.iotdb service-rpc - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-thrift-commons - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.tsfile @@ -58,7 +58,7 @@ org.apache.iotdb iotdb-thrift - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.thrift diff --git 
a/iotdb-client/jdbc/src/main/feature/feature.xml b/iotdb-client/jdbc/src/main/feature/feature.xml index 8f37dae5f8c1f..033fe46d47c6c 100644 --- a/iotdb-client/jdbc/src/main/feature/feature.xml +++ b/iotdb-client/jdbc/src/main/feature/feature.xml @@ -18,7 +18,7 @@ --> - + <details>Feature to install required Bundle to use IoTDB inside Karaf container</details> <feature>wrap</feature> <feature>scr</feature>
diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/GroupedLSBWatermarkEncoder.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/GroupedLSBWatermarkEncoder.java index 6ec6a9b5baf10..d1d830fb2c2cd 100644 --- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/GroupedLSBWatermarkEncoder.java +++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/GroupedLSBWatermarkEncoder.java @@ -134,6 +134,10 @@ public RowRecord encodeRecord(RowRecord rowRecord) { double originDoubleValue = field.getDoubleV(); field.setDoubleV(encodeDouble(originDoubleValue, timestamp)); break; + case BLOB: + case STRING: + case BOOLEAN: + case TEXT: default: } } diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDataSourceFactory.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDataSourceFactory.java index 81259278dc8e9..8e601d5df6fa5 100644 --- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDataSourceFactory.java +++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDataSourceFactory.java @@ -45,16 +45,13 @@ public void setProperties(IoTDBDataSource ds, Properties prop) { String url = (String) properties.remove(DataSourceFactory.JDBC_URL); if (url != null) { ds.setUrl(url); - logger.info("URL set {}", url); } String user = (String) properties.remove(DataSourceFactory.JDBC_USER); ds.setUser(user); - logger.info("User set {}", user); String password = (String) properties.remove(DataSourceFactory.JDBC_PASSWORD); ds.setPassword(password); - logger.info("Password set {}", password); logger.info("Remaining properties {}", properties.size()); diff --git a/iotdb-client/pom.xml b/iotdb-client/pom.xml index fe67109c928f0..fe7d75f9fc9b5 100644 --- a/iotdb-client/pom.xml +++ b/iotdb-client/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-parent - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT iotdb-client pom diff --git a/iotdb-client/service-rpc/pom.xml b/iotdb-client/service-rpc/pom.xml index 693a97e18556d..885c066868756 100644 --- a/iotdb-client/service-rpc/pom.xml +++ b/iotdb-client/service-rpc/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-client - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT service-rpc IoTDB: Client: Service-RPC @@ -60,12 +60,12 @@ org.apache.iotdb iotdb-thrift - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-thrift-commons - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.thrift diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/IoTDBRpcDataSet.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/IoTDBRpcDataSet.java index 3e405c95d82d4..8fc484fd00c0a 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/IoTDBRpcDataSet.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/IoTDBRpcDataSet.java @@ -36,6 +36,7 @@ import java.nio.ByteBuffer; import java.sql.Timestamp; +import java.time.LocalDate; import java.time.ZoneId; import java.util.ArrayList; import java.util.BitSet; @@ -320,6 +321,9 @@ public boolean next() throws StatementExecutionException, IoTDBConnectionExcepti } public boolean fetchResults() throws StatementExecutionException, IoTDBConnectionException { + if (isClosed) { + throw new IoTDBConnectionException("This DataSet is already closed"); + } TSFetchResultsReq req = new TSFetchResultsReq(sessionId, sql, fetchSize, queryId, true); req.setTimeout(timeout); try { @@ -372,7 +376,7 @@ public boolean isNull(int columnIndex) throws StatementExecutionException { int index = columnOrdinalMap.get(findColumnNameByIndex(columnIndex)) - START_INDEX; //
time column will never be null if (index < 0) { - return true; + return false; } return isNull(index, tsBlockIndex); } @@ -381,7 +385,7 @@ public boolean isNull(String columnName) { int index = columnOrdinalMap.get(columnName) - START_INDEX; // time column will never be null if (index < 0) { - return true; + return false; } return isNull(index, tsBlockIndex); } @@ -466,6 +470,7 @@ public long getLong(int columnIndex) throws StatementExecutionException { public long getLong(String columnName) throws StatementExecutionException { checkRecord(); if (columnName.equals(TIMESTAMP_STR)) { + lastReadWasNull = false; return curTsBlock.getTimeByIndex(tsBlockIndex); } int index = columnOrdinalMap.get(columnName) - START_INDEX; @@ -516,11 +521,21 @@ public String getString(String columnName) throws StatementExecutionException { } public Timestamp getTimestamp(int columnIndex) throws StatementExecutionException { - return new Timestamp(getLong(columnIndex)); + return getTimestamp(findColumnNameByIndex(columnIndex)); } public Timestamp getTimestamp(String columnName) throws StatementExecutionException { - return getTimestamp(findColumn(columnName)); + long longValue = getLong(columnName); + return lastReadWasNull ? null : new Timestamp(longValue); + } + + public LocalDate getDate(int columnIndex) throws StatementExecutionException { + return getDate(findColumnNameByIndex(columnIndex)); + } + + public LocalDate getDate(String columnName) throws StatementExecutionException { + int intValue = getInt(columnName); + return lastReadWasNull ? null : DateUtils.parseIntToLocalDate(intValue); } public TSDataType getDataType(int columnIndex) throws StatementExecutionException { @@ -544,6 +559,7 @@ public int findColumn(String columnName) { public String getValueByName(String columnName) throws StatementExecutionException { checkRecord(); if (columnName.equals(TIMESTAMP_STR)) { + lastReadWasNull = false; return String.valueOf(curTsBlock.getTimeByIndex(tsBlockIndex)); } int index = columnOrdinalMap.get(columnName) - START_INDEX; @@ -587,6 +603,7 @@ public String getString(int index, TSDataType tsDataType) { public Object getObjectByName(String columnName) throws StatementExecutionException { checkRecord(); if (columnName.equals(TIMESTAMP_STR)) { + lastReadWasNull = false; return new Timestamp(curTsBlock.getTimeByIndex(tsBlockIndex)); } int index = columnOrdinalMap.get(columnName) - START_INDEX; diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TElasticFramedTransport.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TElasticFramedTransport.java index b0c55c21bd063..4c7602c869988 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TElasticFramedTransport.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TElasticFramedTransport.java @@ -130,9 +130,18 @@ protected void readFrame() throws TTransportException { if (size > thriftMaxFrameSize) { close(); - throw new TTransportException( - TTransportException.CORRUPTED_DATA, - "Frame size (" + size + ") larger than protect max size (" + thriftMaxFrameSize + ")!"); + if (size == 1195725856L || size == 1347375956L) { + // if someone sends HTTP GET/POST to this port, the size will be read as the following + throw new TTransportException( + TTransportException.CORRUPTED_DATA, + "Singular frame size (" + + size + + ") detected, you may be sending HTTP GET/POST requests to the Thrift-RPC port, please confirm that you are using the right port"); + } else { + throw new TTransportException( + 
TTransportException.CORRUPTED_DATA, + "Frame size (" + size + ") larger than protect max size (" + thriftMaxFrameSize + ")!"); + } } readBuffer.fill(underlying, size); } diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java index 80e31e2469de8..a4b7ad9bd3a5c 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java @@ -81,6 +81,7 @@ public enum TSStatusCode { SCHEMA_QUOTA_EXCEEDED(526), MEASUREMENT_ALREADY_EXISTS_IN_TEMPLATE(527), ONLY_LOGICAL_VIEW(528), + DATABASE_CONFLICT(529), // Storage Engine SYSTEM_READ_ONLY(600), @@ -97,6 +98,7 @@ public enum TSStatusCode { DISK_SPACE_INSUFFICIENT(611), OVERSIZE_TTL(612), TTL_CONFIG_ERROR(613), + WAL_ENTRY_TOO_LARGE(620), // Query Engine SQL_PARSE_ERROR(700), @@ -116,6 +118,18 @@ public enum TSStatusCode { NO_SUCH_QUERY(714), QUERY_WAS_KILLED(715), EXPLAIN_ANALYZE_FETCH_ERROR(716), + TOO_MANY_CONCURRENT_QUERIES_ERROR(717), + OPERATOR_NOT_FOUND(718), + + QUERY_EXECUTION_MEMORY_NOT_ENOUGH(719), + QUERY_TIMEOUT(720), + PLAN_FAILED_NETWORK_PARTITION(721), + CANNOT_FETCH_FI_STATE(722), + + // Arithmetic + NUMERIC_VALUE_OUT_OF_RANGE(750), + DIVISION_BY_ZERO(751), + DATE_OUT_OF_RANGE(752), // Authentication INIT_AUTH_ERROR(800), @@ -147,6 +161,9 @@ public enum TSStatusCode { REGION_LEADER_CHANGE_ERROR(905), NO_AVAILABLE_REGION_GROUP(906), LACK_PARTITION_ALLOCATION(907), + RECONSTRUCT_REGION_ERROR(908), + EXTEND_REGION_ERROR(909), + REMOVE_REGION_PEER_ERROR(910), // Cluster Manager ADD_CONFIGNODE_ERROR(1000), @@ -172,6 +189,9 @@ public enum TSStatusCode { PIPE_ERROR(1107), PIPESERVER_ERROR(1108), VERIFY_METADATA_ERROR(1109), + LOAD_TEMPORARY_UNAVAILABLE_EXCEPTION(1110), + LOAD_IDEMPOTENT_CONFLICT_EXCEPTION(1111), + LOAD_USER_CONFLICT_EXCEPTION(1112), // UDF UDF_LOAD_CLASS_ERROR(1200), @@ -196,7 +216,16 @@ public enum TSStatusCode { CQ_ALREADY_EXIST(1402), CQ_UPDATE_LAST_EXEC_TIME_ERROR(1403), - // code 1500-1599 are used by IoTDB-ML + // AI + CREATE_MODEL_ERROR(1500), + DROP_MODEL_ERROR(1501), + MODEL_EXIST_ERROR(1502), + GET_MODEL_INFO_ERROR(1503), + NO_REGISTERED_AI_NODE_ERROR(1504), + MODEL_NOT_FOUND_ERROR(1505), + REGISTER_AI_NODE_ERROR(1506), + AI_NODE_INTERNAL_ERROR(1510), + REMOVE_AI_NODE_ERROR(1511), // Pipe Plugin CREATE_PIPE_PLUGIN_ERROR(1600), @@ -230,6 +259,9 @@ public enum TSStatusCode { PIPE_RECEIVER_IDEMPOTENT_CONFLICT_EXCEPTION(1809), PIPE_RECEIVER_USER_CONFLICT_EXCEPTION(1810), PIPE_CONFIG_RECEIVER_HANDSHAKE_NEEDED(1811), + PIPE_TRANSFER_SLICE_OUT_OF_ORDER(1812), + PIPE_PUSH_META_TIMEOUT(1813), + PIPE_PUSH_META_NOT_ENOUGH_MEMORY(1814), // Subscription SUBSCRIPTION_VERSION_ERROR(1900), @@ -243,6 +275,8 @@ public enum TSStatusCode { SUBSCRIPTION_UNSUBSCRIBE_ERROR(1908), SUBSCRIPTION_MISSING_CUSTOMER(1909), SHOW_SUBSCRIPTION_ERROR(1910), + SUBSCRIPTION_PIPE_TIMEOUT_ERROR(1911), + SUBSCRIPTION_NOT_ENABLED_ERROR(1912), // Topic CREATE_TOPIC_ERROR(2000), diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/config/ConsumerConstant.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/config/ConsumerConstant.java index 3693e89c3bf5c..5ad1d9ba4356d 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/config/ConsumerConstant.java +++ 
b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/config/ConsumerConstant.java @@ -50,6 +50,11 @@ public class ConsumerConstant { public static final String FILE_SAVE_FSYNC_KEY = "file-save-fsync"; public static final boolean FILE_SAVE_FSYNC_DEFAULT_VALUE = false; + public static final String THRIFT_MAX_FRAME_SIZE_KEY = "thrift-max-frame-size"; + + public static final String MAX_POLL_PARALLELISM_KEY = "max-poll-parallelism"; + public static final int MAX_POLL_PARALLELISM_DEFAULT_VALUE = 1; + /////////////////////////////// pull consumer /////////////////////////////// public static final String AUTO_COMMIT_KEY = "auto-commit"; diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/config/TopicConfig.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/config/TopicConfig.java index fc967835dd24f..62bc0e0ab5e70 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/config/TopicConfig.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/config/TopicConfig.java @@ -51,6 +51,11 @@ public TopicConfig(final Map attributes) { private static final Map REALTIME_STREAM_MODE_CONFIG = Collections.singletonMap("realtime.mode", "stream"); + private static final Map SINK_TABLET_FORMAT_CONFIG = + Collections.singletonMap("format", "tablet"); + private static final Map SINK_TS_FILE_FORMAT_CONFIG = + Collections.singletonMap("format", "tsfile"); + private static final Map SNAPSHOT_MODE_CONFIG = Collections.singletonMap("mode", MODE_SNAPSHOT_VALUE); private static final Map LIVE_MODE_CONFIG = @@ -59,7 +64,7 @@ public TopicConfig(final Map attributes) { private static final Set LOOSE_RANGE_KEY_SET; static { - Set set = new HashSet<>(2); + final Set set = new HashSet<>(2); set.add("history.loose-range"); set.add("realtime.loose-range"); LOOSE_RANGE_KEY_SET = Collections.unmodifiableSet(set); @@ -103,7 +108,7 @@ public Map getAttributesWithTimeRange() { } public Map getAttributesWithRealtimeMode() { - return REALTIME_STREAM_MODE_CONFIG; + return REALTIME_STREAM_MODE_CONFIG; // default to stream (hybrid) } public Map getAttributesWithSourceMode() { @@ -121,6 +126,19 @@ public Map getAttributesWithSourceLooseRange() { .collect(Collectors.toMap(key -> key, key -> looseRangeValue)); } + public Map getAttributesWithSourcePrefix() { + final Map attributesWithProcessorPrefix = new HashMap<>(); + attributes.forEach( + (key, value) -> { + if (key.toLowerCase().startsWith("source")) { + attributesWithProcessorPrefix.put(key, value); + } + }); + return attributesWithProcessorPrefix; + } + + /////////////////////////////// processor attributes mapping /////////////////////////////// + public Map getAttributesWithProcessorPrefix() { final Map attributesWithProcessorPrefix = new HashMap<>(); attributes.forEach( @@ -131,4 +149,24 @@ public Map getAttributesWithProcessorPrefix() { }); return attributesWithProcessorPrefix; } + + public Map getAttributesWithSinkFormat() { + // refer to + // org.apache.iotdb.db.pipe.agent.task.connection.PipeEventCollector.parseAndCollectEvent(org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent) + return TopicConstant.FORMAT_TS_FILE_HANDLER_VALUE.equalsIgnoreCase( + attributes.getOrDefault(TopicConstant.FORMAT_KEY, TopicConstant.FORMAT_DEFAULT_VALUE)) + ? 
SINK_TS_FILE_FORMAT_CONFIG + : SINK_TABLET_FORMAT_CONFIG; + } + + public Map getAttributesWithSinkPrefix() { + final Map attributesWithProcessorPrefix = new HashMap<>(); + attributes.forEach( + (key, value) -> { + if (key.toLowerCase().startsWith("sink")) { + attributesWithProcessorPrefix.put(key, value); + } + }); + return attributesWithProcessorPrefix; + } } diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/exception/SubscriptionPayloadExceedException.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/exception/SubscriptionPayloadExceedException.java new file mode 100644 index 0000000000000..30c397b4e310f --- /dev/null +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/exception/SubscriptionPayloadExceedException.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.rpc.subscription.exception; + +import java.util.Objects; + +public class SubscriptionPayloadExceedException extends SubscriptionRuntimeCriticalException { + + public SubscriptionPayloadExceedException(final String message) { + super(message); + } + + public SubscriptionPayloadExceedException(final String message, final Throwable cause) { + super(message, cause); + } + + @Override + public boolean equals(final Object obj) { + return obj instanceof SubscriptionPayloadExceedException + && Objects.equals(getMessage(), ((SubscriptionPayloadExceedException) obj).getMessage()) + && Objects.equals( + getTimeStamp(), ((SubscriptionPayloadExceedException) obj).getTimeStamp()); + } + + @Override + public int hashCode() { + return super.hashCode(); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResource.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/exception/SubscriptionPipeTimeoutException.java similarity index 54% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResource.java rename to iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/exception/SubscriptionPipeTimeoutException.java index e8e03e64a1621..26231e4839695 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResource.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/exception/SubscriptionPipeTimeoutException.java @@ -17,25 +17,29 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.resource.wal.selfhost; +package org.apache.iotdb.rpc.subscription.exception; -import org.apache.iotdb.db.pipe.resource.wal.PipeWALResource; -import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; +import java.util.Objects; -public class PipeWALSelfHostResource extends PipeWALResource { +public class SubscriptionPipeTimeoutException extends SubscriptionTimeoutException { - public PipeWALSelfHostResource(WALEntryHandler walEntryHandler) { - super(walEntryHandler); + public SubscriptionPipeTimeoutException(final String message) { + super(message); + } + + public SubscriptionPipeTimeoutException(final String message, final Throwable cause) { + super(message, cause); } @Override - protected void pinInternal() throws MemTablePinException { - walEntryHandler.pinMemTable(); + public boolean equals(final Object obj) { + return obj instanceof SubscriptionPipeTimeoutException + && Objects.equals(getMessage(), ((SubscriptionPipeTimeoutException) obj).getMessage()) + && Objects.equals(getTimeStamp(), ((SubscriptionPipeTimeoutException) obj).getTimeStamp()); } @Override - protected void unpinInternal() throws MemTablePinException { - walEntryHandler.unpinMemTable(); + public int hashCode() { + return super.hashCode(); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResourceManager.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/exception/SubscriptionPollTimeoutException.java similarity index 53% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResourceManager.java rename to iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/exception/SubscriptionPollTimeoutException.java index c7fe0accda2fd..67c460ecfb30f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResourceManager.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/exception/SubscriptionPollTimeoutException.java @@ -17,22 +17,29 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.resource.wal.selfhost; +package org.apache.iotdb.rpc.subscription.exception; -import org.apache.iotdb.db.pipe.resource.wal.PipeWALResourceManager; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; +import java.util.Objects; -public class PipeWALSelfHostResourceManager extends PipeWALResourceManager { +public class SubscriptionPollTimeoutException extends SubscriptionTimeoutException { + + public SubscriptionPollTimeoutException(final String message) { + super(message); + } + + public SubscriptionPollTimeoutException(final String message, final Throwable cause) { + super(message, cause); + } @Override - protected void pinInternal(final long memTableId, final WALEntryHandler walEntryHandler) { - memtableIdToPipeWALResourceMap - .computeIfAbsent(memTableId, id -> new PipeWALSelfHostResource(walEntryHandler)) - .pin(); + public boolean equals(final Object obj) { + return obj instanceof SubscriptionPollTimeoutException + && Objects.equals(getMessage(), ((SubscriptionPollTimeoutException) obj).getMessage()) + && Objects.equals(getTimeStamp(), ((SubscriptionPollTimeoutException) obj).getTimeStamp()); } @Override - protected void unpinInternal(final long memTableId, final WALEntryHandler walEntryHandler) { - memtableIdToPipeWALResourceMap.get(memTableId).unpin(); + public int hashCode() { + return super.hashCode(); } } diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/exception/SubscriptionRuntimeCriticalException.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/exception/SubscriptionRuntimeCriticalException.java index 72124ba3109dd..0e471dca77799 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/exception/SubscriptionRuntimeCriticalException.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/exception/SubscriptionRuntimeCriticalException.java @@ -21,7 +21,7 @@ import java.util.Objects; -public class SubscriptionRuntimeCriticalException extends SubscriptionException { +public class SubscriptionRuntimeCriticalException extends SubscriptionRuntimeException { public SubscriptionRuntimeCriticalException(final String message) { super(message); diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/exception/SubscriptionTimeoutException.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/exception/SubscriptionTimeoutException.java new file mode 100644 index 0000000000000..f6471a74abaf3 --- /dev/null +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/exception/SubscriptionTimeoutException.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.rpc.subscription.exception; + +import java.util.Objects; + +public abstract class SubscriptionTimeoutException extends SubscriptionRuntimeNonCriticalException { + + public static String KEYWORD = "TimeoutException"; + + public SubscriptionTimeoutException(final String message) { + super(message); + } + + public SubscriptionTimeoutException(final String message, final Throwable cause) { + super(message, cause); + } + + @Override + public boolean equals(final Object obj) { + return obj instanceof SubscriptionTimeoutException + && Objects.equals(getMessage(), ((SubscriptionTimeoutException) obj).getMessage()) + && Objects.equals(getTimeStamp(), ((SubscriptionTimeoutException) obj).getTimeStamp()); + } + + @Override + public int hashCode() { + return super.hashCode(); + } +} diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/ErrorPayload.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/ErrorPayload.java index 7c2a763f1b956..4cf4d3d96b4f4 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/ErrorPayload.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/ErrorPayload.java @@ -28,8 +28,14 @@ public class ErrorPayload implements SubscriptionPollPayload { + private static final String OUTDATED_ERROR_MSG = "outdated subscription event"; + public static final ErrorPayload OUTDATED_ERROR_PAYLOAD = + new ErrorPayload(OUTDATED_ERROR_MSG, false); + + /** The error message describing the issue. */ private transient String errorMessage; + /** Indicates whether the error is critical. */ private transient boolean critical; public String getErrorMessage() { diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/FileInitPayload.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/FileInitPayload.java index 04ebd8a89ab23..d0189b41cf507 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/FileInitPayload.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/FileInitPayload.java @@ -28,6 +28,7 @@ public class FileInitPayload implements SubscriptionPollPayload { + /** The name of the file to be initialized. */ private transient String fileName; public String getFileName() { diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/FilePiecePayload.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/FilePiecePayload.java index 5c915d83c684e..b0590a23b378a 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/FilePiecePayload.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/FilePiecePayload.java @@ -30,10 +30,13 @@ public class FilePiecePayload implements SubscriptionPollPayload { + /** The name of the file. */ private transient String fileName; + /** The field to be filled in the next {@link PollFilePayload} request. */ private transient long nextWritingOffset; + /** The piece of the file content. 
*/ private transient byte[] filePiece; public String getFileName() { diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/FileSealPayload.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/FileSealPayload.java index bec792c2c9dcf..51d94a78c9e72 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/FileSealPayload.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/FileSealPayload.java @@ -28,8 +28,10 @@ public class FileSealPayload implements SubscriptionPollPayload { + /** The name of the file to be sealed. */ private transient String fileName; + /** The length of the file. */ private transient long fileLength; public String getFileName() { diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/PollFilePayload.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/PollFilePayload.java index 06128c70237a9..2836ac38abdab 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/PollFilePayload.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/PollFilePayload.java @@ -28,18 +28,14 @@ public class PollFilePayload implements SubscriptionPollPayload { - private transient String topicName; - - private transient String fileName; + /** The commit context associated with the {@link SubscriptionPollResponse}. */ + private transient SubscriptionCommitContext commitContext; + /** The offset from which the file content should be read. */ private transient long writingOffset; - public String getTopicName() { - return topicName; - } - - public String getFileName() { - return fileName; + public SubscriptionCommitContext getCommitContext() { + return commitContext; } public long getWritingOffset() { @@ -48,23 +44,20 @@ public long getWritingOffset() { public PollFilePayload() {} - public PollFilePayload(final String topicName, final String fileName, final long writingOffset) { - this.topicName = topicName; - this.fileName = fileName; + public PollFilePayload(final SubscriptionCommitContext commitContext, final long writingOffset) { + this.commitContext = commitContext; this.writingOffset = writingOffset; } @Override public void serialize(final DataOutputStream stream) throws IOException { - ReadWriteIOUtils.write(topicName, stream); - ReadWriteIOUtils.write(fileName, stream); + commitContext.serialize(stream); ReadWriteIOUtils.write(writingOffset, stream); } @Override public SubscriptionPollPayload deserialize(final ByteBuffer buffer) { - topicName = ReadWriteIOUtils.readString(buffer); - fileName = ReadWriteIOUtils.readString(buffer); + commitContext = SubscriptionCommitContext.deserialize(buffer); writingOffset = ReadWriteIOUtils.readLong(buffer); return this; } @@ -80,22 +73,19 @@ public boolean equals(final Object obj) { return false; } final PollFilePayload that = (PollFilePayload) obj; - return Objects.equals(this.topicName, that.topicName) - && Objects.equals(this.fileName, that.fileName) + return Objects.equals(this.commitContext, that.commitContext) && Objects.equals(this.writingOffset, that.writingOffset); } @Override public int hashCode() { - return Objects.hash(topicName, fileName, writingOffset); + return Objects.hash(commitContext, writingOffset); } @Override public String toString() { - return "PollFilePayload{topicName=" - + topicName - + ", 
fileName=" - + fileName + return "PollFilePayload{commitContext=" + + commitContext + ", writingOffset=" + writingOffset + "}"; diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/PollPayload.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/PollPayload.java index 77f6747bac8e8..443b899497bd1 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/PollPayload.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/PollPayload.java @@ -30,6 +30,7 @@ public class PollPayload implements SubscriptionPollPayload { + /** The set of topic names that need to be polled. */ private transient Set topicNames = new HashSet<>(); public PollPayload() {} diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/PollTabletsPayload.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/PollTabletsPayload.java new file mode 100644 index 0000000000000..ea53c93c63675 --- /dev/null +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/PollTabletsPayload.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.rpc.subscription.payload.poll; + +import org.apache.tsfile.utils.ReadWriteIOUtils; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Objects; + +public class PollTabletsPayload implements SubscriptionPollPayload { + + /** The commit context associated with the {@link SubscriptionPollResponse}. */ + private transient SubscriptionCommitContext commitContext; + + /** The index for the next batch of tablets. 
*/ + private transient int offset; + + public SubscriptionCommitContext getCommitContext() { + return commitContext; + } + + public int getOffset() { + return offset; + } + + public PollTabletsPayload() {} + + public PollTabletsPayload(final SubscriptionCommitContext commitContext, final int offset) { + this.commitContext = commitContext; + this.offset = offset; + } + + @Override + public void serialize(final DataOutputStream stream) throws IOException { + commitContext.serialize(stream); + ReadWriteIOUtils.write(offset, stream); + } + + @Override + public SubscriptionPollPayload deserialize(final ByteBuffer buffer) { + commitContext = SubscriptionCommitContext.deserialize(buffer); + offset = ReadWriteIOUtils.readInt(buffer); + return this; + } + + /////////////////////////////// Object /////////////////////////////// + + @Override + public boolean equals(final Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + final PollTabletsPayload that = (PollTabletsPayload) obj; + return Objects.equals(this.commitContext, that.commitContext) + && Objects.equals(this.offset, that.offset); + } + + @Override + public int hashCode() { + return Objects.hash(commitContext, offset); + } + + @Override + public String toString() { + return "PollTabletsPayload{commitContext=" + commitContext + ", offset=" + offset + "}"; + } +} diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollRequest.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollRequest.java index fce474fff0d7c..3337887b185f5 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollRequest.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollRequest.java @@ -36,13 +36,20 @@ public class SubscriptionPollRequest { private final transient SubscriptionPollPayload payload; - private final transient long timeoutMs; // unused now + private final transient long timeoutMs; + + /** The maximum size, in bytes, for the response payload. 
*/ + private final transient long maxBytes; public SubscriptionPollRequest( - final short requestType, final SubscriptionPollPayload payload, final long timeoutMs) { + final short requestType, + final SubscriptionPollPayload payload, + final long timeoutMs, + final long maxBytes) { this.requestType = requestType; this.payload = payload; this.timeoutMs = timeoutMs; + this.maxBytes = maxBytes; } public short getRequestType() { @@ -57,6 +64,10 @@ public long getTimeoutMs() { return timeoutMs; } + public long getMaxBytes() { + return maxBytes; + } + //////////////////////////// serialization //////////////////////////// public static ByteBuffer serialize(final SubscriptionPollRequest request) throws IOException { @@ -71,6 +82,7 @@ private void serialize(final DataOutputStream stream) throws IOException { ReadWriteIOUtils.write(requestType, stream); payload.serialize(stream); ReadWriteIOUtils.write(timeoutMs, stream); + ReadWriteIOUtils.write(maxBytes, stream); } public static SubscriptionPollRequest deserialize(final ByteBuffer buffer) { @@ -84,6 +96,9 @@ public static SubscriptionPollRequest deserialize(final ByteBuffer buffer) { case POLL_FILE: payload = new PollFilePayload().deserialize(buffer); break; + case POLL_TABLETS: + payload = new PollTabletsPayload().deserialize(buffer); + break; default: LOGGER.warn("unexpected request type: {}, payload will be null", requestType); break; @@ -93,7 +108,8 @@ public static SubscriptionPollRequest deserialize(final ByteBuffer buffer) { } final long timeoutMs = ReadWriteIOUtils.readLong(buffer); - return new SubscriptionPollRequest(requestType, payload, timeoutMs); + final long maxBytes = ReadWriteIOUtils.readLong(buffer); + return new SubscriptionPollRequest(requestType, payload, timeoutMs, maxBytes); } /////////////////////////////// object /////////////////////////////// @@ -106,6 +122,8 @@ public String toString() { + payload + ", timeoutMs=" + timeoutMs + + ", maxBytes=" + + maxBytes + "}"; } } diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollRequestType.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollRequestType.java index c034d4dfbc68c..b225faec9e7dd 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollRequestType.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollRequestType.java @@ -26,6 +26,7 @@ public enum SubscriptionPollRequestType { POLL((short) 0), POLL_FILE((short) 1), + POLL_TABLETS((short) 2), ; private final short type; diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollResponse.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollResponse.java index 01d173d274283..06baa30acee9f 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollResponse.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollResponse.java @@ -27,6 +27,8 @@ import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; public class SubscriptionPollResponse { @@ -110,16 +112,18 @@ public static SubscriptionPollResponse deserialize(final ByteBuffer buffer) { return new SubscriptionPollResponse(responseType, 
payload, commitContext);
   }

-  /////////////////////////////// object ///////////////////////////////
+  /////////////////////////////// stringify ///////////////////////////////

   @Override
   public String toString() {
-    return "SubscriptionPollResponse{responseType="
-        + SubscriptionPollResponseType.valueOf(responseType).toString()
-        + ", payload="
-        + payload
-        + ", commitContext="
-        + commitContext
-        + "}";
+    return "SubscriptionPollResponse" + coreReportMessage();
+  }
+
+  protected Map<String, String> coreReportMessage() {
+    final Map<String, String> result = new HashMap<>();
+    result.put("responseType", SubscriptionPollResponseType.valueOf(responseType).toString());
+    result.put("payload", payload.toString());
+    result.put("commitContext", commitContext.toString());
+    return result;
+  }
 }
diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/TabletsPayload.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/TabletsPayload.java
index 6ece7cc691b3d..5654572f45873 100644
--- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/TabletsPayload.java
+++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/TabletsPayload.java
@@ -28,28 +28,46 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Objects;
-import java.util.concurrent.CopyOnWriteArrayList;

 public class TabletsPayload implements SubscriptionPollPayload {

-  protected transient List<Tablet> tablets = new ArrayList<>();
+  /** A batch of tablets. */
+  private transient List<Tablet> tablets = new ArrayList<>();
+
+  /**
+   * The field to be filled in the next {@link PollTabletsPayload} request.
+   *
+   * <ul>
+   *   <li>If nextOffset is 1, it indicates that the current payload is the first payload (its
+   *       tablets are empty) and the fetching should continue.
+   *   <li>If nextOffset is negative (or zero), it indicates all tablets have been fetched, and
+   *       -nextOffset represents the total number of tablets.
+   * </ul>
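+   *
+   * <p>A fetch-loop sketch added for illustration (not from the original patch; {@code
+   * firstPayload} and {@code poll} are hypothetical names for the first received payload and a
+   * helper that sends a {@link PollTabletsPayload} with the given offset):
+   *
+   * <pre>{@code
+   * TabletsPayload payload = firstPayload; // nextOffset == 1, tablets empty (see above)
+   * List<Tablet> tablets = new ArrayList<>();
+   * while (payload.getNextOffset() > 0) {
+   *   payload = poll(commitContext, payload.getNextOffset());
+   *   tablets.addAll(payload.getTablets());
+   * }
+   * // when the loop exits, -payload.getNextOffset() == tablets.size()
+   * }</pre>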
+ */ + private transient int nextOffset; public TabletsPayload() {} - public TabletsPayload(final List tablets) { - this.tablets = new CopyOnWriteArrayList<>(tablets); + public TabletsPayload(final List tablets, final int nextOffset) { + this.tablets = tablets; + this.nextOffset = nextOffset; } public List getTablets() { return tablets; } + public int getNextOffset() { + return nextOffset; + } + @Override public void serialize(final DataOutputStream stream) throws IOException { ReadWriteIOUtils.write(tablets.size(), stream); for (final Tablet tablet : tablets) { tablet.serialize(stream); } + ReadWriteIOUtils.write(nextOffset, stream); } @Override @@ -59,7 +77,8 @@ public SubscriptionPollPayload deserialize(final ByteBuffer buffer) { for (int i = 0; i < size; ++i) { tablets.add(Tablet.deserialize(buffer)); } - this.tablets = new CopyOnWriteArrayList<>(tablets); + this.tablets = tablets; + this.nextOffset = ReadWriteIOUtils.readInt(buffer); return this; } @@ -72,16 +91,17 @@ public boolean equals(final Object obj) { return false; } final TabletsPayload that = (TabletsPayload) obj; - return Objects.equals(this.tablets, that.tablets); + return Objects.equals(this.tablets, that.tablets) + && Objects.equals(this.nextOffset, that.nextOffset); } @Override public int hashCode() { - return Objects.hash(tablets); + return Objects.hash(tablets, nextOffset); } @Override public String toString() { - return "TabletsPayload{size of tablets=" + tablets.size() + "}"; + return "TabletsPayload{size of tablets=" + tablets.size() + ", nextOffset=" + nextOffset + "}"; } } diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/response/PipeSubscribeHeartbeatResp.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/response/PipeSubscribeHeartbeatResp.java index 0b0f61b915834..7bf02c7c6e765 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/response/PipeSubscribeHeartbeatResp.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/response/PipeSubscribeHeartbeatResp.java @@ -19,13 +19,45 @@ package org.apache.iotdb.rpc.subscription.payload.response; +import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.rpc.subscription.config.TopicConfig; import org.apache.iotdb.service.rpc.thrift.TPipeSubscribeResp; +import org.apache.tsfile.utils.PublicBAOS; +import org.apache.tsfile.utils.ReadWriteIOUtils; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import java.util.Objects; public class PipeSubscribeHeartbeatResp extends TPipeSubscribeResp { + private transient Map topics = new HashMap<>(); // subscribed topics + + private transient Map endPoints = new HashMap<>(); // available endpoints + + private transient List topicNamesToUnsubscribe = + new ArrayList<>(); // topics should be unsubscribed + + public Map getTopics() { + return topics; + } + + public Map getEndPoints() { + return endPoints; + } + + public List getTopicNamesToUnsubscribe() { + return topicNamesToUnsubscribe; + } + /////////////////////////////// Thrift /////////////////////////////// /** @@ -42,11 +74,75 @@ public static PipeSubscribeHeartbeatResp toTPipeSubscribeResp(final TSStatus sta return resp; } + /** + * Serialize the incoming parameters into 
`PipeSubscribeHeartbeatResp`, called by the subscription + * server. + */ + public static PipeSubscribeHeartbeatResp toTPipeSubscribeResp( + final TSStatus status, + final Map topics, + final Map endPoints, + final List topicNamesToUnsubscribe) + throws IOException { + final PipeSubscribeHeartbeatResp resp = toTPipeSubscribeResp(status); + + try (final PublicBAOS byteArrayOutputStream = new PublicBAOS(); + final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) { + ReadWriteIOUtils.write(topics.size(), outputStream); + for (final Map.Entry entry : topics.entrySet()) { + ReadWriteIOUtils.write(entry.getKey(), outputStream); + entry.getValue().serialize(outputStream); + } + ReadWriteIOUtils.write(endPoints.size(), outputStream); + for (final Map.Entry entry : endPoints.entrySet()) { + ReadWriteIOUtils.write(entry.getKey(), outputStream); + ReadWriteIOUtils.write(entry.getValue().getIp(), outputStream); + ReadWriteIOUtils.write(entry.getValue().getPort(), outputStream); + } + ReadWriteIOUtils.writeStringList(topicNamesToUnsubscribe, outputStream); + resp.body = + Collections.singletonList( + ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size())); + } + + return resp; + } + /** Deserialize `TPipeSubscribeResp` to obtain parameters, called by the subscription client. */ public static PipeSubscribeHeartbeatResp fromTPipeSubscribeResp( final TPipeSubscribeResp heartbeatResp) { final PipeSubscribeHeartbeatResp resp = new PipeSubscribeHeartbeatResp(); + if (Objects.nonNull(heartbeatResp.body)) { + for (final ByteBuffer byteBuffer : heartbeatResp.body) { + if (Objects.nonNull(byteBuffer) && byteBuffer.hasRemaining()) { + { + final int size = ReadWriteIOUtils.readInt(byteBuffer); + final Map topics = new HashMap<>(); + for (int i = 0; i < size; i++) { + final String topicName = ReadWriteIOUtils.readString(byteBuffer); + final TopicConfig topicConfig = TopicConfig.deserialize(byteBuffer); + topics.put(topicName, topicConfig); + } + resp.topics = topics; + } + { + final int size = ReadWriteIOUtils.readInt(byteBuffer); + final Map endPoints = new HashMap<>(); + for (int i = 0; i < size; i++) { + final int nodeId = ReadWriteIOUtils.readInt(byteBuffer); + final String ip = ReadWriteIOUtils.readString(byteBuffer); + final int port = ReadWriteIOUtils.readInt(byteBuffer); + endPoints.put(nodeId, new TEndPoint(ip, port)); + } + resp.endPoints = endPoints; + } + resp.topicNamesToUnsubscribe = ReadWriteIOUtils.readStringList(byteBuffer); + break; + } + } + } + resp.status = heartbeatResp.status; resp.version = heartbeatResp.version; resp.type = heartbeatResp.type; @@ -66,7 +162,10 @@ public boolean equals(final Object obj) { return false; } final PipeSubscribeHeartbeatResp that = (PipeSubscribeHeartbeatResp) obj; - return Objects.equals(this.status, that.status) + return Objects.equals(this.topics, that.topics) + && Objects.equals(this.endPoints, that.endPoints) + && Objects.equals(this.topicNamesToUnsubscribe, that.topicNamesToUnsubscribe) + && Objects.equals(this.status, that.status) && this.version == that.version && this.type == that.type && Objects.equals(this.body, that.body); @@ -74,6 +173,6 @@ public boolean equals(final Object obj) { @Override public int hashCode() { - return Objects.hash(status, version, type, body); + return Objects.hash(topics, endPoints, topicNamesToUnsubscribe, status, version, type, body); } } diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/response/PipeSubscribePollResp.java 
b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/response/PipeSubscribePollResp.java index f9fb8a076d822..83c001f7d5a0d 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/response/PipeSubscribePollResp.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/response/PipeSubscribePollResp.java @@ -46,8 +46,6 @@ public static PipeSubscribePollResp toTPipeSubscribeResp( final TSStatus status, final List byteBuffers) { final PipeSubscribePollResp resp = new PipeSubscribePollResp(); - // resp.events = events; - resp.status = status; resp.version = PipeSubscribeResponseVersion.VERSION_1.getVersion(); resp.type = PipeSubscribeResponseType.ACK.getType(); diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/response/PipeSubscribeSubscribeResp.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/response/PipeSubscribeSubscribeResp.java index e12d350bda1a0..ad5ed52df51af 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/response/PipeSubscribeSubscribeResp.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/response/PipeSubscribeSubscribeResp.java @@ -62,11 +62,7 @@ public static PipeSubscribeSubscribeResp toTPipeSubscribeResp(final TSStatus sta */ public static PipeSubscribeSubscribeResp toTPipeSubscribeResp( final TSStatus status, final Map topics) throws IOException { - final PipeSubscribeSubscribeResp resp = new PipeSubscribeSubscribeResp(); - - resp.status = status; - resp.version = PipeSubscribeResponseVersion.VERSION_1.getVersion(); - resp.type = PipeSubscribeResponseType.ACK.getType(); + final PipeSubscribeSubscribeResp resp = toTPipeSubscribeResp(status); try (final PublicBAOS byteArrayOutputStream = new PublicBAOS(); final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) { diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/response/PipeSubscribeUnsubscribeResp.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/response/PipeSubscribeUnsubscribeResp.java index a6add300611f4..6856983b37d39 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/response/PipeSubscribeUnsubscribeResp.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/subscription/payload/response/PipeSubscribeUnsubscribeResp.java @@ -62,11 +62,7 @@ public static PipeSubscribeUnsubscribeResp toTPipeSubscribeResp(final TSStatus s */ public static PipeSubscribeUnsubscribeResp toTPipeSubscribeResp( final TSStatus status, final Map topics) throws IOException { - final PipeSubscribeUnsubscribeResp resp = new PipeSubscribeUnsubscribeResp(); - - resp.status = status; - resp.version = PipeSubscribeResponseVersion.VERSION_1.getVersion(); - resp.type = PipeSubscribeResponseType.ACK.getType(); + final PipeSubscribeUnsubscribeResp resp = toTPipeSubscribeResp(status); try (final PublicBAOS byteArrayOutputStream = new PublicBAOS(); final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) { diff --git a/iotdb-client/service-rpc/src/test/java/org/apache/iotdb/rpc/TElasticFramedTransportTest.java b/iotdb-client/service-rpc/src/test/java/org/apache/iotdb/rpc/TElasticFramedTransportTest.java new file mode 100644 index 0000000000000..086dc33825065 --- /dev/null +++ 
b/iotdb-client/service-rpc/src/test/java/org/apache/iotdb/rpc/TElasticFramedTransportTest.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.rpc; + +import org.apache.thrift.transport.TByteBuffer; +import org.apache.thrift.transport.TTransportException; +import org.junit.Test; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +public class TElasticFramedTransportTest { + + @Test + public void testSingularSize() { + + try { + TElasticFramedTransport transport = + new TElasticFramedTransport( + new TByteBuffer( + ByteBuffer.wrap("GET 127.0.0.1 HTTP/1.1".getBytes(StandardCharsets.UTF_8))), + 128 * 1024 * 1024, + 512 * 1024 * 1024, + false); + transport.open(); + transport.read(ByteBuffer.allocate(4096)); + fail("Exception expected"); + } catch (TTransportException e) { + assertEquals( + "Singular frame size (1195725856) detected, you may be sending HTTP GET/POST requests to the Thrift-RPC port, please confirm that you are using the right port", + e.getMessage()); + } + + try { + TElasticFramedTransport transport = + new TElasticFramedTransport( + new TByteBuffer( + ByteBuffer.wrap("POST 127.0.0.1 HTTP/1.1".getBytes(StandardCharsets.UTF_8))), + 128 * 1024 * 1024, + 512 * 1024 * 1024, + false); + transport.open(); + transport.read(ByteBuffer.allocate(4096)); + fail("Exception expected"); + } catch (TTransportException e) { + assertEquals( + "Singular frame size (1347375956) detected, you may be sending HTTP GET/POST requests to the Thrift-RPC port, please confirm that you are using the right port", + e.getMessage()); + } + } +} diff --git a/iotdb-client/session/pom.xml b/iotdb-client/session/pom.xml index 4e514bc3265b8..e45c1a8fe29c1 100644 --- a/iotdb-client/session/pom.xml +++ b/iotdb-client/session/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-client - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT iotdb-session IoTDB: Client: Session @@ -37,17 +37,17 @@ org.apache.iotdb service-rpc - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb isession - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-thrift-commons - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.tsfile @@ -62,7 +62,7 @@ org.apache.iotdb iotdb-thrift - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.slf4j diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/Session.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/Session.java index a552002fdae5e..e61d86c2f20dc 100644 --- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/Session.java +++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/Session.java @@ -78,6 +78,7 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; 
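An aside on the TElasticFramedTransportTest added above: the two asserted "singular frame size" values are simply the first four ASCII bytes of an HTTP request line read as a big-endian int, which is how a framed Thrift transport interprets the start of a frame. A self-contained sketch verifying the constants (the class name FrameSizeDemo is invented for illustration, not part of the patch):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class FrameSizeDemo {
  public static void main(String[] args) {
    // "GET " = 0x47 0x45 0x54 0x20 read big-endian -> 0x47455420 = 1195725856
    System.out.println(ByteBuffer.wrap("GET ".getBytes(StandardCharsets.US_ASCII)).getInt());
    // "POST" = 0x50 0x4F 0x53 0x54 read big-endian -> 0x504F5354 = 1347375956
    System.out.println(ByteBuffer.wrap("POST".getBytes(StandardCharsets.US_ASCII)).getInt());
  }
}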
import java.nio.ByteBuffer; +import java.time.LocalDate; import java.time.ZoneId; import java.util.ArrayList; import java.util.Arrays; @@ -411,6 +412,7 @@ public Session( if (nodeUrls.isEmpty()) { throw new IllegalArgumentException("nodeUrls shouldn't be empty."); } + nodeUrls = shuffleNodeUrls(nodeUrls); this.nodeUrls = nodeUrls; this.username = username; this.password = password; @@ -427,6 +429,7 @@ public Session(Builder builder) { if (builder.nodeUrls.isEmpty()) { throw new IllegalArgumentException("nodeUrls shouldn't be empty."); } + builder.nodeUrls = shuffleNodeUrls(builder.nodeUrls); this.nodeUrls = builder.nodeUrls; this.enableQueryRedirection = true; } else { @@ -547,6 +550,16 @@ private void initThreadPool() { }); } + private static List shuffleNodeUrls(List endPoints) { + try { + Collections.shuffle(endPoints); + } catch (UnsupportedOperationException e) { + endPoints = new ArrayList<>(endPoints); + Collections.shuffle(endPoints); + } + return endPoints; + } + private List getNodeUrls() { if (defaultEndPoint != null) { return Collections.singletonList(defaultEndPoint); @@ -581,10 +594,10 @@ public synchronized void open( @Override public synchronized void close() throws IoTDBConnectionException { - if (isClosed) { - return; - } try { + if (isClosed) { + return; + } if (enableRedirection) { for (SessionConnection sessionConnection : endPointToSessionConnection.values()) { sessionConnection.close(); @@ -1427,7 +1440,7 @@ private TSInsertRecordReq genTSInsertRecordReq( request.setPrefixPath(prefixPath); request.setTimestamp(time); request.setMeasurements(measurements); - ByteBuffer buffer = SessionUtils.getValueBuffer(types, values); + ByteBuffer buffer = SessionUtils.getValueBuffer(types, values, measurements); request.setValues(buffer); request.setIsAligned(isAligned); return request; @@ -2392,7 +2405,8 @@ private TSInsertRecordsOfOneDeviceReq genTSInsertRecordsOfOneDeviceReq( request.setPrefixPath(prefixPath); request.setTimestamps(times); request.setMeasurementsList(measurementsList); - List buffersList = objectValuesListToByteBufferList(valuesList, typesList); + List buffersList = + objectValuesListToByteBufferList(valuesList, typesList, measurementsList); request.setValuesList(buffersList); request.setIsAligned(isAligned); return request; @@ -2466,11 +2480,14 @@ private static List sortList(List source, Integer[] index) { } private List objectValuesListToByteBufferList( - List> valuesList, List> typesList) + List> valuesList, + List> typesList, + List> measurementsList) throws IoTDBConnectionException { List buffersList = new ArrayList<>(); for (int i = 0; i < valuesList.size(); i++) { - ByteBuffer buffer = SessionUtils.getValueBuffer(typesList.get(i), valuesList.get(i)); + ByteBuffer buffer = + SessionUtils.getValueBuffer(typesList.get(i), valuesList.get(i), measurementsList.get(i)); buffersList.add(buffer); } return buffersList; @@ -2542,7 +2559,8 @@ private TSInsertRecordsReq genTSInsertRecordsReq( request.setTimestamps(times); request.setMeasurementsList(measurementsList); request.setIsAligned(isAligned); - List buffersList = objectValuesListToByteBufferList(valuesList, typesList); + List buffersList = + objectValuesListToByteBufferList(valuesList, typesList, measurementsList); request.setValuesList(buffersList); return request; } @@ -2579,7 +2597,7 @@ private void updateTSInsertRecordsReq( request.addToPrefixPaths(deviceId); request.addToTimestamps(time); request.addToMeasurementsList(measurements); - ByteBuffer buffer = SessionUtils.getValueBuffer(types, values); + 
ByteBuffer buffer = SessionUtils.getValueBuffer(types, values, measurements); request.addToValuesList(buffer); } @@ -2686,6 +2704,9 @@ private TSInsertTabletReq genTSInsertTabletReq(Tablet tablet, boolean sorted, bo TSInsertTabletReq request = new TSInsertTabletReq(); for (IMeasurementSchema measurementSchema : tablet.getSchemas()) { + if (measurementSchema.getMeasurementId() == null) { + throw new IllegalArgumentException("measurement should be non null value"); + } request.addToMeasurements(measurementSchema.getMeasurementId()); request.addToTypes(measurementSchema.getType().ordinal()); } @@ -2806,6 +2827,9 @@ private void updateTSInsertTabletsReq( List dataTypes = new ArrayList<>(); request.setIsAligned(isAligned); for (IMeasurementSchema measurementSchema : tablet.getSchemas()) { + if (measurementSchema.getMeasurementId() == null) { + throw new IllegalArgumentException("measurement should be non null value"); + } measurements.add(measurementSchema.getMeasurementId()); dataTypes.add(measurementSchema.getType().ordinal()); } @@ -3295,13 +3319,19 @@ private Object sortList(Object valueList, TSDataType dataType, Integer[] index) } return sortedValues; case INT32: - case DATE: int[] intValues = (int[]) valueList; int[] sortedIntValues = new int[intValues.length]; for (int i = 0; i < index.length; i++) { sortedIntValues[i] = intValues[index[i]]; } return sortedIntValues; + case DATE: + LocalDate[] date = (LocalDate[]) valueList; + LocalDate[] sortedDateValues = new LocalDate[date.length]; + for (int i = 0; i < index.length; i++) { + sortedDateValues[i] = date[index[i]]; + } + return sortedDateValues; case INT64: case TIMESTAMP: long[] longValues = (long[]) valueList; diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/SessionConnection.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/SessionConnection.java index 4a8c3a36ce5be..425ae908e431c 100644 --- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/SessionConnection.java +++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/SessionConnection.java @@ -71,6 +71,7 @@ import org.apache.thrift.transport.TTransport; import org.apache.thrift.transport.TTransportException; import org.apache.tsfile.utils.Pair; +import org.apache.tsfile.utils.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -262,23 +263,14 @@ protected IClientRPCService.Iface getClient() { protected void setTimeZone(String zoneId) throws StatementExecutionException, IoTDBConnectionException { - TSSetTimeZoneReq req = new TSSetTimeZoneReq(sessionId, zoneId); - TSStatus resp; - try { - resp = client.setTimeZone(req); - } catch (TException e) { - if (reconnect()) { - try { - req.setSessionId(sessionId); - resp = client.setTimeZone(req); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } - RpcUtils.verifySuccess(resp); + final TSStatus status = + callWithReconnect( + () -> { + TSSetTimeZoneReq req = new TSSetTimeZoneReq(sessionId, zoneId); + return client.setTimeZone(req); + }) + .getResult(); + RpcUtils.verifySuccess(status); setTimeZoneOfSession(zoneId); } @@ -295,93 +287,52 @@ protected String getTimeZone() { protected void setStorageGroup(String storageGroup) throws IoTDBConnectionException, StatementExecutionException { - try { - RpcUtils.verifySuccess(client.setStorageGroup(sessionId, storageGroup)); - } catch (TException e) { - if (reconnect()) { - try { - 
RpcUtils.verifySuccess(client.setStorageGroup(sessionId, storageGroup)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSStatus status = + callWithReconnect(() -> client.setStorageGroup(sessionId, storageGroup)).getResult(); + RpcUtils.verifySuccess(status); } protected void deleteStorageGroups(List storageGroups) throws IoTDBConnectionException, StatementExecutionException { - try { - RpcUtils.verifySuccess(client.deleteStorageGroups(sessionId, storageGroups)); - } catch (TException e) { - if (reconnect()) { - try { - RpcUtils.verifySuccess(client.deleteStorageGroups(sessionId, storageGroups)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSStatus status = + callWithReconnect(() -> client.deleteStorageGroups(sessionId, storageGroups)).getResult(); + RpcUtils.verifySuccess(status); } protected void createTimeseries(TSCreateTimeseriesReq request) throws IoTDBConnectionException, StatementExecutionException { - request.setSessionId(sessionId); - try { - RpcUtils.verifySuccess(client.createTimeseries(request)); - } catch (TException e) { - if (reconnect()) { - try { - request.setSessionId(sessionId); - RpcUtils.verifySuccess(client.createTimeseries(request)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSStatus status = + callWithReconnect( + () -> { + request.setSessionId(sessionId); + return client.createTimeseries(request); + }) + .getResult(); + RpcUtils.verifySuccess(status); } protected void createAlignedTimeseries(TSCreateAlignedTimeseriesReq request) throws IoTDBConnectionException, StatementExecutionException { - request.setSessionId(sessionId); - try { - RpcUtils.verifySuccess(client.createAlignedTimeseries(request)); - } catch (TException e) { - if (reconnect()) { - try { - request.setSessionId(sessionId); - RpcUtils.verifySuccess(client.createAlignedTimeseries(request)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSStatus status = + callWithReconnect( + () -> { + request.setSessionId(sessionId); + return client.createAlignedTimeseries(request); + }) + .getResult(); + RpcUtils.verifySuccess(status); } protected void createMultiTimeseries(TSCreateMultiTimeseriesReq request) throws IoTDBConnectionException, StatementExecutionException { - request.setSessionId(sessionId); - try { - RpcUtils.verifySuccess(client.createMultiTimeseries(request)); - } catch (TException e) { - if (reconnect()) { - try { - request.setSessionId(sessionId); - RpcUtils.verifySuccess(client.createMultiTimeseries(request)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSStatus status = + callWithReconnect( + () -> { + request.setSessionId(sessionId); + return client.createMultiTimeseries(request); + }) + .getResult(); + RpcUtils.verifySuccess(status); } protected boolean checkTimeseriesExists(String path, long timeout) @@ -406,26 +357,22 @@ protected SessionDataSet executeQueryStatement(String sql, long timeout) 
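The SessionConnection hunks around this point replace hand-rolled try/reconnect blocks with a callWithReconnect helper whose definition is not included in this excerpt. A hedged sketch inferred only from the call sites (RetryResult and TFunction are the helper types used later in this diff; the body below is an assumption, not the committed implementation):

private <T> RetryResult<T> callWithReconnect(TFunction<T> rpc) throws IoTDBConnectionException {
  try {
    // first attempt: retryAttempts == 0, so callers honor redirection info
    return new RetryResult<>(rpc.run(), null, 0);
  } catch (TException e) {
    if (!reconnect()) {
      throw new IoTDBConnectionException(logForReconnectionFailure());
    }
    try {
      // single retry after a successful reconnect: retryAttempts > 0, redirection is skipped
      return new RetryResult<>(rpc.run(), null, 1);
    } catch (TException tException) {
      throw new IoTDBConnectionException(tException);
    }
  }
}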
TSExecuteStatementReq execReq = new TSExecuteStatementReq(sessionId, sql, statementId); execReq.setFetchSize(session.fetchSize); execReq.setTimeout(timeout); - TSExecuteStatementResp execResp; - try { - execReq.setEnableRedirectQuery(enableRedirect); - execResp = client.executeQueryStatementV2(execReq); + execReq.setEnableRedirectQuery(enableRedirect); + + RetryResult result = + callWithReconnect( + () -> { + execReq.setSessionId(sessionId); + execReq.setStatementId(statementId); + return client.executeQueryStatementV2(execReq); + }); + TSExecuteStatementResp execResp = result.getResult(); + if (result.getRetryAttempts() == 0) { RpcUtils.verifySuccessWithRedirection(execResp.getStatus()); - } catch (TException e) { - if (reconnect()) { - try { - execReq.setSessionId(sessionId); - execReq.setStatementId(statementId); - execResp = client.executeQueryStatementV2(execReq); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } + } else { + RpcUtils.verifySuccess(execResp.getStatus()); } - RpcUtils.verifySuccess(execResp.getStatus()); return new SessionDataSet( sql, execResp.getColumns(), @@ -445,49 +392,8 @@ protected SessionDataSet executeQueryStatement(String sql, long timeout) protected void executeNonQueryStatement(String sql) throws IoTDBConnectionException, StatementExecutionException { - TSExecuteStatementReq request = new TSExecuteStatementReq(sessionId, sql, statementId); - - TException lastTException = null; - TSStatus status = null; - for (int i = 0; i <= maxRetryCount; i++) { - if (i > 0) { - // re-init the TException and TSStatus - lastTException = null; - status = null; - // not first time, we need to sleep and then reconnect - try { - TimeUnit.MILLISECONDS.sleep(retryIntervalInMs); - } catch (InterruptedException e) { - // just ignore - } - if (!reconnect()) { - // reconnect failed, just continue to make another retry. 
- continue; - } - } - try { - status = executeNonQueryStatementInternal(request); - // need retry - if (status.isSetNeedRetry() && status.isNeedRetry()) { - continue; - } - // succeed or don't need to retry - RpcUtils.verifySuccess(status); - return; - } catch (TException e) { - // all network exception need retry until reaching maxRetryCount - lastTException = e; - } - } - - if (status != null) { - RpcUtils.verifySuccess(status); - } else if (lastTException != null) { - throw new IoTDBConnectionException(lastTException); - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } + callWithRetryAndVerify(() -> executeNonQueryStatementInternal(request)); } private TSStatus executeNonQueryStatementInternal(TSExecuteStatementReq request) @@ -504,26 +410,23 @@ protected SessionDataSet executeRawDataQuery( new TSRawDataQueryReq(sessionId, paths, startTime, endTime, statementId); execReq.setFetchSize(session.fetchSize); execReq.setTimeout(timeOut); - TSExecuteStatementResp execResp; - try { - execReq.setEnableRedirectQuery(enableRedirect); - execResp = client.executeRawDataQueryV2(execReq); + execReq.setEnableRedirectQuery(enableRedirect); + + RetryResult result = + callWithReconnect( + () -> { + execReq.setSessionId(sessionId); + execReq.setStatementId(statementId); + return client.executeRawDataQueryV2(execReq); + }); + + TSExecuteStatementResp execResp = result.getResult(); + if (result.getRetryAttempts() == 0) { RpcUtils.verifySuccessWithRedirection(execResp.getStatus()); - } catch (TException e) { - if (reconnect()) { - try { - execReq.setSessionId(sessionId); - execReq.setStatementId(statementId); - execResp = client.executeRawDataQueryV2(execReq); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } + } else { + RpcUtils.verifySuccess(execResp.getStatus()); } - RpcUtils.verifySuccess(execResp.getStatus()); return new SessionDataSet( "", execResp.getColumns(), @@ -548,28 +451,27 @@ protected Pair executeLastDataQueryForOneDevice( req.setEnableRedirectQuery(enableRedirect); req.setLegalPathNodes(isLegalPathNodes); req.setTimeout(timeOut); - TSExecuteStatementResp tsExecuteStatementResp = null; TEndPoint redirectedEndPoint = null; - try { - tsExecuteStatementResp = client.executeFastLastDataQueryForOneDeviceV2(req); - RpcUtils.verifySuccessWithRedirection(tsExecuteStatementResp.getStatus()); - } catch (RedirectException e) { - redirectedEndPoint = e.getEndPoint(); - } catch (TException e) { - if (reconnect()) { - try { - req.setSessionId(sessionId); - req.setStatementId(statementId); - tsExecuteStatementResp = client.executeFastLastDataQueryForOneDeviceV2(req); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); + + RetryResult result = + callWithReconnect( + () -> { + req.setSessionId(sessionId); + req.setStatementId(statementId); + return client.executeFastLastDataQueryForOneDeviceV2(req); + }); + + TSExecuteStatementResp tsExecuteStatementResp = result.getResult(); + if (result.getRetryAttempts() == 0) { + try { + RpcUtils.verifySuccessWithRedirection(tsExecuteStatementResp.getStatus()); + } catch (RedirectException e) { + redirectedEndPoint = e.getEndPoint(); } + } else { + RpcUtils.verifySuccess(tsExecuteStatementResp.getStatus()); } - RpcUtils.verifySuccess(tsExecuteStatementResp.getStatus()); return new Pair<>( new SessionDataSet( 
"", @@ -594,25 +496,22 @@ protected SessionDataSet executeLastDataQuery(List paths, long time, lon tsLastDataQueryReq.setFetchSize(session.fetchSize); tsLastDataQueryReq.setEnableRedirectQuery(enableRedirect); tsLastDataQueryReq.setTimeout(timeOut); - TSExecuteStatementResp tsExecuteStatementResp; - try { - tsExecuteStatementResp = client.executeLastDataQueryV2(tsLastDataQueryReq); + + RetryResult result = + callWithReconnect( + () -> { + tsLastDataQueryReq.setSessionId(sessionId); + tsLastDataQueryReq.setStatementId(statementId); + return client.executeLastDataQueryV2(tsLastDataQueryReq); + }); + final TSExecuteStatementResp tsExecuteStatementResp = result.getResult(); + + if (result.getRetryAttempts() == 0) { RpcUtils.verifySuccessWithRedirection(tsExecuteStatementResp.getStatus()); - } catch (TException e) { - if (reconnect()) { - try { - tsLastDataQueryReq.setSessionId(sessionId); - tsLastDataQueryReq.setStatementId(statementId); - tsExecuteStatementResp = client.executeLastDataQueryV2(tsLastDataQueryReq); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } + } else { + RpcUtils.verifySuccess(tsExecuteStatementResp.getStatus()); } - RpcUtils.verifySuccess(tsExecuteStatementResp.getStatus()); return new SessionDataSet( "", tsExecuteStatementResp.getColumns(), @@ -676,25 +575,21 @@ protected SessionDataSet executeAggregationQuery( private SessionDataSet executeAggregationQuery(TSAggregationQueryReq tsAggregationQueryReq) throws StatementExecutionException, IoTDBConnectionException, RedirectException { - TSExecuteStatementResp tsExecuteStatementResp; - try { - tsExecuteStatementResp = client.executeAggregationQueryV2(tsAggregationQueryReq); + RetryResult result = + callWithReconnect( + () -> { + tsAggregationQueryReq.setSessionId(sessionId); + tsAggregationQueryReq.setStatementId(statementId); + return client.executeAggregationQueryV2(tsAggregationQueryReq); + }); + + TSExecuteStatementResp tsExecuteStatementResp = result.getResult(); + if (result.getRetryAttempts() == 0) { RpcUtils.verifySuccessWithRedirection(tsExecuteStatementResp.getStatus()); - } catch (TException e) { - if (reconnect()) { - try { - tsAggregationQueryReq.setSessionId(sessionId); - tsAggregationQueryReq.setStatementId(statementId); - tsExecuteStatementResp = client.executeAggregationQuery(tsAggregationQueryReq); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } + } else { + RpcUtils.verifySuccess(tsExecuteStatementResp.getStatus()); } - RpcUtils.verifySuccess(tsExecuteStatementResp.getStatus()); return new SessionDataSet( "", tsExecuteStatementResp.getColumns(), @@ -721,52 +616,7 @@ private TSAggregationQueryReq createAggregationQueryReq( protected void insertRecord(TSInsertRecordReq request) throws IoTDBConnectionException, StatementExecutionException, RedirectException { - TException lastTException = null; - TSStatus status = null; - for (int i = 0; i <= maxRetryCount; i++) { - if (i > 0) { - // re-init the TException and TSStatus - lastTException = null; - status = null; - // not first time, we need to sleep and then reconnect - try { - TimeUnit.MILLISECONDS.sleep(retryIntervalInMs); - } catch (InterruptedException e) { - // just ignore - } - if (!reconnect()) { - // reconnect failed, just continue to make another retry. 
- continue; - } - } - try { - status = insertRecordInternal(request); - // need retry - if (status.isSetNeedRetry() && status.isNeedRetry()) { - continue; - } - // succeed or don't need to retry - if (i == 0) { - // first time succeed, take account for redirection info - RpcUtils.verifySuccessWithRedirection(status); - } else { - // if it's retry, just ignore redirection info - RpcUtils.verifySuccess(status); - } - return; - } catch (TException e) { - // all network exception need retry until reaching maxRetryCount - lastTException = e; - } - } - - if (status != null) { - RpcUtils.verifySuccess(status); - } else if (lastTException != null) { - throw new IoTDBConnectionException(lastTException); - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } + callWithRetryAndVerifyWithRedirection(() -> insertRecordInternal(request)); } private TSStatus insertRecordInternal(TSInsertRecordReq request) throws TException { @@ -776,52 +626,7 @@ private TSStatus insertRecordInternal(TSInsertRecordReq request) throws TExcepti protected void insertRecord(TSInsertStringRecordReq request) throws IoTDBConnectionException, StatementExecutionException, RedirectException { - TException lastTException = null; - TSStatus status = null; - for (int i = 0; i <= maxRetryCount; i++) { - if (i > 0) { - // re-init the TException and TSStatus - lastTException = null; - status = null; - // not first time, we need to sleep and then reconnect - try { - TimeUnit.MILLISECONDS.sleep(retryIntervalInMs); - } catch (InterruptedException e) { - // just ignore - } - if (!reconnect()) { - // reconnect failed, just continue to make another retry. - continue; - } - } - try { - status = insertRecordInternal(request); - // need retry - if (status.isSetNeedRetry() && status.isNeedRetry()) { - continue; - } - // succeed or don't need to retry - if (i == 0) { - // first time succeed, take account for redirection info - RpcUtils.verifySuccessWithRedirection(status); - } else { - // if it's retry, just ignore redirection info - RpcUtils.verifySuccess(status); - } - return; - } catch (TException e) { - // all network exception need retry until reaching maxRetryCount - lastTException = e; - } - } - - if (status != null) { - RpcUtils.verifySuccess(status); - } else if (lastTException != null) { - throw new IoTDBConnectionException(lastTException); - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } + callWithRetryAndVerifyWithRedirection(() -> insertRecordInternal(request)); } private TSStatus insertRecordInternal(TSInsertStringRecordReq request) throws TException { @@ -831,52 +636,8 @@ private TSStatus insertRecordInternal(TSInsertStringRecordReq request) throws TE protected void insertRecords(TSInsertRecordsReq request) throws IoTDBConnectionException, StatementExecutionException, RedirectException { - TException lastTException = null; - TSStatus status = null; - for (int i = 0; i <= maxRetryCount; i++) { - if (i > 0) { - // re-init the TException and TSStatus - lastTException = null; - status = null; - // not first time, we need to sleep and then reconnect - try { - TimeUnit.MILLISECONDS.sleep(retryIntervalInMs); - } catch (InterruptedException e) { - // just ignore - } - if (!reconnect()) { - // reconnect failed, just continue to make another retry. 
- continue; - } - } - try { - status = insertRecordsInternal(request); - // need retry - if (status.isSetNeedRetry() && status.isNeedRetry()) { - continue; - } - // succeed or don't need to retry - if (i == 0) { - // first time succeed, take account for redirection info - RpcUtils.verifySuccessWithRedirectionForMultiDevices(status, request.getPrefixPaths()); - } else { - // if it's retry, just ignore redirection info - RpcUtils.verifySuccess(status); - } - return; - } catch (TException e) { - // all network exception need retry until reaching maxRetryCount - lastTException = e; - } - } - - if (status != null) { - RpcUtils.verifySuccess(status); - } else if (lastTException != null) { - throw new IoTDBConnectionException(lastTException); - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } + callWithRetryAndVerifyWithRedirectionForMultipleDevices( + () -> insertRecordsInternal(request), request::getPrefixPaths); } private TSStatus insertRecordsInternal(TSInsertRecordsReq request) throws TException { @@ -886,53 +647,8 @@ private TSStatus insertRecordsInternal(TSInsertRecordsReq request) throws TExcep protected void insertRecords(TSInsertStringRecordsReq request) throws IoTDBConnectionException, StatementExecutionException, RedirectException { - - TException lastTException = null; - TSStatus status = null; - for (int i = 0; i <= maxRetryCount; i++) { - if (i > 0) { - // re-init the TException and TSStatus - lastTException = null; - status = null; - // not first time, we need to sleep and then reconnect - try { - TimeUnit.MILLISECONDS.sleep(retryIntervalInMs); - } catch (InterruptedException e) { - // just ignore - } - if (!reconnect()) { - // reconnect failed, just continue to make another retry. - continue; - } - } - try { - status = insertRecordsInternal(request); - // need retry - if (status.isSetNeedRetry() && status.isNeedRetry()) { - continue; - } - // succeed or don't need to retry - if (i == 0) { - // first time succeed, take account for redirection info - RpcUtils.verifySuccessWithRedirectionForMultiDevices(status, request.getPrefixPaths()); - } else { - // if it's retry, just ignore redirection info - RpcUtils.verifySuccess(status); - } - return; - } catch (TException e) { - // all network exception need retry until reaching maxRetryCount - lastTException = e; - } - } - - if (status != null) { - RpcUtils.verifySuccess(status); - } else if (lastTException != null) { - throw new IoTDBConnectionException(lastTException); - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } + callWithRetryAndVerifyWithRedirectionForMultipleDevices( + () -> insertRecordsInternal(request), request::getPrefixPaths); } private TSStatus insertRecordsInternal(TSInsertStringRecordsReq request) throws TException { @@ -942,53 +658,7 @@ private TSStatus insertRecordsInternal(TSInsertStringRecordsReq request) throws protected void insertRecordsOfOneDevice(TSInsertRecordsOfOneDeviceReq request) throws IoTDBConnectionException, StatementExecutionException, RedirectException { - - TException lastTException = null; - TSStatus status = null; - for (int i = 0; i <= maxRetryCount; i++) { - if (i > 0) { - // re-init the TException and TSStatus - lastTException = null; - status = null; - // not first time, we need to sleep and then reconnect - try { - TimeUnit.MILLISECONDS.sleep(retryIntervalInMs); - } catch (InterruptedException e) { - // just ignore - } - if (!reconnect()) { - // reconnect failed, just continue to make another retry. 
- continue; - } - } - try { - status = insertRecordsOfOneDeviceInternal(request); - // need retry - if (status.isSetNeedRetry() && status.isNeedRetry()) { - continue; - } - // succeed or don't need to retry - if (i == 0) { - // first time succeed, take account for redirection info - RpcUtils.verifySuccessWithRedirection(status); - } else { - // if it's retry, just ignore redirection info - RpcUtils.verifySuccess(status); - } - return; - } catch (TException e) { - // all network exception need retry until reaching maxRetryCount - lastTException = e; - } - } - - if (status != null) { - RpcUtils.verifySuccess(status); - } else if (lastTException != null) { - throw new IoTDBConnectionException(lastTException); - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } + callWithRetryAndVerifyWithRedirection(() -> insertRecordsOfOneDeviceInternal(request)); } private TSStatus insertRecordsOfOneDeviceInternal(TSInsertRecordsOfOneDeviceReq request) @@ -999,53 +669,7 @@ private TSStatus insertRecordsOfOneDeviceInternal(TSInsertRecordsOfOneDeviceReq protected void insertStringRecordsOfOneDevice(TSInsertStringRecordsOfOneDeviceReq request) throws IoTDBConnectionException, StatementExecutionException, RedirectException { - - TException lastTException = null; - TSStatus status = null; - for (int i = 0; i <= maxRetryCount; i++) { - if (i > 0) { - // re-init the TException and TSStatus - lastTException = null; - status = null; - // not first time, we need to sleep and then reconnect - try { - TimeUnit.MILLISECONDS.sleep(retryIntervalInMs); - } catch (InterruptedException e) { - // just ignore - } - if (!reconnect()) { - // reconnect failed, just continue to make another retry. - continue; - } - } - try { - status = insertStringRecordsOfOneDeviceInternal(request); - // need retry - if (status.isSetNeedRetry() && status.isNeedRetry()) { - continue; - } - // succeed or don't need to retry - if (i == 0) { - // first time succeed, take account for redirection info - RpcUtils.verifySuccessWithRedirection(status); - } else { - // if it's retry, just ignore redirection info - RpcUtils.verifySuccess(status); - } - return; - } catch (TException e) { - // all network exception need retry until reaching maxRetryCount - lastTException = e; - } - } - - if (status != null) { - RpcUtils.verifySuccess(status); - } else if (lastTException != null) { - throw new IoTDBConnectionException(lastTException); - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } + callWithRetryAndVerifyWithRedirection(() -> insertStringRecordsOfOneDeviceInternal(request)); } private TSStatus insertStringRecordsOfOneDeviceInternal( @@ -1054,57 +678,48 @@ private TSStatus insertStringRecordsOfOneDeviceInternal( return client.insertStringRecordsOfOneDevice(request); } - protected void insertTablet(TSInsertTabletReq request) - throws IoTDBConnectionException, StatementExecutionException, RedirectException { + private void callWithRetryAndVerifyWithRedirectionForMultipleDevices( + TFunction function, Supplier> pathSupplier) + throws StatementExecutionException, RedirectException, IoTDBConnectionException { + RetryResult result = callWithRetry(function); - TException lastTException = null; - TSStatus status = null; - for (int i = 0; i <= maxRetryCount; i++) { - if (i > 0) { - // re-init the TException and TSStatus - lastTException = null; - status = null; - // not first time, we need to sleep and then reconnect - try { - TimeUnit.MILLISECONDS.sleep(retryIntervalInMs); - } catch (InterruptedException 
e) { - // just ignore - } - if (!reconnect()) { - // reconnect failed, just continue to make another retry. - continue; - } - } - try { - status = insertTabletInternal(request); - // need retry - if (status.isSetNeedRetry() && status.isNeedRetry()) { - continue; - } - // succeed or don't need to retry - if (i == 0) { - // first time succeed, take account for redirection info - RpcUtils.verifySuccessWithRedirection(status); - } else { - // if it's retry, just ignore redirection info - RpcUtils.verifySuccess(status); - } - return; - } catch (TException e) { - // all network exception need retry until reaching maxRetryCount - lastTException = e; + TSStatus status = result.getResult(); + if (status != null) { + if (result.getRetryAttempts() == 0) { + RpcUtils.verifySuccessWithRedirectionForMultiDevices(status, pathSupplier.get()); + } else { + RpcUtils.verifySuccess(status); } + } else if (result.getException() != null) { + throw new IoTDBConnectionException(result.getException()); + } else { + throw new IoTDBConnectionException(logForReconnectionFailure()); } + } + private void callWithRetryAndVerifyWithRedirection(TFunction function) + throws StatementExecutionException, RedirectException, IoTDBConnectionException { + RetryResult result = callWithRetry(function); + + TSStatus status = result.getResult(); if (status != null) { - RpcUtils.verifySuccess(status); - } else if (lastTException != null) { - throw new IoTDBConnectionException(lastTException); + if (result.getRetryAttempts() == 0) { + RpcUtils.verifySuccessWithRedirection(status); + } else { + RpcUtils.verifySuccess(status); + } + } else if (result.getException() != null) { + throw new IoTDBConnectionException(result.getException()); } else { throw new IoTDBConnectionException(logForReconnectionFailure()); } } + protected void insertTablet(TSInsertTabletReq request) + throws IoTDBConnectionException, StatementExecutionException, RedirectException { + callWithRetryAndVerifyWithRedirection(() -> insertTabletInternal(request)); + } + private TSStatus insertTabletInternal(TSInsertTabletReq request) throws TException { request.setSessionId(sessionId); return client.insertTablet(request); @@ -1112,53 +727,8 @@ private TSStatus insertTabletInternal(TSInsertTabletReq request) throws TExcepti protected void insertTablets(TSInsertTabletsReq request) throws IoTDBConnectionException, StatementExecutionException, RedirectException { - - TException lastTException = null; - TSStatus status = null; - for (int i = 0; i <= maxRetryCount; i++) { - if (i > 0) { - // re-init the TException and TSStatus - lastTException = null; - status = null; - // not first time, we need to sleep and then reconnect - try { - TimeUnit.MILLISECONDS.sleep(retryIntervalInMs); - } catch (InterruptedException e) { - // just ignore - } - if (!reconnect()) { - // reconnect failed, just continue to make another retry. 
- continue; - } - } - try { - status = insertTabletsInternal(request); - // need retry - if (status.isSetNeedRetry() && status.isNeedRetry()) { - continue; - } - // succeed or don't need to retry - if (i == 0) { - // first time succeed, take account for redirection info - RpcUtils.verifySuccessWithRedirectionForMultiDevices(status, request.getPrefixPaths()); - } else { - // if it's retry, just ignore redirection info - RpcUtils.verifySuccess(status); - } - return; - } catch (TException e) { - // all network exception need retry until reaching maxRetryCount - lastTException = e; - } - } - - if (status != null) { - RpcUtils.verifySuccess(status); - } else if (lastTException != null) { - throw new IoTDBConnectionException(lastTException); - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } + callWithRetryAndVerifyWithRedirectionForMultipleDevices( + () -> insertTabletsInternal(request), request::getPrefixPaths); } private TSStatus insertTabletsInternal(TSInsertTabletsReq request) throws TException { @@ -1168,55 +738,31 @@ private TSStatus insertTabletsInternal(TSInsertTabletsReq request) throws TExcep protected void deleteTimeseries(List paths) throws IoTDBConnectionException, StatementExecutionException { + callWithRetryAndVerify(() -> client.deleteTimeseries(sessionId, paths)); + } - TException lastTException = null; - TSStatus status = null; - for (int i = 0; i <= maxRetryCount; i++) { - if (i > 0) { - // re-init the TException and TSStatus - lastTException = null; - status = null; - // not first time, we need to sleep and then reconnect - try { - TimeUnit.MILLISECONDS.sleep(retryIntervalInMs); - } catch (InterruptedException e) { - // just ignore - } - if (!reconnect()) { - // reconnect failed, just continue to make another retry. 
- continue; - } - } - try { - status = client.deleteTimeseries(sessionId, paths); - // need retry - if (status.isSetNeedRetry() && status.isNeedRetry()) { - continue; - } - // succeed or don't need to retry - RpcUtils.verifySuccess(status); - return; - } catch (TException e) { - // all network exception need retry until reaching maxRetryCount - lastTException = e; - } - } + public void deleteData(TSDeleteDataReq request) + throws IoTDBConnectionException, StatementExecutionException { + callWithRetryAndVerify(() -> deleteDataInternal(request)); + } - if (status != null) { - RpcUtils.verifySuccess(status); - } else if (lastTException != null) { - throw new IoTDBConnectionException(lastTException); + private void callWithRetryAndVerify(TFunction rpc) + throws IoTDBConnectionException, StatementExecutionException { + RetryResult result = callWithRetry(rpc); + if (result.getResult() != null) { + RpcUtils.verifySuccess(result.getResult()); + } else if (result.getException() != null) { + throw new IoTDBConnectionException(result.getException()); } else { throw new IoTDBConnectionException(logForReconnectionFailure()); } } - public void deleteData(TSDeleteDataReq request) - throws IoTDBConnectionException, StatementExecutionException { - + private RetryResult callWithRetry(TFunction rpc) { TException lastTException = null; TSStatus status = null; - for (int i = 0; i <= maxRetryCount; i++) { + int i; + for (i = 0; i <= maxRetryCount; i++) { if (i > 0) { // re-init the TException and TSStatus lastTException = null; @@ -1225,7 +771,13 @@ public void deleteData(TSDeleteDataReq request) try { TimeUnit.MILLISECONDS.sleep(retryIntervalInMs); } catch (InterruptedException e) { - // just ignore + Thread.currentThread().interrupt(); + logger.warn( + "Thread {} was interrupted during retry {} with wait time {} ms. Exiting retry loop.", + Thread.currentThread().getName(), + i, + retryIntervalInMs); + break; } if (!reconnect()) { // reconnect failed, just continue to make another retry. 
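The callWithRetry extraction around this point returns a RetryResult and consumes TFunction lambdas, but neither helper type is defined in this excerpt. Hypothetical shapes consistent with every usage in the diff (the constructor order matches new RetryResult<>(status, lastTException, i)):

@FunctionalInterface
interface TFunction<T> {
  T run() throws TException; // one Thrift RPC attempt
}

final class RetryResult<T> {
  private final T result; // last RPC result, or null if every attempt threw
  private final TException exception; // last network exception, or null
  private final int retryAttempts; // 0 means the first attempt produced the result

  RetryResult(T result, TException exception, int retryAttempts) {
    this.result = result;
    this.exception = exception;
    this.retryAttempts = retryAttempts;
  }

  T getResult() { return result; }
  TException getException() { return exception; }
  int getRetryAttempts() { return retryAttempts; }
}

Folding the loop into one place also lets the interrupt fix above apply to every RPC: instead of swallowing InterruptedException, the new code re-asserts Thread.currentThread().interrupt(), logs a warning, and breaks out of the retry loop.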
@@ -1233,27 +785,19 @@ public void deleteData(TSDeleteDataReq request) } } try { - status = deleteDataInternal(request); + status = rpc.run(); // need retry if (status.isSetNeedRetry() && status.isNeedRetry()) { continue; } - // succeed or don't need to retry - RpcUtils.verifySuccess(status); - return; + break; } catch (TException e) { // all network exception need retry until reaching maxRetryCount lastTException = e; } } - if (status != null) { - RpcUtils.verifySuccess(status); - } else if (lastTException != null) { - throw new IoTDBConnectionException(lastTException); - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } + return new RetryResult<>(status, lastTException, i); } private TSStatus deleteDataInternal(TSDeleteDataReq request) throws TException { @@ -1263,116 +807,74 @@ private TSStatus deleteDataInternal(TSDeleteDataReq request) throws TException { protected void testInsertRecord(TSInsertStringRecordReq request) throws IoTDBConnectionException, StatementExecutionException { - request.setSessionId(sessionId); - try { - RpcUtils.verifySuccess(client.testInsertStringRecord(request)); - } catch (TException e) { - if (reconnect()) { - try { - request.setSessionId(sessionId); - RpcUtils.verifySuccess(client.testInsertStringRecord(request)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSStatus status = + callWithReconnect( + () -> { + request.setSessionId(sessionId); + return client.testInsertStringRecord(request); + }) + .getResult(); + RpcUtils.verifySuccess(status); } protected void testInsertRecord(TSInsertRecordReq request) throws IoTDBConnectionException, StatementExecutionException { - request.setSessionId(sessionId); - try { - RpcUtils.verifySuccess(client.testInsertRecord(request)); - } catch (TException e) { - if (reconnect()) { - try { - request.setSessionId(sessionId); - RpcUtils.verifySuccess(client.testInsertRecord(request)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSStatus status = + callWithReconnect( + () -> { + request.setSessionId(sessionId); + return client.testInsertRecord(request); + }) + .getResult(); + RpcUtils.verifySuccess(status); } public void testInsertRecords(TSInsertStringRecordsReq request) throws IoTDBConnectionException, StatementExecutionException { - request.setSessionId(sessionId); - try { - RpcUtils.verifySuccess(client.testInsertStringRecords(request)); - } catch (TException e) { - if (reconnect()) { - try { - request.setSessionId(sessionId); - RpcUtils.verifySuccess(client.testInsertStringRecords(request)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSStatus status = + callWithReconnect( + () -> { + request.setSessionId(sessionId); + return client.testInsertStringRecords(request); + }) + .getResult(); + RpcUtils.verifySuccess(status); } public void testInsertRecords(TSInsertRecordsReq request) throws IoTDBConnectionException, StatementExecutionException { - request.setSessionId(sessionId); - try { - RpcUtils.verifySuccess(client.testInsertRecords(request)); - } catch (TException e) { - if (reconnect()) { - try { - request.setSessionId(sessionId); - 
RpcUtils.verifySuccess(client.testInsertRecords(request)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSStatus status = + callWithReconnect( + () -> { + request.setSessionId(sessionId); + return client.testInsertRecords(request); + }) + .getResult(); + RpcUtils.verifySuccess(status); } protected void testInsertTablet(TSInsertTabletReq request) throws IoTDBConnectionException, StatementExecutionException { - request.setSessionId(sessionId); - try { - RpcUtils.verifySuccess(client.testInsertTablet(request)); - } catch (TException e) { - if (reconnect()) { - try { - request.setSessionId(sessionId); - RpcUtils.verifySuccess(client.testInsertTablet(request)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSStatus status = + callWithReconnect( + () -> { + request.setSessionId(sessionId); + return client.testInsertTablet(request); + }) + .getResult(); + RpcUtils.verifySuccess(status); } protected void testInsertTablets(TSInsertTabletsReq request) throws IoTDBConnectionException, StatementExecutionException { - request.setSessionId(sessionId); - try { - RpcUtils.verifySuccess(client.testInsertTablets(request)); - } catch (TException e) { - if (reconnect()) { - try { - request.setSessionId(sessionId); - RpcUtils.verifySuccess(client.testInsertTablets(request)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSStatus status = + callWithReconnect( + () -> { + request.setSessionId(sessionId); + return client.testInsertTablets(request); + }) + .getResult(); + RpcUtils.verifySuccess(status); } @SuppressWarnings({ @@ -1425,189 +927,121 @@ private boolean reconnect() { protected void createSchemaTemplate(TSCreateSchemaTemplateReq request) throws IoTDBConnectionException, StatementExecutionException { - request.setSessionId(sessionId); - try { - RpcUtils.verifySuccess(client.createSchemaTemplate(request)); - } catch (TException e) { - if (reconnect()) { - try { - request.setSessionId(sessionId); - RpcUtils.verifySuccess(client.createSchemaTemplate(request)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSStatus status = + callWithReconnect( + () -> { + request.setSessionId(sessionId); + return client.createSchemaTemplate(request); + }) + .getResult(); + RpcUtils.verifySuccess(status); } protected void appendSchemaTemplate(TSAppendSchemaTemplateReq request) throws IoTDBConnectionException, StatementExecutionException { - request.setSessionId(sessionId); - try { - RpcUtils.verifySuccess(client.appendSchemaTemplate(request)); - } catch (TException e) { - if (reconnect()) { - try { - request.setSessionId(sessionId); - RpcUtils.verifySuccess(client.appendSchemaTemplate(request)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSStatus status = + callWithReconnect( + () -> { + request.setSessionId(sessionId); + return client.appendSchemaTemplate(request); + }) + .getResult(); + RpcUtils.verifySuccess(status); } protected void 
pruneSchemaTemplate(TSPruneSchemaTemplateReq request) throws IoTDBConnectionException, StatementExecutionException { - request.setSessionId(sessionId); - try { - RpcUtils.verifySuccess(client.pruneSchemaTemplate(request)); - } catch (TException e) { - if (reconnect()) { - try { - request.setSessionId(sessionId); - RpcUtils.verifySuccess(client.pruneSchemaTemplate(request)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSStatus status = + callWithReconnect( + () -> { + request.setSessionId(sessionId); + return client.pruneSchemaTemplate(request); + }) + .getResult(); + RpcUtils.verifySuccess(status); } protected TSQueryTemplateResp querySchemaTemplate(TSQueryTemplateReq req) throws StatementExecutionException, IoTDBConnectionException { - TSQueryTemplateResp execResp; - req.setSessionId(sessionId); - try { - execResp = client.querySchemaTemplate(req); - RpcUtils.verifySuccess(execResp.getStatus()); - } catch (TException e) { - if (reconnect()) { - try { - execResp = client.querySchemaTemplate(req); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } - + final TSQueryTemplateResp execResp = + callWithReconnect( + () -> { + req.setSessionId(sessionId); + return client.querySchemaTemplate(req); + }) + .getResult(); RpcUtils.verifySuccess(execResp.getStatus()); return execResp; } protected void setSchemaTemplate(TSSetSchemaTemplateReq request) throws IoTDBConnectionException, StatementExecutionException { - request.setSessionId(sessionId); - try { - RpcUtils.verifySuccess(client.setSchemaTemplate(request)); - } catch (TException e) { - if (reconnect()) { - try { - request.setSessionId(sessionId); - RpcUtils.verifySuccess(client.setSchemaTemplate(request)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSStatus status = + callWithReconnect( + () -> { + request.setSessionId(sessionId); + return client.setSchemaTemplate(request); + }) + .getResult(); + RpcUtils.verifySuccess(status); } protected void unsetSchemaTemplate(TSUnsetSchemaTemplateReq request) throws IoTDBConnectionException, StatementExecutionException { - request.setSessionId(sessionId); - try { - RpcUtils.verifySuccess(client.unsetSchemaTemplate(request)); - } catch (TException e) { - if (reconnect()) { - try { - request.setSessionId(sessionId); - RpcUtils.verifySuccess(client.unsetSchemaTemplate(request)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSStatus status = + callWithReconnect( + () -> { + request.setSessionId(sessionId); + return client.unsetSchemaTemplate(request); + }) + .getResult(); + RpcUtils.verifySuccess(status); } protected void dropSchemaTemplate(TSDropSchemaTemplateReq request) throws IoTDBConnectionException, StatementExecutionException { - request.setSessionId(sessionId); - try { - RpcUtils.verifySuccess(client.dropSchemaTemplate(request)); - } catch (TException e) { - if (reconnect()) { - try { - request.setSessionId(sessionId); - RpcUtils.verifySuccess(client.dropSchemaTemplate(request)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - 
} else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSStatus status = + callWithReconnect( + () -> { + request.setSessionId(sessionId); + return client.dropSchemaTemplate(request); + }) + .getResult(); + RpcUtils.verifySuccess(status); } protected void createTimeseriesUsingSchemaTemplate( TCreateTimeseriesUsingSchemaTemplateReq request) throws IoTDBConnectionException, StatementExecutionException { - request.setSessionId(sessionId); - try { - RpcUtils.verifySuccess(client.createTimeseriesUsingSchemaTemplate(request)); - } catch (TException e) { - if (reconnect()) { - try { - request.setSessionId(sessionId); - RpcUtils.verifySuccess(client.createTimeseriesUsingSchemaTemplate(request)); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(MSG_RECONNECTION_FAIL); - } - } + final TSStatus status = + callWithReconnect( + () -> { + request.setSessionId(sessionId); + return client.createTimeseriesUsingSchemaTemplate(request); + }) + .getResult(); + RpcUtils.verifySuccess(status); } protected TSBackupConfigurationResp getBackupConfiguration() throws IoTDBConnectionException, StatementExecutionException { - TSBackupConfigurationResp execResp; - try { - execResp = client.getBackupConfiguration(); - RpcUtils.verifySuccess(execResp.getStatus()); - } catch (TException e) { - if (reconnect()) { - try { - execResp = client.getBackupConfiguration(); - RpcUtils.verifySuccess(execResp.getStatus()); - } catch (TException tException) { - throw new IoTDBConnectionException(tException); - } - } else { - throw new IoTDBConnectionException(logForReconnectionFailure()); - } - } + final TSBackupConfigurationResp execResp = + callWithReconnect(() -> client.getBackupConfiguration()).getResult(); + RpcUtils.verifySuccess(execResp.getStatus()); return execResp; } - public TSConnectionInfoResp fetchAllConnections() throws IoTDBConnectionException { + private RetryResult callWithReconnect(TFunction supplier) + throws IoTDBConnectionException { + T ret; try { - return client.fetchAllConnectionsInfo(); + ret = supplier.run(); + return new RetryResult<>(ret, null, 0); } catch (TException e) { if (reconnect()) { try { - return client.fetchAllConnectionsInfo(); + ret = supplier.run(); + return new RetryResult<>(ret, null, 1); } catch (TException tException) { throw new IoTDBConnectionException(tException); } @@ -1617,6 +1051,10 @@ public TSConnectionInfoResp fetchAllConnections() throws IoTDBConnectionExceptio } } + public TSConnectionInfoResp fetchAllConnections() throws IoTDBConnectionException { + return callWithReconnect(() -> client.fetchAllConnectionsInfo()).getResult(); + } + public boolean isEnableRedirect() { return enableRedirect; } @@ -1652,4 +1090,33 @@ private String logForReconnectionFailure() { public String toString() { return "SessionConnection{" + " endPoint=" + endPoint + "}"; } + + private interface TFunction { + T run() throws TException; + } + + private static class RetryResult { + private final T result; + private final TException exception; + private final int retryAttempts; + + public RetryResult(T result, TException exception, int retryAttempts) { + Preconditions.checkArgument(result == null || exception == null); + this.result = result; + this.exception = exception; + this.retryAttempts = retryAttempts; + } + + public int getRetryAttempts() { + return retryAttempts; + } + + public TException getException() { + return exception; + } + + public T getResult() { + return result; + 
} + } } diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/pool/SessionPool.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/pool/SessionPool.java index 70e4d73209df0..301b245b70e46 100644 --- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/pool/SessionPool.java +++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/pool/SessionPool.java @@ -651,11 +651,10 @@ private ISession getSession() throws IoTDBConnectionException { long timeOut = Math.min(waitToGetSessionTimeoutInMs, 60_000); if (System.currentTimeMillis() - start > timeOut) { LOGGER.warn( - "the SessionPool has wait for {} seconds to get a new connection: {} with {}, {}", + "the SessionPool has wait for {} seconds to get a new connection: {} with {}", (System.currentTimeMillis() - start) / 1000, formattedNodeUrls, - user, - password); + user); LOGGER.warn( "current occupied size {}, queue size {}, considered size {} ", occupied.size(), diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/SubscriptionSession.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/SubscriptionSession.java index d70de467db066..2f2bc40d951ac 100644 --- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/SubscriptionSession.java +++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/SubscriptionSession.java @@ -30,6 +30,7 @@ import org.apache.iotdb.session.SessionConnection; import org.apache.iotdb.session.subscription.model.Subscription; import org.apache.iotdb.session.subscription.model.Topic; +import org.apache.iotdb.session.subscription.util.IdentifierUtils; import org.apache.tsfile.read.common.Field; import org.apache.tsfile.read.common.RowRecord; @@ -46,11 +47,20 @@ public class SubscriptionSession extends Session { public SubscriptionSession(final String host, final int port) { - this(host, port, SessionConfig.DEFAULT_USER, SessionConfig.DEFAULT_PASSWORD); + this( + host, + port, + SessionConfig.DEFAULT_USER, + SessionConfig.DEFAULT_PASSWORD, + SessionConfig.DEFAULT_MAX_FRAME_SIZE); } public SubscriptionSession( - final String host, final int port, final String username, final String password) { + final String host, + final int port, + final String username, + final String password, + final int thriftMaxFrameSize) { // TODO: more configs control super( new Session.Builder() @@ -58,12 +68,11 @@ public SubscriptionSession( .port(port) .username(username) .password(password) + .thriftMaxFrameSize(thriftMaxFrameSize) // disable auto fetch .enableAutoFetch(false) // disable redirection - .enableRedirection(false) - // TODO: config - .thriftMaxFrameSize(Integer.MAX_VALUE)); + .enableRedirection(false)); } @Override @@ -80,16 +89,85 @@ public SessionConnection constructSessionConnection( /////////////////////////////// topic /////////////////////////////// + /** + * Creates a topic with the specified name. + * + *
<p>
If the topic name contains single quotes, it must be enclosed in backticks (`). For example, + * to create a topic named 'topic', the value passed in as topicName should be `'topic'` + * + * @param topicName If the created topic name contains single quotes, the passed parameter needs + * to be enclosed in backticks. + * @throws IoTDBConnectionException If there is an issue with the connection to IoTDB. + * @throws StatementExecutionException If there is an issue executing the SQL statement. + */ public void createTopic(final String topicName) throws IoTDBConnectionException, StatementExecutionException { + IdentifierUtils.checkAndParseIdentifier(topicName); // ignore the parse result final String sql = String.format("CREATE TOPIC %s", topicName); executeNonQueryStatement(sql); } + /** + * Creates a topic with the specified name only if it does not already exist. + * + *
<p>
This method is similar to {@link #createTopic(String)}, but includes the 'IF NOT EXISTS' + * condition. If the topic name contains single quotes, it must be enclosed in backticks (`). + * + * @param topicName If the created topic name contains single quotes, the passed parameter needs + * to be enclosed in backticks. + * @throws IoTDBConnectionException If there is an issue with the connection to IoTDB. + * @throws StatementExecutionException If there is an issue executing the SQL statement. + */ + public void createTopicIfNotExists(final String topicName) + throws IoTDBConnectionException, StatementExecutionException { + IdentifierUtils.checkAndParseIdentifier(topicName); // ignore the parse result + final String sql = String.format("CREATE TOPIC IF NOT EXISTS %s", topicName); + executeNonQueryStatement(sql); + } + + /** + * Creates a topic with the specified name and properties. + * + *
<p>
Topic names with single quotes must be enclosed in backticks (`). Property keys and values + * are included in the SQL statement automatically. + * + * @param topicName If the created topic name contains single quotes, the passed parameter needs + * to be enclosed in backticks. + * @param properties A {@link Properties} object containing the topic's properties. + * @throws IoTDBConnectionException If a connection issue occurs with IoTDB. + * @throws StatementExecutionException If a statement execution issue occurs. + */ public void createTopic(final String topicName, final Properties properties) throws IoTDBConnectionException, StatementExecutionException { - if (properties.isEmpty()) { - createTopic(topicName); + IdentifierUtils.checkAndParseIdentifier(topicName); // ignore the parse result + createTopic(topicName, properties, false); + } + + /** + * Creates a topic with the specified properties if it does not already exist. Topic names with + * single quotes must be enclosed in backticks (`). + * + * @param topicName If the created topic name contains single quotes, the passed parameter needs + * to be enclosed in backticks. + * @param properties A {@link Properties} object containing the topic's properties. + * @throws IoTDBConnectionException If a connection issue occurs. + * @throws StatementExecutionException If the SQL statement execution fails. + */ + public void createTopicIfNotExists(final String topicName, final Properties properties) + throws IoTDBConnectionException, StatementExecutionException { + IdentifierUtils.checkAndParseIdentifier(topicName); // ignore the parse result + createTopic(topicName, properties, true); + } + + private void createTopic( + final String topicName, final Properties properties, final boolean isSetIfNotExistsCondition) + throws IoTDBConnectionException, StatementExecutionException { + if (Objects.isNull(properties) || properties.isEmpty()) { + if (isSetIfNotExistsCondition) { + createTopicIfNotExists(topicName); + } else { + createTopic(topicName); + } return; } final StringBuilder sb = new StringBuilder(); @@ -106,16 +184,49 @@ public void createTopic(final String topicName, final Properties properties) .append(',')); sb.deleteCharAt(sb.length() - 1); sb.append(')'); - final String sql = String.format("CREATE TOPIC %s WITH %s", topicName, sb); + final String sql = + isSetIfNotExistsCondition + ? String.format("CREATE TOPIC IF NOT EXISTS %s WITH %s", topicName, sb) + : String.format("CREATE TOPIC %s WITH %s", topicName, sb); executeNonQueryStatement(sql); } + /** + * Drops the specified topic. + * + *
<p>
This method removes the specified topic from the database. If the topic name contains single + * quotes, it must be enclosed in backticks (`). + * + * @param topicName The name of the topic to be deleted, if it contains single quotes, needs to be + * enclosed in backticks. + * @throws IoTDBConnectionException If there is an issue with the connection to IoTDB. + * @throws StatementExecutionException If there is an issue executing the SQL statement. + */ public void dropTopic(final String topicName) throws IoTDBConnectionException, StatementExecutionException { + IdentifierUtils.checkAndParseIdentifier(topicName); // ignore the parse result final String sql = String.format("DROP TOPIC %s", topicName); executeNonQueryStatement(sql); } + /** + * Drops the specified topic if it exists. + * + *
<p>
This method is similar to {@link #dropTopic(String)}, but includes the 'IF EXISTS' + * condition. If the topic name contains single quotes, it must be enclosed in backticks (`). + * + * @param topicName The name of the topic to be deleted, if it contains single quotes, needs to be + * enclosed in backticks. + * @throws IoTDBConnectionException If there is an issue with the connection to IoTDB. + * @throws StatementExecutionException If there is an issue executing the SQL statement. + */ + public void dropTopicIfExists(final String topicName) + throws IoTDBConnectionException, StatementExecutionException { + IdentifierUtils.checkAndParseIdentifier(topicName); // ignore the parse result + final String sql = String.format("DROP TOPIC IF EXISTS %s", topicName); + executeNonQueryStatement(sql); + } + public Set getTopics() throws IoTDBConnectionException, StatementExecutionException { final String sql = "SHOW TOPICS"; try (final SessionDataSet dataSet = executeQueryStatement(sql)) { @@ -125,6 +236,7 @@ public Set getTopics() throws IoTDBConnectionException, StatementExecutio public Optional getTopic(final String topicName) throws IoTDBConnectionException, StatementExecutionException { + IdentifierUtils.checkAndParseIdentifier(topicName); // ignore the parse result final String sql = String.format("SHOW TOPIC %s", topicName); try (final SessionDataSet dataSet = executeQueryStatement(sql)) { final Set topics = convertDataSetToTopics(dataSet); @@ -147,12 +259,27 @@ public Set getSubscriptions() public Set getSubscriptions(final String topicName) throws IoTDBConnectionException, StatementExecutionException { + IdentifierUtils.checkAndParseIdentifier(topicName); // ignore the parse result final String sql = String.format("SHOW SUBSCRIPTIONS ON %s", topicName); try (final SessionDataSet dataSet = executeQueryStatement(sql)) { return convertDataSetToSubscriptions(dataSet); } } + public void dropSubscription(final String subscriptionId) + throws IoTDBConnectionException, StatementExecutionException { + IdentifierUtils.checkAndParseIdentifier(subscriptionId); // ignore the parse result + final String sql = String.format("DROP SUBSCRIPTION %s", subscriptionId); + executeNonQueryStatement(sql); + } + + public void dropSubscriptionIfExists(final String subscriptionId) + throws IoTDBConnectionException, StatementExecutionException { + IdentifierUtils.checkAndParseIdentifier(subscriptionId); // ignore the parse result + final String sql = String.format("DROP SUBSCRIPTION IF EXISTS %s", subscriptionId); + executeNonQueryStatement(sql); + } + /////////////////////////////// utility /////////////////////////////// public Set convertDataSetToTopics(final SessionDataSet dataSet) @@ -178,7 +305,7 @@ public Set convertDataSetToSubscriptions(final SessionDataSet data while (dataSet.hasNext()) { final RowRecord record = dataSet.next(); final List fields = record.getFields(); - if (fields.size() != 3) { + if (fields.size() != 4) { throw new SubscriptionException( String.format( "Unexpected fields %s was obtained during SHOW SUBSCRIPTION...", @@ -188,7 +315,8 @@ public Set convertDataSetToSubscriptions(final SessionDataSet data new Subscription( fields.get(0).getStringValue(), fields.get(1).getStringValue(), - fields.get(2).getStringValue())); + fields.get(2).getStringValue(), + fields.get(3).getStringValue())); } return subscriptions; } diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/SubscriptionSessionConnection.java 
b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/SubscriptionSessionConnection.java index 135e1055f47c0..1245a39744280 100644 --- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/SubscriptionSessionConnection.java +++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/SubscriptionSessionConnection.java @@ -20,9 +20,7 @@ package org.apache.iotdb.session.subscription; import org.apache.iotdb.common.rpc.thrift.TEndPoint; -import org.apache.iotdb.isession.SessionDataSet; import org.apache.iotdb.rpc.IoTDBConnectionException; -import org.apache.iotdb.rpc.StatementExecutionException; import org.apache.iotdb.service.rpc.thrift.TPipeSubscribeReq; import org.apache.iotdb.service.rpc.thrift.TPipeSubscribeResp; import org.apache.iotdb.session.Session; @@ -31,20 +29,11 @@ import org.apache.thrift.TException; import java.time.ZoneId; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.function.Supplier; public class SubscriptionSessionConnection extends SessionConnection { - private static final String SHOW_DATA_NODES_COMMAND = "SHOW DATANODES"; - private static final String NODE_ID_COLUMN_NAME = "NodeID"; - private static final String STATUS_COLUMN_NAME = "Status"; - private static final String IP_COLUMN_NAME = "RpcAddress"; - private static final String PORT_COLUMN_NAME = "RpcPort"; - private static final String REMOVING_STATUS = "Removing"; - public SubscriptionSessionConnection( Session session, TEndPoint endPoint, @@ -56,27 +45,6 @@ public SubscriptionSessionConnection( super(session, endPoint, zoneId, availableNodes, maxRetryCount, retryIntervalInMs); } - // from org.apache.iotdb.session.NodesSupplier.updateDataNodeList - public Map fetchAllEndPoints() - throws IoTDBConnectionException, StatementExecutionException { - SessionDataSet dataSet = session.executeQueryStatement(SHOW_DATA_NODES_COMMAND); - SessionDataSet.DataIterator iterator = dataSet.iterator(); - Map endPoints = new HashMap<>(); - while (iterator.next()) { - // ignore removing DN - if (REMOVING_STATUS.equals(iterator.getString(STATUS_COLUMN_NAME))) { - continue; - } - String ip = iterator.getString(IP_COLUMN_NAME); - String port = iterator.getString(PORT_COLUMN_NAME); - if (ip != null && port != null) { - endPoints.put( - iterator.getInt(NODE_ID_COLUMN_NAME), new TEndPoint(ip, Integer.parseInt(port))); - } - } - return endPoints; - } - public TPipeSubscribeResp pipeSubscribe(final TPipeSubscribeReq req) throws TException { return client.pipeSubscribe(req); } diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionConsumer.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionConsumer.java index c1a89ded9837a..9e7242de2009b 100644 --- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionConsumer.java +++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionConsumer.java @@ -25,28 +25,30 @@ import org.apache.iotdb.rpc.subscription.config.TopicConfig; import org.apache.iotdb.rpc.subscription.exception.SubscriptionConnectionException; import org.apache.iotdb.rpc.subscription.exception.SubscriptionException; +import org.apache.iotdb.rpc.subscription.exception.SubscriptionPipeTimeoutException; +import org.apache.iotdb.rpc.subscription.exception.SubscriptionPollTimeoutException; import 
org.apache.iotdb.rpc.subscription.exception.SubscriptionRuntimeCriticalException; import org.apache.iotdb.rpc.subscription.exception.SubscriptionRuntimeNonCriticalException; +import org.apache.iotdb.rpc.subscription.exception.SubscriptionTimeoutException; import org.apache.iotdb.rpc.subscription.payload.poll.ErrorPayload; import org.apache.iotdb.rpc.subscription.payload.poll.FileInitPayload; import org.apache.iotdb.rpc.subscription.payload.poll.FilePiecePayload; import org.apache.iotdb.rpc.subscription.payload.poll.FileSealPayload; -import org.apache.iotdb.rpc.subscription.payload.poll.PollFilePayload; -import org.apache.iotdb.rpc.subscription.payload.poll.PollPayload; import org.apache.iotdb.rpc.subscription.payload.poll.SubscriptionCommitContext; import org.apache.iotdb.rpc.subscription.payload.poll.SubscriptionPollPayload; -import org.apache.iotdb.rpc.subscription.payload.poll.SubscriptionPollRequest; -import org.apache.iotdb.rpc.subscription.payload.poll.SubscriptionPollRequestType; import org.apache.iotdb.rpc.subscription.payload.poll.SubscriptionPollResponse; import org.apache.iotdb.rpc.subscription.payload.poll.SubscriptionPollResponseType; import org.apache.iotdb.rpc.subscription.payload.poll.TabletsPayload; import org.apache.iotdb.session.subscription.payload.SubscriptionMessage; import org.apache.iotdb.session.subscription.payload.SubscriptionMessageType; +import org.apache.iotdb.session.subscription.util.CollectionUtils; import org.apache.iotdb.session.subscription.util.IdentifierUtils; +import org.apache.iotdb.session.subscription.util.PollTimer; import org.apache.iotdb.session.subscription.util.RandomStringGenerator; -import org.apache.iotdb.session.subscription.util.SubscriptionPollTimer; import org.apache.iotdb.session.util.SessionUtils; +import org.apache.thrift.annotation.Nullable; +import org.apache.tsfile.write.record.Tablet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,28 +63,40 @@ import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Objects; import java.util.Optional; import java.util.Properties; import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.LockSupport; +import java.util.function.BiFunction; import java.util.stream.Collectors; -import static org.apache.iotdb.rpc.subscription.config.TopicConstant.MODE_SNAPSHOT_VALUE; +import static org.apache.iotdb.rpc.subscription.payload.poll.SubscriptionPollResponseType.ERROR; +import static org.apache.iotdb.rpc.subscription.payload.poll.SubscriptionPollResponseType.FILE_INIT; +import static org.apache.iotdb.rpc.subscription.payload.poll.SubscriptionPollResponseType.TABLETS; +import static org.apache.iotdb.rpc.subscription.payload.poll.SubscriptionPollResponseType.TERMINATION; +import static org.apache.iotdb.session.subscription.util.SetPartitioner.partition; abstract class SubscriptionConsumer implements AutoCloseable { private static final Logger LOGGER = LoggerFactory.getLogger(SubscriptionConsumer.class); - private static final long SLEEP_NS = 100_000_000L; // 
100ms + private static final long SLEEP_MS = 100L; + private static final long SLEEP_DELTA_MS = 50L; + private static final long TIMER_DELTA_MS = 250L; private final String username; private final String password; @@ -101,14 +115,30 @@ abstract class SubscriptionConsumer implements AutoCloseable { private final String fileSaveDir; private final boolean fileSaveFsync; + private final Set inFlightFilesCommitContextSet = new HashSet<>(); + + private final int thriftMaxFrameSize; + private final int maxPollParallelism; @SuppressWarnings("java:S3077") protected volatile Map subscribedTopics = new HashMap<>(); + @Deprecated public boolean allSnapshotTopicMessagesHaveBeenConsumed() { - return subscribedTopics.values().stream() - .noneMatch( - (config) -> config.getAttributesWithSourceMode().containsValue(MODE_SNAPSHOT_VALUE)); + return allTopicMessagesHaveBeenConsumed(subscribedTopics.keySet()); + } + + public boolean allTopicMessagesHaveBeenConsumed() { + return allTopicMessagesHaveBeenConsumed(subscribedTopics.keySet()); + } + + private boolean allTopicMessagesHaveBeenConsumed(final Collection topicNames) { + // For the topic that needs to be detected, there are two scenarios to consider: + // 1. If configs as live, it cannot be determined whether the topic has been fully consumed. + // 2. If configs as snapshot, it means the topic has not been automatically unsubscribed. + // Therefore, the logic can be summarized as follows: if there is a matching topic in subscribed + // topics, then it has not been fully consumed. + return topicNames.stream().map(subscribedTopics::get).noneMatch(Objects::nonNull); } /////////////////////////////// getter /////////////////////////////// @@ -155,17 +185,16 @@ protected SubscriptionConsumer(final Builder builder) { this.fileSaveDir = builder.fileSaveDir; this.fileSaveFsync = builder.fileSaveFsync; + + this.thriftMaxFrameSize = builder.thriftMaxFrameSize; + this.maxPollParallelism = builder.maxPollParallelism; } protected SubscriptionConsumer(final Builder builder, final Properties properties) { this( builder - .host( - (String) - properties.getOrDefault(ConsumerConstant.HOST_KEY, SessionConfig.DEFAULT_HOST)) - .port( - (Integer) - properties.getOrDefault(ConsumerConstant.PORT_KEY, SessionConfig.DEFAULT_PORT)) + .host((String) properties.get(ConsumerConstant.HOST_KEY)) + .port((Integer) properties.get(ConsumerConstant.PORT_KEY)) .nodeUrls((List) properties.get(ConsumerConstant.NODE_URLS_KEY)) .username( (String) @@ -196,7 +225,17 @@ protected SubscriptionConsumer(final Builder builder, final Properties propertie (Boolean) properties.getOrDefault( ConsumerConstant.FILE_SAVE_FSYNC_KEY, - ConsumerConstant.FILE_SAVE_FSYNC_DEFAULT_VALUE))); + ConsumerConstant.FILE_SAVE_FSYNC_DEFAULT_VALUE)) + .thriftMaxFrameSize( + (Integer) + properties.getOrDefault( + ConsumerConstant.THRIFT_MAX_FRAME_SIZE_KEY, + SessionConfig.DEFAULT_MAX_FRAME_SIZE)) + .maxPollParallelism( + (Integer) + properties.getOrDefault( + ConsumerConstant.MAX_POLL_PARALLELISM_KEY, + ConsumerConstant.MAX_POLL_PARALLELISM_DEFAULT_VALUE))); } /////////////////////////////// open & close /////////////////////////////// @@ -286,7 +325,9 @@ private void subscribe(Set topicNames, final boolean needParse) if (needParse) { topicNames = - topicNames.stream().map(IdentifierUtils::parseIdentifier).collect(Collectors.toSet()); + topicNames.stream() + .map(IdentifierUtils::checkAndParseIdentifier) + .collect(Collectors.toSet()); } providers.acquireReadLock(); @@ -316,7 +357,9 @@ private void unsubscribe(Set topicNames, 
final boolean needParse) if (needParse) { topicNames = - topicNames.stream().map(IdentifierUtils::parseIdentifier).collect(Collectors.toSet()); + topicNames.stream() + .map(IdentifierUtils::checkAndParseIdentifier) + .collect(Collectors.toSet()); } providers.acquireReadLock(); @@ -333,7 +376,12 @@ SubscriptionProvider constructProviderAndHandshake(final TEndPoint endPoint) throws SubscriptionException { final SubscriptionProvider provider = new SubscriptionProvider( - endPoint, this.username, this.password, this.consumerId, this.consumerGroupId); + endPoint, + this.username, + this.password, + this.consumerId, + this.consumerGroupId, + this.thriftMaxFrameSize); try { provider.handshake(); } catch (final Exception e) { @@ -342,7 +390,9 @@ SubscriptionProvider constructProviderAndHandshake(final TEndPoint endPoint) } catch (final Exception ignored) { } throw new SubscriptionConnectionException( - String.format("Failed to handshake with subscription provider %s", provider)); + String.format( + "Failed to handshake with subscription provider %s because of %s", provider, e), + e); } // update consumer id and consumer group id if not exist @@ -366,33 +416,47 @@ private Path getFileDir(final String topicName) throws IOException { } private Path getFilePath( + final SubscriptionCommitContext commitContext, final String topicName, final String fileName, final boolean allowFileAlreadyExistsException, final boolean allowInvalidPathException) throws SubscriptionException { try { - final Path filePath = getFileDir(topicName).resolve(fileName); - Files.createFile(filePath); - return filePath; - } catch (final FileAlreadyExistsException fileAlreadyExistsException) { - if (allowFileAlreadyExistsException) { - final String suffix = RandomStringGenerator.generate(16); - LOGGER.warn( - "Detect already existed file {} when polling topic {}, add random suffix {} to filename", - fileName, - topicName, - suffix); - return getFilePath(topicName, fileName + "." + suffix, false, true); + final Path filePath; + try { + filePath = getFileDir(topicName).resolve(fileName); + } catch (final InvalidPathException invalidPathException) { + if (allowInvalidPathException) { + return getFilePath(commitContext, URLEncoder.encode(topicName), fileName, true, false); + } + throw new SubscriptionRuntimeNonCriticalException( + invalidPathException.getMessage(), invalidPathException); } - throw new SubscriptionRuntimeNonCriticalException( - fileAlreadyExistsException.getMessage(), fileAlreadyExistsException); - } catch (final InvalidPathException invalidPathException) { - if (allowInvalidPathException) { - return getFilePath(URLEncoder.encode(topicName), fileName, true, false); + + try { + Files.createFile(filePath); + return filePath; + } catch (final FileAlreadyExistsException fileAlreadyExistsException) { + if (allowFileAlreadyExistsException) { + if (inFlightFilesCommitContextSet.contains(commitContext)) { + LOGGER.info( + "Detect already existed file {} when polling topic {}, resume consumption", + fileName, + topicName); + return filePath; + } + final String suffix = RandomStringGenerator.generate(16); + LOGGER.warn( + "Detect already existed file {} when polling topic {}, add random suffix {} to filename", + fileName, + topicName, + suffix); + return getFilePath(commitContext, topicName, fileName + "." 
+ suffix, false, true); + } + throw new SubscriptionRuntimeNonCriticalException( + fileAlreadyExistsException.getMessage(), fileAlreadyExistsException); } - throw new SubscriptionRuntimeNonCriticalException( - invalidPathException.getMessage(), invalidPathException); } catch (final IOException e) { throw new SubscriptionRuntimeNonCriticalException(e.getMessage(), e); } @@ -400,121 +464,263 @@ private Path getFilePath( /////////////////////////////// poll /////////////////////////////// - protected List poll( - /* @NotNull */ final Set topicNames, final long timeoutMs) - throws SubscriptionException { - // check topic names - if (subscribedTopics.isEmpty()) { - LOGGER.info("SubscriptionConsumer {} has not subscribed to any topics yet", this); + private final Map< + SubscriptionPollResponseType, + BiFunction>> + responseTransformer = + Collections.unmodifiableMap( + new HashMap< + SubscriptionPollResponseType, + BiFunction< + SubscriptionPollResponse, PollTimer, Optional>>() { + { + put(TABLETS, (resp, timer) -> pollTablets(resp, timer)); + put(FILE_INIT, (resp, timer) -> pollFile(resp, timer)); + put( + ERROR, + (resp, timer) -> { + final ErrorPayload payload = (ErrorPayload) resp.getPayload(); + final String errorMessage = payload.getErrorMessage(); + if (payload.isCritical()) { + throw new SubscriptionRuntimeCriticalException(errorMessage); + } else { + throw new SubscriptionRuntimeNonCriticalException(errorMessage); + } + }); + put( + TERMINATION, + (resp, timer) -> { + final SubscriptionCommitContext commitContext = resp.getCommitContext(); + final String topicNameToUnsubscribe = commitContext.getTopicName(); + LOGGER.info( + "Termination occurred when SubscriptionConsumer {} polling topics, unsubscribe topic {} automatically", + coreReportMessage(), + topicNameToUnsubscribe); + unsubscribe(Collections.singleton(topicNameToUnsubscribe), false); + return Optional.empty(); + }); + } + }); + + protected List multiplePoll( + /* @NotNull */ final Set topicNames, final long timeoutMs) { + if (topicNames.isEmpty()) { return Collections.emptyList(); } - topicNames.stream() - .filter(topicName -> !subscribedTopics.containsKey(topicName)) - .forEach( - topicName -> - LOGGER.warn( - "SubscriptionConsumer {} does not subscribe to topic {}", this, topicName)); + // execute single task in current thread + final int availableCount = + SubscriptionExecutorServiceManager.getAvailableThreadCountForPollTasks(); + if (availableCount == 0) { + // non-strict timeout + return singlePoll(topicNames, timeoutMs); + } - final List messages = new ArrayList<>(); - final SubscriptionPollTimer timer = - new SubscriptionPollTimer(System.currentTimeMillis(), timeoutMs); + // dividing topics + final List tasks = new ArrayList<>(); + final List> partitionedTopicNames = + partition(topicNames, Math.min(maxPollParallelism, availableCount)); + for (final Set partition : partitionedTopicNames) { + tasks.add(new PollTask(partition, timeoutMs)); + } - do { - try { - // poll tablets or file - for (final SubscriptionPollResponse pollResponse : pollInternal(topicNames)) { - final short responseType = pollResponse.getResponseType(); - if (!SubscriptionPollResponseType.isValidatedResponseType(responseType)) { - LOGGER.warn("unexpected response type: {}", responseType); + // submit multiple tasks to poll messages + final List messages = new ArrayList<>(); + SubscriptionRuntimeCriticalException lastSubscriptionRuntimeCriticalException = null; + try { + // strict timeout + for (final Future> future : + 
SubscriptionExecutorServiceManager.submitMultiplePollTasks(tasks, timeoutMs)) { + try { + if (future.isCancelled()) { continue; } - switch (SubscriptionPollResponseType.valueOf(responseType)) { - case TABLETS: - messages.add( - new SubscriptionMessage( - pollResponse.getCommitContext(), - ((TabletsPayload) pollResponse.getPayload()).getTablets())); - break; - case FILE_INIT: - pollFile( - pollResponse.getCommitContext(), - ((FileInitPayload) pollResponse.getPayload()).getFileName()) - .ifPresent(messages::add); - break; - case ERROR: - final ErrorPayload payload = (ErrorPayload) pollResponse.getPayload(); - final String errorMessage = payload.getErrorMessage(); - if (payload.isCritical()) { - throw new SubscriptionRuntimeCriticalException(errorMessage); - } else { - throw new SubscriptionRuntimeNonCriticalException(errorMessage); - } - case TERMINATION: - final SubscriptionCommitContext commitContext = pollResponse.getCommitContext(); - final String topicNameToUnsubscribe = commitContext.getTopicName(); - LOGGER.info( - "Termination occurred when SubscriptionConsumer {} polling topics {}, unsubscribe topic {} automatically", + messages.addAll(future.get()); + } catch (final CancellationException ignored) { + + } catch (final ExecutionException e) { + final Throwable cause = e.getCause(); + if (cause instanceof SubscriptionRuntimeCriticalException) { + final SubscriptionRuntimeCriticalException ex = + (SubscriptionRuntimeCriticalException) cause; + LOGGER.warn( + "SubscriptionRuntimeCriticalException occurred when SubscriptionConsumer {} polling topics {}", + this, + topicNames, + ex); + lastSubscriptionRuntimeCriticalException = ex; + } else { + LOGGER.warn( + "ExecutionException occurred when SubscriptionConsumer {} polling topics {}", + this, + topicNames, + e); + } + } + } + } catch (final InterruptedException e) { + LOGGER.warn( + "InterruptedException occurred when SubscriptionConsumer {} polling topics {}", + this, + topicNames, + e); + Thread.currentThread().interrupt(); // restore interrupted state + } + + // TODO: ignore possible interrupted state? 
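multiplePoll partitions the subscribed topics across a bounded number of poll tasks under a strict shared deadline and merges whatever completed, dropping cancelled partitions and only rethrowing a critical failure when nothing was delivered. A hedged sketch of that fan-out, using a plain ExecutorService and invokeAll in place of SubscriptionExecutorServiceManager, and strings in place of SubscriptionMessage:

```java
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public final class FanOutPollSketch {

  // Stand-in for singlePoll(topicNames, timeoutMs) in the diff.
  static List<String> pollPartition(Set<String> topics, long timeoutMs) {
    return new ArrayList<>();
  }

  // Round-robin split of a set into at most n non-empty subsets,
  // mirroring the role of SetPartitioner.partition in the diff.
  static List<Set<String>> partition(Set<String> input, int n) {
    List<Set<String>> parts = new ArrayList<>();
    for (int k = 0; k < Math.min(n, input.size()); k++) parts.add(new HashSet<>());
    int i = 0;
    for (String s : input) parts.get(i++ % parts.size()).add(s);
    return parts;
  }

  static List<String> multiplePoll(Set<String> topics, long timeoutMs, int parallelism)
      throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(parallelism);
    try {
      List<Callable<List<String>>> tasks = new ArrayList<>();
      for (Set<String> part : partition(topics, parallelism)) {
        tasks.add(() -> pollPartition(part, timeoutMs));
      }
      List<String> messages = new ArrayList<>();
      // invokeAll enforces the strict timeout: unfinished tasks come back cancelled.
      for (Future<List<String>> f : pool.invokeAll(tasks, timeoutMs, TimeUnit.MILLISECONDS)) {
        try {
          if (!f.isCancelled()) {
            messages.addAll(f.get());
          }
        } catch (CancellationException ignored) {
          // timed out: drop this partition's result, keep the rest
        } catch (ExecutionException e) {
          // the diff distinguishes critical poll failures here; a sketch just logs
          System.err.println("partition poll failed: " + e.getCause());
        }
      }
      return messages;
    } finally {
      pool.shutdownNow();
    }
  }
}
```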
+ + // even if a SubscriptionRuntimeCriticalException is encountered, try to deliver the message to + // the client + if (messages.isEmpty() && Objects.nonNull(lastSubscriptionRuntimeCriticalException)) { + throw lastSubscriptionRuntimeCriticalException; + } + + return messages; + } + + private class PollTask implements Callable> { + + private final Set topicNames; + private final long timeoutMs; + + public PollTask(final Set topicNames, final long timeoutMs) { + this.topicNames = topicNames; + this.timeoutMs = timeoutMs; + } + + @Override + public List call() { + return singlePoll(topicNames, timeoutMs); + } + } + + private List singlePoll( + /* @NotNull */ final Set topicNames, final long timeoutMs) + throws SubscriptionException { + if (topicNames.isEmpty()) { + return Collections.emptyList(); + } + + final List messages = new ArrayList<>(); + List currentResponses = new ArrayList<>(); + final PollTimer timer = new PollTimer(System.currentTimeMillis(), timeoutMs); + + try { + do { + final List currentMessages = new ArrayList<>(); + try { + currentResponses.clear(); + currentResponses = pollInternal(topicNames, timer.remainingMs()); + for (final SubscriptionPollResponse response : currentResponses) { + final short responseType = response.getResponseType(); + if (!SubscriptionPollResponseType.isValidatedResponseType(responseType)) { + LOGGER.warn("unexpected response type: {}", responseType); + continue; + } + try { + responseTransformer + .getOrDefault( + SubscriptionPollResponseType.valueOf(responseType), + (resp, ignored) -> { + LOGGER.warn("unexpected response type: {}", responseType); + return Optional.empty(); + }) + // TODO: reuse previous timer? + .apply(response, new PollTimer(System.currentTimeMillis(), timeoutMs)) + .ifPresent(currentMessages::add); + } catch (final SubscriptionRuntimeNonCriticalException e) { + LOGGER.warn( + "SubscriptionRuntimeNonCriticalException occurred when SubscriptionConsumer {} polling topics {}", this, topicNames, - topicNameToUnsubscribe); - unsubscribe(Collections.singleton(topicNameToUnsubscribe), false); - break; - default: - LOGGER.warn("unexpected response type: {}", responseType); - break; + e); + // assume the corresponding response has been nacked + } + } + } catch (final SubscriptionRuntimeCriticalException e) { + LOGGER.warn( + "SubscriptionRuntimeCriticalException occurred when SubscriptionConsumer {} polling topics {}", + this, + topicNames, + e); + // nack and clear current responses + try { + nack(currentResponses); + currentResponses.clear(); + } catch (final Exception ignored) { + } + // nack and clear result messages + try { + nack(messages); + messages.clear(); + } catch (final Exception ignored) { } + + // the upper layer perceives ExecutionException + throw e; } - } catch (final SubscriptionRuntimeNonCriticalException e) { - LOGGER.warn( - "SubscriptionRuntimeNonCriticalException occurred when SubscriptionConsumer {} polling topics {}", - this, - topicNames, - e); - // nack and clear messages - try { - nack(messages); - messages.clear(); - } catch (final Exception ignored) { + + // add all current messages to result messages + messages.addAll(currentMessages); + + // TODO: maybe we can poll a few more times + if (!messages.isEmpty()) { + break; } - } catch (final SubscriptionRuntimeCriticalException e) { - LOGGER.warn( - "SubscriptionRuntimeCriticalException occurred when SubscriptionConsumer {} polling topics {}", - this, - topicNames, - e); - // nack and clear messages - try { - nack(messages); - messages.clear(); - } catch 
(final Exception ignored) { + + // check if all topic messages have been consumed + if (allTopicMessagesHaveBeenConsumed(topicNames)) { + break; } - // rethrow - throw e; + + // update timer + timer.update(); + + // TODO: associated with timeoutMs instead of hardcoding + // random sleep time within the range [SLEEP_DELTA_MS, SLEEP_DELTA_MS + SLEEP_MS) + Thread.sleep(((long) (Math.random() * SLEEP_MS)) + SLEEP_DELTA_MS); + + // the use of TIMER_DELTA_MS here slightly reduces the timeout to avoid being interrupted as + // much as possible + } while (timer.notExpired(TIMER_DELTA_MS)); + } catch (final InterruptedException e) { + Thread.currentThread().interrupt(); // restore interrupted state + } + + if (Thread.currentThread().isInterrupted()) { + // nack and clear current responses + try { + nack(currentResponses); + currentResponses.clear(); + } catch (final Exception ignored) { } - if (!messages.isEmpty()) { - return messages; + // nack and clear result messages + try { + nack(messages); + messages.clear(); + } catch (final Exception ignored) { } - // update timer - timer.update(); - // TODO: associated with timeoutMs instead of hardcoding - LockSupport.parkNanos(SLEEP_NS); // wait some time - } while (timer.notExpired()); - LOGGER.info( - "SubscriptionConsumer {} poll empty message after {} millisecond(s)", this, timeoutMs); + // the upper layer perceives CancellationException + return Collections.emptyList(); + } + return messages; } private Optional pollFile( - final SubscriptionCommitContext commitContext, final String fileName) - throws SubscriptionException { + final SubscriptionPollResponse response, final PollTimer timer) throws SubscriptionException { + final SubscriptionCommitContext commitContext = response.getCommitContext(); + final String fileName = ((FileInitPayload) response.getPayload()).getFileName(); final String topicName = commitContext.getTopicName(); - final Path filePath = getFilePath(topicName, fileName, true, true); + final Path filePath = getFilePath(commitContext, topicName, fileName, true, true); final File file = filePath.toFile(); try (final RandomAccessFile fileWriter = new RandomAccessFile(file, "rw")) { - return Optional.of(pollFileInternal(commitContext, file, fileWriter)); + return pollFileInternal(commitContext, fileName, file, fileWriter, timer); } catch (final Exception e) { + if (!(e instanceof SubscriptionPollTimeoutException)) { + inFlightFilesCommitContextSet.remove(commitContext); + } // construct temporary message to nack nack( Collections.singletonList( @@ -523,36 +729,46 @@ private Optional pollFile( } } - private SubscriptionMessage pollFileInternal( + private Optional pollFileInternal( final SubscriptionCommitContext commitContext, + final String rawFileName, final File file, - final RandomAccessFile fileWriter) + final RandomAccessFile fileWriter, + final PollTimer timer) throws IOException, SubscriptionException { - final int dataNodeId = commitContext.getDataNodeId(); - final String topicName = commitContext.getTopicName(); - final String fileName = file.getName(); + long writingOffset = fileWriter.length(); LOGGER.info( - "{} start to poll file {} with commit context {}", + "{} start to poll file {} with commit context {} at offset {}", this, file.getAbsolutePath(), - commitContext); + commitContext, + writingOffset); - long writingOffset = fileWriter.length(); + fileWriter.seek(writingOffset); while (true) { + timer.update(); + if (timer.isExpired(TIMER_DELTA_MS)) { + // resume from breakpoint if timeout happened when polling files + 
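At this step, an expired timer parks the commit context in inFlightFilesCommitContextSet so the next poll can resume where this one stopped: the writing offset is simply the current file length, so re-polling seeks there and keeps appending rather than starting over. A sketch of that resume-from-breakpoint idea under stated assumptions, with fetchPiece as an illustrative stand-in for the piecewise pollFile RPC:

```java
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public final class ResumableFileSketch {

  // Stand-in for one FilePiecePayload fetched at `offset`; null signals "file sealed".
  static byte[] fetchPiece(long offset) {
    return null;
  }

  static void pollFileResumable(File file) throws IOException {
    try (RandomAccessFile writer = new RandomAccessFile(file, "rw")) {
      long writingOffset = writer.length(); // breakpoint: everything before this is already on disk
      writer.seek(writingOffset);
      while (true) {
        byte[] piece = fetchPiece(writingOffset);
        if (piece == null) {
          break; // sealed; the caller would now hand the file path to the client
        }
        writer.write(piece);
        writingOffset += piece.length;
      }
    }
  }
}
```

This is also why nack deliberately skips deleting files whose commit contexts are still in flight: the partially written file is the breakpoint.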
inFlightFilesCommitContextSet.add(commitContext); + final String message = + String.format( + "Timeout occurred when SubscriptionConsumer %s polling file %s with commit context %s, record writing offset %s for subsequent poll", + this, file.getAbsolutePath(), commitContext, writingOffset); + LOGGER.info(message); + throw new SubscriptionRuntimeNonCriticalException(message); + } + final List responses = - pollFileInternal(dataNodeId, topicName, fileName, writingOffset); + pollFileInternal(commitContext, writingOffset, timer.remainingMs()); - // It's agreed that the server will always return at least one response, even in case of - // failure. + // If responses is empty, it means that some outdated subscription events may be being polled, + // so just return. if (responses.isEmpty()) { - final String errorMessage = - String.format("SubscriptionConsumer %s poll empty response", this); - LOGGER.warn(errorMessage); - throw new SubscriptionRuntimeNonCriticalException(errorMessage); + return Optional.empty(); } - // Only one SubscriptionEvent polled currently... + // only one SubscriptionEvent polled currently final SubscriptionPollResponse response = responses.get(0); final SubscriptionPollPayload payload = response.getPayload(); final short responseType = response.getResponseType(); @@ -578,11 +794,11 @@ private SubscriptionMessage pollFileInternal( } // check file name - if (!fileName.startsWith(((FilePiecePayload) payload).getFileName())) { + if (!Objects.equals(rawFileName, ((FilePiecePayload) payload).getFileName())) { final String errorMessage = String.format( "inconsistent file name, current is %s, incoming is %s, consumer: %s", - fileName, ((FilePiecePayload) payload).getFileName(), this); + rawFileName, ((FilePiecePayload) payload).getFileName(), this); LOGGER.warn(errorMessage); throw new SubscriptionRuntimeNonCriticalException(errorMessage); } @@ -625,11 +841,11 @@ private SubscriptionMessage pollFileInternal( } // check file name - if (!fileName.startsWith(((FileSealPayload) payload).getFileName())) { + if (!Objects.equals(rawFileName, ((FileSealPayload) payload).getFileName())) { final String errorMessage = String.format( "inconsistent file name, current is %s, incoming is %s, consumer: %s", - fileName, ((FileSealPayload) payload).getFileName(), this); + rawFileName, ((FileSealPayload) payload).getFileName(), this); LOGGER.warn(errorMessage); throw new SubscriptionRuntimeNonCriticalException(errorMessage); } @@ -657,7 +873,8 @@ private SubscriptionMessage pollFileInternal( commitContext); // generate subscription message - return new SubscriptionMessage(commitContext, file.getAbsolutePath()); + inFlightFilesCommitContextSet.remove(commitContext); + return Optional.of(new SubscriptionMessage(commitContext, file.getAbsolutePath())); } case ERROR: { @@ -665,10 +882,136 @@ private SubscriptionMessage pollFileInternal( final String errorMessage = ((ErrorPayload) payload).getErrorMessage(); final boolean critical = ((ErrorPayload) payload).isCritical(); + if (!critical + && Objects.nonNull(errorMessage) + && errorMessage.contains(SubscriptionTimeoutException.KEYWORD)) { + // resume from breakpoint if timeout happened when polling files + inFlightFilesCommitContextSet.add(commitContext); + final String message = + String.format( + "Timeout occurred when SubscriptionConsumer %s polling file %s with commit context %s, record writing offset %s for subsequent poll", + this, file.getAbsolutePath(), commitContext, writingOffset); + LOGGER.info(message); + throw new 
SubscriptionPollTimeoutException(message); + } else { + LOGGER.warn( + "Error occurred when SubscriptionConsumer {} polling file {} with commit context {}: {}, critical: {}", + this, + file.getAbsolutePath(), + commitContext, + errorMessage, + critical); + if (critical) { + throw new SubscriptionRuntimeCriticalException(errorMessage); + } else { + throw new SubscriptionRuntimeNonCriticalException(errorMessage); + } + } + } + default: + final String errorMessage = String.format("unexpected response type: %s", responseType); + LOGGER.warn(errorMessage); + throw new SubscriptionRuntimeNonCriticalException(errorMessage); + } + } + } + + private Optional pollTablets( + final SubscriptionPollResponse response, final PollTimer timer) throws SubscriptionException { + try { + return pollTabletsInternal(response, timer); + } catch (final Exception e) { + // construct temporary message to nack + nack( + Collections.singletonList( + new SubscriptionMessage(response.getCommitContext(), Collections.emptyList()))); + throw new SubscriptionRuntimeNonCriticalException(e.getMessage(), e); + } + } + + private Optional pollTabletsInternal( + final SubscriptionPollResponse initialResponse, final PollTimer timer) { + final List tablets = ((TabletsPayload) initialResponse.getPayload()).getTablets(); + final SubscriptionCommitContext commitContext = initialResponse.getCommitContext(); + + int nextOffset = ((TabletsPayload) initialResponse.getPayload()).getNextOffset(); + while (true) { + if (nextOffset <= 0) { + if (!Objects.equals(tablets.size(), -nextOffset)) { + final String errorMessage = + String.format( + "inconsistent tablet size, current is %s, incoming is %s, consumer: %s", + tablets.size(), -nextOffset, this); + LOGGER.warn(errorMessage); + throw new SubscriptionRuntimeNonCriticalException(errorMessage); + } + return Optional.of(new SubscriptionMessage(commitContext, tablets)); + } + + timer.update(); + if (timer.isExpired(TIMER_DELTA_MS)) { + final String errorMessage = + String.format( + "timeout while poll tablets with commit context: %s, consumer: %s", + commitContext, this); + LOGGER.warn(errorMessage); + throw new SubscriptionRuntimeNonCriticalException(errorMessage); + } + + final List responses = + pollTabletsInternal(commitContext, nextOffset, timer.remainingMs()); + + // If responses is empty, it means that some outdated subscription events may be being polled, + // so just return. 
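pollTabletsInternal pages through tablet batches via nextOffset: a positive value is the offset to fetch next, while a non-positive value both terminates the loop and encodes the expected total tablet count as -nextOffset, which doubles as a consistency check. A compact sketch of that paging contract, with Batch and fetchBatch as hypothetical stand-ins for TabletsPayload and the pollTablets RPC:

```java
import java.util.ArrayList;
import java.util.List;

public final class TabletPagingSketch {

  static final class Batch {
    final List<String> tablets; // stand-in for List<Tablet>
    final int nextOffset;       // > 0: next fetch offset; <= 0: done, -nextOffset = expected total

    Batch(List<String> tablets, int nextOffset) {
      this.tablets = tablets;
      this.nextOffset = nextOffset;
    }
  }

  // Stand-in for the pollTablets RPC at a given offset; a real server would
  // return the next slice of tablets plus the follow-up offset.
  static Batch fetchBatch(int offset) {
    return new Batch(new ArrayList<>(), 0);
  }

  static List<String> pollAllTablets(Batch initial) {
    final List<String> tablets = new ArrayList<>(initial.tablets);
    int nextOffset = initial.nextOffset;
    while (nextOffset > 0) {
      final Batch batch = fetchBatch(nextOffset);
      tablets.addAll(batch.tablets);
      nextOffset = batch.nextOffset;
    }
    // Terminal consistency check mirrored from the diff: the sentinel encodes the expected size.
    if (tablets.size() != -nextOffset) {
      throw new IllegalStateException(
          "inconsistent tablet size, current is " + tablets.size() + ", expected " + (-nextOffset));
    }
    return tablets;
  }
}
```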
+ if (responses.isEmpty()) { + return Optional.empty(); + } + + // only one SubscriptionEvent polled currently + final SubscriptionPollResponse response = responses.get(0); + final SubscriptionPollPayload payload = response.getPayload(); + final short responseType = response.getResponseType(); + if (!SubscriptionPollResponseType.isValidatedResponseType(responseType)) { + final String errorMessage = String.format("unexpected response type: %s", responseType); + LOGGER.warn(errorMessage); + throw new SubscriptionRuntimeNonCriticalException(errorMessage); + } + + switch (SubscriptionPollResponseType.valueOf(responseType)) { + case TABLETS: + { + // check commit context + final SubscriptionCommitContext incomingCommitContext = response.getCommitContext(); + if (Objects.isNull(incomingCommitContext) + || !Objects.equals(commitContext, incomingCommitContext)) { + final String errorMessage = + String.format( + "inconsistent commit context, current is %s, incoming is %s, consumer: %s", + commitContext, incomingCommitContext, this); + LOGGER.warn(errorMessage); + throw new SubscriptionRuntimeNonCriticalException(errorMessage); + } + + // update tablets + tablets.addAll(((TabletsPayload) response.getPayload()).getTablets()); + + // update offset + nextOffset = ((TabletsPayload) payload).getNextOffset(); + break; + } + case ERROR: + { + // no need to check commit context + + final String errorMessage = ((ErrorPayload) payload).getErrorMessage(); + final boolean critical = ((ErrorPayload) payload).isCritical(); + if (Objects.equals(payload, ErrorPayload.OUTDATED_ERROR_PAYLOAD)) { + // suppress warn log when poll outdated subscription event + return Optional.empty(); + } LOGGER.warn( - "Error occurred when SubscriptionConsumer {} polling file {} with commit context {}: {}, critical: {}", + "Error occurred when SubscriptionConsumer {} polling tablets with commit context {}: {}, critical: {}", this, - file.getAbsolutePath(), commitContext, errorMessage, critical); @@ -686,8 +1029,8 @@ private SubscriptionMessage pollFileInternal( } } - private List pollInternal(final Set topicNames) - throws SubscriptionException { + private List pollInternal( + final Set topicNames, final long timeoutMs) throws SubscriptionException { providers.acquireReadLock(); try { final SubscriptionProvider provider = providers.getNextAvailableProvider(); @@ -702,9 +1045,7 @@ private List pollInternal(final Set topicNames } // ignore SubscriptionConnectionException to improve poll auto retry try { - return provider.poll( - new SubscriptionPollRequest( - SubscriptionPollRequestType.POLL.getType(), new PollPayload(topicNames), 0L)); + return provider.poll(topicNames, timeoutMs); } catch (final SubscriptionConnectionException ignored) { return Collections.emptyList(); } @@ -714,8 +1055,9 @@ private List pollInternal(final Set topicNames } private List pollFileInternal( - final int dataNodeId, final String topicName, final String fileName, final long writingOffset) + final SubscriptionCommitContext commitContext, final long writingOffset, final long timeoutMs) throws SubscriptionException { + final int dataNodeId = commitContext.getDataNodeId(); providers.acquireReadLock(); try { final SubscriptionProvider provider = providers.getProvider(dataNodeId); @@ -730,11 +1072,34 @@ private List pollFileInternal( } // ignore SubscriptionConnectionException to improve poll auto retry try { - return provider.poll( - new SubscriptionPollRequest( - SubscriptionPollRequestType.POLL_FILE.getType(), - new PollFilePayload(topicName, fileName, 
writingOffset), - 0L)); + return provider.pollFile(commitContext, writingOffset, timeoutMs); + } catch (final SubscriptionConnectionException ignored) { + return Collections.emptyList(); + } + } finally { + providers.releaseReadLock(); + } + } + + private List pollTabletsInternal( + final SubscriptionCommitContext commitContext, final int offset, final long timeoutMs) + throws SubscriptionException { + final int dataNodeId = commitContext.getDataNodeId(); + providers.acquireReadLock(); + try { + final SubscriptionProvider provider = providers.getProvider(dataNodeId); + if (Objects.isNull(provider) || !provider.isAvailable()) { + if (isClosed()) { + return Collections.emptyList(); + } + throw new SubscriptionConnectionException( + String.format( + "something unexpected happened when %s poll tablets from subscription provider with data node id %s, the subscription provider may be unavailable or not existed", + this, dataNodeId)); + } + // ignore SubscriptionConnectionException to improve poll auto retry + try { + return provider.pollTablets(commitContext, offset, timeoutMs); } catch (final SubscriptionConnectionException ignored) { return Collections.emptyList(); } @@ -753,7 +1118,7 @@ protected void ack(final Iterable messages) throws Subscrip .computeIfAbsent(message.getCommitContext().getDataNodeId(), (id) -> new ArrayList<>()) .add(message.getCommitContext()); } - for (final Map.Entry> entry : + for (final Entry> entry : dataNodeIdToSubscriptionCommitContexts.entrySet()) { commitInternal(entry.getKey(), entry.getValue(), false); } @@ -765,7 +1130,10 @@ protected void nack(final Iterable messages) throws Subscri for (final SubscriptionMessage message : messages) { // make every effort to delete stale intermediate file if (Objects.equals( - SubscriptionMessageType.TS_FILE_HANDLER.getType(), message.getMessageType())) { + SubscriptionMessageType.TS_FILE_HANDLER.getType(), message.getMessageType()) + && + // do not delete file that can resume from breakpoint + !inFlightFilesCommitContextSet.contains(message.getCommitContext())) { try { message.getTsFileHandler().deleteFile(); } catch (final Exception ignored) { @@ -775,7 +1143,22 @@ protected void nack(final Iterable messages) throws Subscri .computeIfAbsent(message.getCommitContext().getDataNodeId(), (id) -> new ArrayList<>()) .add(message.getCommitContext()); } - for (final Map.Entry> entry : + for (final Entry> entry : + dataNodeIdToSubscriptionCommitContexts.entrySet()) { + commitInternal(entry.getKey(), entry.getValue(), true); + } + } + + private void nack(final List responses) throws SubscriptionException { + final Map> dataNodeIdToSubscriptionCommitContexts = + new HashMap<>(); + for (final SubscriptionPollResponse response : responses) { + // there is no stale intermediate file here + dataNodeIdToSubscriptionCommitContexts + .computeIfAbsent(response.getCommitContext().getDataNodeId(), (id) -> new ArrayList<>()) + .add(response.getCommitContext()); + } + for (final Entry> entry : dataNodeIdToSubscriptionCommitContexts.entrySet()) { commitInternal(entry.getKey(), entry.getValue(), true); } @@ -911,6 +1294,11 @@ private void subscribeWithRedirection(final Set topicNames) throws Subsc subscribedTopics = provider.subscribe(topicNames); return; } catch (final Exception e) { + if (e instanceof SubscriptionPipeTimeoutException) { + // degrade exception to log for pipe timeout + LOGGER.warn(e.getMessage()); + return; + } LOGGER.warn( "{} failed to subscribe topics {} from subscription provider {}, try next subscription provider...", this, 
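For reference, a minimal sketch of the caller-visible effect of the degrade-to-log branch above (illustrative only, not part of the patch; it assumes the consumer's public subscribe API used elsewhere in this module):

    // With this change, a SubscriptionPipeTimeoutException raised while subscribing is
    // logged as a warning and subscribeWithRedirection returns, so the caller's
    // subscribe() completes normally instead of failing over to another provider.
    consumer.subscribe(Collections.singleton("topic1"));
    // Per the new SUBSCRIPTION_PIPE_TIMEOUT_FORMATTER message, the subscription
    // correctness should then be checked manually later.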
@@ -941,6 +1329,11 @@ private void unsubscribeWithRedirection(final Set topicNames) subscribedTopics = provider.unsubscribe(topicNames); return; } catch (final Exception e) { + if (e instanceof SubscriptionPipeTimeoutException) { + // degrade exception to log for pipe timeout + LOGGER.warn(e.getMessage()); + return; + } LOGGER.warn( "{} failed to unsubscribe topics {} from subscription provider {}, try next subscription provider...", this, @@ -966,7 +1359,7 @@ Map fetchAllEndPointsWithRedirection() throws SubscriptionEx } for (final SubscriptionProvider provider : providers) { try { - return provider.getSessionConnection().fetchAllEndPoints(); + return provider.heartbeat().getEndPoints(); } catch (final Exception e) { LOGGER.warn( "{} failed to fetch all endpoints from subscription provider {}, try next subscription provider...", @@ -1004,12 +1397,15 @@ public abstract static class Builder { protected String fileSaveDir = ConsumerConstant.FILE_SAVE_DIR_DEFAULT_VALUE; protected boolean fileSaveFsync = ConsumerConstant.FILE_SAVE_FSYNC_DEFAULT_VALUE; + protected int thriftMaxFrameSize = SessionConfig.DEFAULT_MAX_FRAME_SIZE; + protected int maxPollParallelism = ConsumerConstant.MAX_POLL_PARALLELISM_DEFAULT_VALUE; + public Builder host(final String host) { this.host = host; return this; } - public Builder port(final int port) { + public Builder port(final Integer port) { this.port = port; return this; } @@ -1029,13 +1425,19 @@ public Builder password(final String password) { return this; } - public Builder consumerId(final String consumerId) { - this.consumerId = IdentifierUtils.parseIdentifier(consumerId); + public Builder consumerId(@Nullable final String consumerId) { + if (Objects.isNull(consumerId)) { + return this; + } + this.consumerId = IdentifierUtils.checkAndParseIdentifier(consumerId); return this; } - public Builder consumerGroupId(final String consumerGroupId) { - this.consumerGroupId = IdentifierUtils.parseIdentifier(consumerGroupId); + public Builder consumerGroupId(@Nullable final String consumerGroupId) { + if (Objects.isNull(consumerGroupId)) { + return this; + } + this.consumerGroupId = IdentifierUtils.checkAndParseIdentifier(consumerGroupId); return this; } @@ -1061,6 +1463,19 @@ public Builder fileSaveFsync(final boolean fileSaveFsync) { return this; } + public Builder thriftMaxFrameSize(final int thriftMaxFrameSize) { + this.thriftMaxFrameSize = thriftMaxFrameSize; + return this; + } + + public Builder maxPollParallelism(final int maxPollParallelism) { + // Here the minimum value of max poll parallelism is set to 1 instead of 0, in order to use a + // single thread to execute poll whenever there are idle resources available, thereby + // achieving strict timeout. 
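+      // For example, builder.maxPollParallelism(0) or any negative value is coerced to 1 below,
+      // so a consumer always has at least one thread available for executing poll tasks.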
+ this.maxPollParallelism = Math.max(maxPollParallelism, 1); + return this; + } + public abstract SubscriptionPullConsumer buildPullConsumer(); public abstract SubscriptionPushConsumer buildPushConsumer(); @@ -1069,17 +1484,21 @@ public Builder fileSaveFsync(final boolean fileSaveFsync) { /////////////////////////////// stringify /////////////////////////////// protected Map coreReportMessage() { - final Map result = new HashMap<>(5); + final Map result = new HashMap<>(); result.put("consumerId", consumerId); result.put("consumerGroupId", consumerGroupId); result.put("isClosed", isClosed.toString()); result.put("fileSaveDir", fileSaveDir); - result.put("subscribedTopicNames", subscribedTopics.keySet().toString()); + result.put( + "inFlightFilesCommitContextSet", + CollectionUtils.getLimitedString(inFlightFilesCommitContextSet, 32)); + result.put( + "subscribedTopicNames", CollectionUtils.getLimitedString(subscribedTopics.keySet(), 32)); return result; } protected Map allReportMessage() { - final Map result = new HashMap<>(10); + final Map result = new HashMap<>(); result.put("consumerId", consumerId); result.put("consumerGroupId", consumerGroupId); result.put("heartbeatIntervalMs", String.valueOf(heartbeatIntervalMs)); @@ -1089,6 +1508,9 @@ protected Map allReportMessage() { result.put("isReleased", isReleased.toString()); result.put("fileSaveDir", fileSaveDir); result.put("fileSaveFsync", String.valueOf(fileSaveFsync)); + result.put("inFlightFilesCommitContextSet", inFlightFilesCommitContextSet.toString()); + result.put("thriftMaxFrameSize", String.valueOf(thriftMaxFrameSize)); + result.put("maxPollParallelism", String.valueOf(maxPollParallelism)); result.put("subscribedTopics", subscribedTopics.toString()); return result; } diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionExecutorServiceManager.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionExecutorServiceManager.java index 6ce5946d4fcc6..b8f35b392b0c3 100644 --- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionExecutorServiceManager.java +++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionExecutorServiceManager.java @@ -22,7 +22,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collection; +import java.util.List; import java.util.Objects; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.ScheduledExecutorService; @@ -42,26 +46,18 @@ public final class SubscriptionExecutorServiceManager { private static final String DOWNSTREAM_DATA_FLOW_EXECUTOR_NAME = "SubscriptionDownstreamDataFlowExecutor"; - /** - * Control Flow Executor: execute heartbeat worker and endpoints syncer for {@link - * SubscriptionConsumer} - */ - private static final SubscriptionExecutorService CONTROL_FLOW_EXECUTOR = - new SubscriptionExecutorService( + /** Control Flow Executor: execute heartbeat worker, endpoints syncer and auto poll worker */ + private static final SubscriptionScheduledExecutorService CONTROL_FLOW_EXECUTOR = + new SubscriptionScheduledExecutorService( CONTROL_FLOW_EXECUTOR_NAME, Math.max(Runtime.getRuntime().availableProcessors() / 2, 1)); - /** - * Upstream Data Flow Executor: execute auto commit worker and async commit worker for {@link - * SubscriptionPullConsumer} - */ - 
private static final SubscriptionExecutorService UPSTREAM_DATA_FLOW_EXECUTOR = - new SubscriptionExecutorService( + /** Upstream Data Flow Executor: execute auto commit worker and async commit worker */ + private static final SubscriptionScheduledExecutorService UPSTREAM_DATA_FLOW_EXECUTOR = + new SubscriptionScheduledExecutorService( UPSTREAM_DATA_FLOW_EXECUTOR_NAME, Math.max(Runtime.getRuntime().availableProcessors() / 2, 1)); - /** - * Downstream Data Flow Executor: execute auto poll worker for {@link SubscriptionPushConsumer} - */ + /** Downstream Data Flow Executor: execute poll task */ private static final SubscriptionExecutorService DOWNSTREAM_DATA_FLOW_EXECUTOR = new SubscriptionExecutorService( DOWNSTREAM_DATA_FLOW_EXECUTOR_NAME, @@ -127,6 +123,17 @@ public static ScheduledFuture submitEndpointsSyncer( TimeUnit.MILLISECONDS); } + @SuppressWarnings("unsafeThreadSchedule") + public static ScheduledFuture submitAutoPollWorker( + final Runnable task, final long autoPollIntervalMs) { + CONTROL_FLOW_EXECUTOR.launchIfNeeded(); + return CONTROL_FLOW_EXECUTOR.scheduleWithFixedDelay( + task, + generateRandomInitialDelayMs(autoPollIntervalMs), + autoPollIntervalMs, + TimeUnit.MILLISECONDS); + } + @SuppressWarnings("unsafeThreadSchedule") public static ScheduledFuture submitAutoCommitWorker( final Runnable task, final long autoCommitIntervalMs) { @@ -143,15 +150,16 @@ public static void submitAsyncCommitWorker(final Runnable task) { UPSTREAM_DATA_FLOW_EXECUTOR.submit(task); } - @SuppressWarnings("unsafeThreadSchedule") - public static ScheduledFuture submitAutoPollWorker( - final Runnable task, final long autoPollIntervalMs) { + public static List> submitMultiplePollTasks( + final Collection> tasks, final long timeoutMs) + throws InterruptedException { DOWNSTREAM_DATA_FLOW_EXECUTOR.launchIfNeeded(); - return DOWNSTREAM_DATA_FLOW_EXECUTOR.scheduleWithFixedDelay( - task, - generateRandomInitialDelayMs(autoPollIntervalMs), - autoPollIntervalMs, - TimeUnit.MILLISECONDS); + return DOWNSTREAM_DATA_FLOW_EXECUTOR.invokeAll(tasks, timeoutMs); + } + + public static int getAvailableThreadCountForPollTasks() { + DOWNSTREAM_DATA_FLOW_EXECUTOR.launchIfNeeded(); + return DOWNSTREAM_DATA_FLOW_EXECUTOR.getAvailableCount(); } /////////////////////////////// subscription executor service /////////////////////////////// @@ -160,7 +168,7 @@ private static class SubscriptionExecutorService { String name; volatile int corePoolSize; - volatile ScheduledExecutorService executor; + volatile ExecutorService executor; SubscriptionExecutorService(final String name, final int corePoolSize) { this.name = name; @@ -194,7 +202,7 @@ void launchIfNeeded() { LOGGER.info("Launching {} with core pool size {}...", this.name, this.corePoolSize); this.executor = - Executors.newScheduledThreadPool( + Executors.newFixedThreadPool( this.corePoolSize, r -> { final Thread t = @@ -244,31 +252,98 @@ void shutdown() { } } - @SuppressWarnings("unsafeThreadSchedule") - ScheduledFuture scheduleWithFixedDelay( - final Runnable task, final long initialDelay, final long delay, final TimeUnit unit) { + Future submit(final Runnable task) { if (!isShutdown()) { synchronized (this) { if (!isShutdown()) { - return this.executor.scheduleWithFixedDelay(task, initialDelay, delay, unit); + return this.executor.submit(task); } } } - LOGGER.warn("{} has not been launched, ignore scheduleWithFixedDelay for task", this.name); + LOGGER.warn("{} has not been launched, ignore submit task", this.name); return null; } - Future submit(final Runnable task) { + List> 
invokeAll( + final Collection> tasks, final long timeoutMs) + throws InterruptedException { if (!isShutdown()) { synchronized (this) { if (!isShutdown()) { - return this.executor.submit(task); + return this.executor.invokeAll(tasks, timeoutMs, TimeUnit.MILLISECONDS); } } } - LOGGER.warn("{} has not been launched, ignore submit task", this.name); + LOGGER.warn("{} has not been launched, ignore invoke all tasks", this.name); + return null; + } + + int getAvailableCount() { + if (!isShutdown()) { + synchronized (this) { + if (!isShutdown()) { + // TODO: temporarily disable multiple poll + return 0; + // return Math.max( + // ((ThreadPoolExecutor) this.executor).getCorePoolSize() + // - ((ThreadPoolExecutor) this.executor).getActiveCount(), + // 0); + } + } + } + + LOGGER.warn("{} has not been launched, return zero", this.name); + return 0; + } + } + + private static class SubscriptionScheduledExecutorService extends SubscriptionExecutorService { + + SubscriptionScheduledExecutorService(final String name, final int corePoolSize) { + super(name, corePoolSize); + } + + @Override + void launchIfNeeded() { + if (isShutdown()) { + synchronized (this) { + if (isShutdown()) { + LOGGER.info("Launching {} with core pool size {}...", this.name, this.corePoolSize); + + this.executor = + Executors.newScheduledThreadPool( + this.corePoolSize, + r -> { + final Thread t = + new Thread(Thread.currentThread().getThreadGroup(), r, this.name, 0); + if (!t.isDaemon()) { + t.setDaemon(true); + } + if (t.getPriority() != Thread.NORM_PRIORITY) { + t.setPriority(Thread.NORM_PRIORITY); + } + return t; + }); + } + } + } + } + + @SuppressWarnings("unsafeThreadSchedule") + ScheduledFuture scheduleWithFixedDelay( + final Runnable task, final long initialDelay, final long delay, final TimeUnit unit) { + if (!isShutdown()) { + synchronized (this) { + if (!isShutdown()) { + return ((ScheduledExecutorService) this.executor) + .scheduleWithFixedDelay(task, initialDelay, delay, unit); + } + } + } + + LOGGER.warn("{} has not been launched, ignore scheduleWithFixedDelay for task", this.name); return null; } } diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionProvider.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionProvider.java index ba99774590b55..03eefeaddba7f 100644 --- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionProvider.java +++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionProvider.java @@ -27,10 +27,15 @@ import org.apache.iotdb.rpc.subscription.config.TopicConfig; import org.apache.iotdb.rpc.subscription.exception.SubscriptionConnectionException; import org.apache.iotdb.rpc.subscription.exception.SubscriptionException; +import org.apache.iotdb.rpc.subscription.exception.SubscriptionPipeTimeoutException; import org.apache.iotdb.rpc.subscription.exception.SubscriptionRuntimeCriticalException; import org.apache.iotdb.rpc.subscription.exception.SubscriptionRuntimeNonCriticalException; +import org.apache.iotdb.rpc.subscription.payload.poll.PollFilePayload; +import org.apache.iotdb.rpc.subscription.payload.poll.PollPayload; +import org.apache.iotdb.rpc.subscription.payload.poll.PollTabletsPayload; import org.apache.iotdb.rpc.subscription.payload.poll.SubscriptionCommitContext; import org.apache.iotdb.rpc.subscription.payload.poll.SubscriptionPollRequest; +import 
org.apache.iotdb.rpc.subscription.payload.poll.SubscriptionPollRequestType; import org.apache.iotdb.rpc.subscription.payload.poll.SubscriptionPollResponse; import org.apache.iotdb.rpc.subscription.payload.request.PipeSubscribeCloseReq; import org.apache.iotdb.rpc.subscription.payload.request.PipeSubscribeCommitReq; @@ -40,6 +45,7 @@ import org.apache.iotdb.rpc.subscription.payload.request.PipeSubscribeSubscribeReq; import org.apache.iotdb.rpc.subscription.payload.request.PipeSubscribeUnsubscribeReq; import org.apache.iotdb.rpc.subscription.payload.response.PipeSubscribeHandshakeResp; +import org.apache.iotdb.rpc.subscription.payload.response.PipeSubscribeHeartbeatResp; import org.apache.iotdb.rpc.subscription.payload.response.PipeSubscribePollResp; import org.apache.iotdb.rpc.subscription.payload.response.PipeSubscribeSubscribeResp; import org.apache.iotdb.rpc.subscription.payload.response.PipeSubscribeUnsubscribeResp; @@ -62,6 +68,14 @@ final class SubscriptionProvider extends SubscriptionSession { private static final Logger LOGGER = LoggerFactory.getLogger(SubscriptionProvider.class); + private static final String STATUS_FORMATTER = "Status code is [%s], status message is [%s]."; + private static final String INTERNAL_ERROR_FORMATTER = + "Internal error occurred. " + STATUS_FORMATTER; + private static final String SUBSCRIPTION_PIPE_TIMEOUT_FORMATTER = + "A timeout has occurred in procedures related to the pipe within the subscription procedure. " + + "Please manually check the subscription correctness later. " + + STATUS_FORMATTER; + private String consumerId; private String consumerGroupId; @@ -76,8 +90,9 @@ final class SubscriptionProvider extends SubscriptionSession { final String username, final String password, final String consumerId, - final String consumerGroupId) { - super(endPoint.ip, endPoint.port, username, password); + final String consumerGroupId, + final int thriftMaxFrameSize) { + super(endPoint.ip, endPoint.port, username, password, thriftMaxFrameSize); this.endPoint = endPoint; this.consumerId = consumerId; @@ -203,7 +218,7 @@ void closeInternal() throws SubscriptionException { /////////////////////////////// subscription APIs /////////////////////////////// - void heartbeat() throws SubscriptionException { + PipeSubscribeHeartbeatResp heartbeat() throws SubscriptionException { final TPipeSubscribeResp resp; try { resp = getSessionConnection().pipeSubscribe(PipeSubscribeHeartbeatReq.toTPipeSubscribeReq()); @@ -217,6 +232,7 @@ void heartbeat() throws SubscriptionException { throw new SubscriptionConnectionException(e.getMessage(), e); } verifyPipeSubscribeSuccess(resp.status); + return PipeSubscribeHeartbeatResp.fromTPipeSubscribeResp(resp); } Map subscribe(final Set topicNames) throws SubscriptionException { @@ -281,6 +297,38 @@ Map unsubscribe(final Set topicNames) throws Subscr return unsubscribeResp.getTopics(); } + List poll(final Set topicNames, final long timeoutMs) + throws SubscriptionException { + return poll( + new SubscriptionPollRequest( + SubscriptionPollRequestType.POLL.getType(), + new PollPayload(topicNames), + timeoutMs, + thriftMaxFrameSize)); + } + + List pollFile( + final SubscriptionCommitContext commitContext, final long writingOffset, final long timeoutMs) + throws SubscriptionException { + return poll( + new SubscriptionPollRequest( + SubscriptionPollRequestType.POLL_FILE.getType(), + new PollFilePayload(commitContext, writingOffset), + timeoutMs, + thriftMaxFrameSize)); + } + + List pollTablets( + final SubscriptionCommitContext commitContext, 
final int offset, final long timeoutMs)
+      throws SubscriptionException {
+    return poll(
+        new SubscriptionPollRequest(
+            SubscriptionPollRequestType.POLL_TABLETS.getType(),
+            new PollTabletsPayload(commitContext, offset),
+            timeoutMs,
+            thriftMaxFrameSize));
+  }
+
   List<SubscriptionPollResponse> poll(final SubscriptionPollRequest pollMessage)
       throws SubscriptionException {
     final PipeSubscribePollReq req;
@@ -353,20 +401,26 @@ private static void verifyPipeSubscribeSuccess(final TSStatus status)
       case 1906: // SUBSCRIPTION_CLOSE_ERROR
       case 1907: // SUBSCRIPTION_SUBSCRIBE_ERROR
       case 1908: // SUBSCRIPTION_UNSUBSCRIBE_ERROR
-        LOGGER.warn(
-            "Internal error occurred, status code {}, status message {}",
-            status.code,
-            status.message);
-        throw new SubscriptionRuntimeNonCriticalException(status.message);
+        {
+          final String errorMessage =
+              String.format(INTERNAL_ERROR_FORMATTER, status.code, status.message);
+          LOGGER.warn(errorMessage);
+          throw new SubscriptionRuntimeNonCriticalException(errorMessage);
+        }
+      case 1911: // SUBSCRIPTION_PIPE_TIMEOUT_ERROR
+        throw new SubscriptionPipeTimeoutException(
+            String.format(SUBSCRIPTION_PIPE_TIMEOUT_FORMATTER, status.code, status.message));
       case 1900: // SUBSCRIPTION_VERSION_ERROR
       case 1901: // SUBSCRIPTION_TYPE_ERROR
       case 1909: // SUBSCRIPTION_MISSING_CUSTOMER
+      case 1912: // SUBSCRIPTION_NOT_ENABLED_ERROR
       default:
-        LOGGER.warn(
-            "Internal error occurred, status code {}, status message {}",
-            status.code,
-            status.message);
-        throw new SubscriptionRuntimeCriticalException(status.message);
+        {
+          final String errorMessage =
+              String.format(INTERNAL_ERROR_FORMATTER, status.code, status.message);
+          LOGGER.warn(errorMessage);
+          throw new SubscriptionRuntimeCriticalException(errorMessage);
+        }
     }
   }
diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionProviders.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionProviders.java
index 6d250f6d20177..6c0b7d03b184f 100644
--- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionProviders.java
+++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionProviders.java
@@ -23,6 +23,7 @@
 import org.apache.iotdb.rpc.IoTDBConnectionException;
 import org.apache.iotdb.rpc.subscription.exception.SubscriptionConnectionException;
 import org.apache.iotdb.rpc.subscription.exception.SubscriptionException;
+import org.apache.iotdb.rpc.subscription.payload.response.PipeSubscribeHeartbeatResp;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -85,7 +86,8 @@ void openProviders(final SubscriptionConsumer consumer) throws SubscriptionExcep
       try {
         defaultProvider = consumer.constructProviderAndHandshake(endPoint);
       } catch (final Exception e) {
-        LOGGER.warn("Failed to create connection with {}", endPoint, e);
+        LOGGER.warn(
+            "{} failed to create connection with {} because of {}", consumer, endPoint, e, e);
         continue; // try next endpoint
       }
       defaultDataNodeId = defaultProvider.getDataNodeId();
@@ -93,10 +95,11 @@ void openProviders(final SubscriptionConsumer consumer) throws SubscriptionExcep
       final Map<Integer, TEndPoint> allEndPoints;
       try {
-        allEndPoints = defaultProvider.getSessionConnection().fetchAllEndPoints();
+        allEndPoints = defaultProvider.heartbeat().getEndPoints();
       } catch (final Exception e) {
-        LOGGER.warn("Failed to fetch all endpoints from {}, will retry later...", endPoint, e);
-        break; // retry later
+        LOGGER.warn(
+            "{} failed to fetch all endpoints from {} because of 
{}", consumer, endPoint, e, e); + break; } for (final Map.Entry entry : allEndPoints.entrySet()) { @@ -109,8 +112,12 @@ void openProviders(final SubscriptionConsumer consumer) throws SubscriptionExcep provider = consumer.constructProviderAndHandshake(entry.getValue()); } catch (final Exception e) { LOGGER.warn( - "Failed to create connection with {}, will retry later...", entry.getValue(), e); - continue; // retry later + "{} failed to create connection with {} because of {}", + consumer, + entry.getValue(), + e, + e); + continue; } addProvider(entry.getKey(), provider); } @@ -133,7 +140,8 @@ void closeProviders() { for (final SubscriptionProvider provider : getAllProviders()) { try { provider.close(); - } catch (final Exception ignored) { + } catch (final Exception e) { + LOGGER.warn("Failed to close subscription provider {} because of {}", provider, e, e); } } subscriptionProviders.clear(); @@ -227,21 +235,33 @@ void heartbeat(final SubscriptionConsumer consumer) { acquireWriteLock(); try { - heartbeatInternal(); + heartbeatInternal(consumer); } finally { releaseWriteLock(); } } - private void heartbeatInternal() { + private void heartbeatInternal(final SubscriptionConsumer consumer) { for (final SubscriptionProvider provider : getAllProviders()) { try { - provider.heartbeat(); + final PipeSubscribeHeartbeatResp resp = provider.heartbeat(); + // update subscribed topics + consumer.subscribedTopics = resp.getTopics(); + // unsubscribe completed topics + for (final String topicName : resp.getTopicNamesToUnsubscribe()) { + LOGGER.info( + "Termination occurred when SubscriptionConsumer {} polling topics, unsubscribe topic {} automatically", + consumer.coreReportMessage(), + topicName); + consumer.unsubscribe(topicName); + } provider.setAvailable(); } catch (final Exception e) { LOGGER.warn( - "something unexpected happened when sending heartbeat to subscription provider {}, set subscription provider unavailable", + "{} failed to sending heartbeat to subscription provider {} because of {}, set subscription provider unavailable", + consumer, provider, + e, e); provider.setUnavailable(); } @@ -268,7 +288,7 @@ private void syncInternal(final SubscriptionConsumer consumer) { try { openProviders(consumer); } catch (final Exception e) { - LOGGER.warn("something unexpected happened when syncing subscription endpoints...", e); + LOGGER.warn("Failed to open providers for consumer {} because of {}", consumer, e, e); return; } } @@ -277,8 +297,8 @@ private void syncInternal(final SubscriptionConsumer consumer) { try { allEndPoints = consumer.fetchAllEndPointsWithRedirection(); } catch (final Exception e) { - LOGGER.warn("Failed to fetch all endpoints, will retry later...", e); - return; // retry later + LOGGER.warn("Failed to fetch all endpoints for consumer {} because of {}", consumer, e, e); + return; } // add new providers or handshake existing providers @@ -292,19 +312,21 @@ private void syncInternal(final SubscriptionConsumer consumer) { newProvider = consumer.constructProviderAndHandshake(endPoint); } catch (final Exception e) { LOGGER.warn( - "Failed to create connection with endpoint {}, will retry later...", endPoint, e); - continue; // retry later + "{} failed to create connection with {} because of {}", consumer, endPoint, e, e); + continue; } addProvider(entry.getKey(), newProvider); } else { // existing provider try { - provider.heartbeat(); + consumer.subscribedTopics = provider.heartbeat().getTopics(); provider.setAvailable(); } catch (final Exception e) { LOGGER.warn( - "something 
unexpected happened when sending heartbeat to subscription provider {}, set subscription provider unavailable", + "{} failed to sending heartbeat to subscription provider {} because of {}, set subscription provider unavailable", + consumer, provider, + e, e); provider.setUnavailable(); } @@ -314,8 +336,10 @@ private void syncInternal(final SubscriptionConsumer consumer) { closeAndRemoveProvider(entry.getKey()); } catch (final Exception e) { LOGGER.warn( - "Exception occurred when closing and removing subscription provider with data node id {}", - entry.getKey(), + "Exception occurred when {} closing and removing subscription provider {} because of {}", + consumer, + provider, + e, e); } } @@ -330,8 +354,10 @@ private void syncInternal(final SubscriptionConsumer consumer) { closeAndRemoveProvider(dataNodeId); } catch (final Exception e) { LOGGER.warn( - "Exception occurred when closing and removing subscription provider with data node id {}", - dataNodeId, + "Exception occurred when {} closing and removing subscription provider {} because of {}", + consumer, + provider, + e, e); } } diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionPullConsumer.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionPullConsumer.java index 377341874044c..cc56df8978296 100644 --- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionPullConsumer.java +++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionPullConsumer.java @@ -22,6 +22,7 @@ import org.apache.iotdb.rpc.subscription.config.ConsumerConstant; import org.apache.iotdb.rpc.subscription.exception.SubscriptionException; import org.apache.iotdb.session.subscription.payload.SubscriptionMessage; +import org.apache.iotdb.session.subscription.util.CollectionUtils; import org.apache.iotdb.session.subscription.util.IdentifierUtils; import org.slf4j.Logger; @@ -151,15 +152,41 @@ public List poll(final Set topicNames, final Durati return poll(topicNames, timeout.toMillis()); } - @Override public List poll(final Set topicNames, final long timeoutMs) throws SubscriptionException { // parse topic names from external source - final Set parsedTopicNames = - topicNames.stream().map(IdentifierUtils::parseIdentifier).collect(Collectors.toSet()); + Set parsedTopicNames = + topicNames.stream() + .map(IdentifierUtils::checkAndParseIdentifier) + .collect(Collectors.toSet()); + + if (!parsedTopicNames.isEmpty()) { + // filter unsubscribed topics + parsedTopicNames.stream() + .filter(topicName -> !subscribedTopics.containsKey(topicName)) + .forEach( + topicName -> + LOGGER.warn( + "SubscriptionPullConsumer {} does not subscribe to topic {}", + this, + topicName)); + } else { + parsedTopicNames = subscribedTopics.keySet(); + } - // poll messages - final List messages = super.poll(parsedTopicNames, timeoutMs); + if (parsedTopicNames.isEmpty()) { + return Collections.emptyList(); + } + + final List messages = multiplePoll(parsedTopicNames, timeoutMs); + if (messages.isEmpty()) { + LOGGER.info( + "SubscriptionPullConsumer {} poll empty message from topics {} after {} millisecond(s)", + this, + CollectionUtils.getLimitedString(parsedTopicNames, 32), + timeoutMs); + return messages; + } // add to uncommitted messages if (autoCommit) { @@ -274,7 +301,7 @@ public Builder host(final String host) { } @Override - public Builder port(final int port) { + public Builder port(final Integer port) { 
super.port(port); return this; } @@ -333,6 +360,18 @@ public Builder fileSaveFsync(final boolean fileSaveFsync) { return this; } + @Override + public Builder thriftMaxFrameSize(final int thriftMaxFrameSize) { + super.thriftMaxFrameSize(thriftMaxFrameSize); + return this; + } + + @Override + public Builder maxPollParallelism(final int maxPollParallelism) { + super.maxPollParallelism(maxPollParallelism); + return this; + } + public Builder autoCommit(final boolean autoCommit) { this.autoCommit = autoCommit; return this; diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionPushConsumer.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionPushConsumer.java index e2dfe7974575d..d3c34b7ecafa6 100644 --- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionPushConsumer.java +++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/consumer/SubscriptionPushConsumer.java @@ -22,6 +22,7 @@ import org.apache.iotdb.rpc.subscription.config.ConsumerConstant; import org.apache.iotdb.rpc.subscription.exception.SubscriptionException; import org.apache.iotdb.session.subscription.payload.SubscriptionMessage; +import org.apache.iotdb.session.subscription.util.CollectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -103,6 +104,7 @@ private SubscriptionPushConsumer( this.ackStrategy = ackStrategy; this.consumeListener = consumeListener; + // avoid interval less than or equal to zero this.autoPollIntervalMs = Math.max(autoPollIntervalMs, 1); this.autoPollTimeoutMs = Math.max(autoPollTimeoutMs, ConsumerConstant.AUTO_POLL_TIMEOUT_MS_MIN_VALUE); @@ -172,7 +174,15 @@ public void run() { try { final List messages = - poll(subscribedTopics.keySet(), autoPollTimeoutMs); + multiplePoll(subscribedTopics.keySet(), autoPollTimeoutMs); + if (messages.isEmpty()) { + LOGGER.info( + "SubscriptionPushConsumer {} poll empty message from topics {} after {} millisecond(s)", + this, + CollectionUtils.getLimitedString(subscribedTopics.keySet(), 32), + autoPollTimeoutMs); + return; + } if (ackStrategy.equals(AckStrategy.BEFORE_CONSUME)) { ack(messages); @@ -184,7 +194,7 @@ public void run() { final ConsumeResult consumeResult; try { consumeResult = consumeListener.onReceive(message); - if (Objects.equals(consumeResult, ConsumeResult.SUCCESS)) { + if (Objects.equals(ConsumeResult.SUCCESS, consumeResult)) { messagesToAck.add(message); } else { LOGGER.warn("Consumer listener result failure when consuming message: {}", message); @@ -224,7 +234,7 @@ public Builder host(final String host) { } @Override - public Builder port(final int port) { + public Builder port(final Integer port) { super.port(port); return this; } @@ -283,6 +293,18 @@ public Builder fileSaveFsync(final boolean fileSaveFsync) { return this; } + @Override + public Builder thriftMaxFrameSize(final int thriftMaxFrameSize) { + super.thriftMaxFrameSize(thriftMaxFrameSize); + return this; + } + + @Override + public Builder maxPollParallelism(final int maxPollParallelism) { + super.maxPollParallelism(maxPollParallelism); + return this; + } + public Builder ackStrategy(final AckStrategy ackStrategy) { this.ackStrategy = ackStrategy; return this; diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/model/Subscription.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/model/Subscription.java index e5f227be17b80..01e454cae2cc2 100644 --- 
a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/model/Subscription.java +++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/model/Subscription.java @@ -21,16 +21,26 @@ public class Subscription { + private final String subscriptionId; private final String topicName; private final String consumerGroupId; private final String consumerIds; - public Subscription(String topicName, String consumerGroupId, String consumerIds) { + public Subscription( + final String subscriptionId, + final String topicName, + final String consumerGroupId, + final String consumerIds) { + this.subscriptionId = subscriptionId; this.topicName = topicName; this.consumerGroupId = consumerGroupId; this.consumerIds = consumerIds; } + public String getSubscriptionId() { + return subscriptionId; + } + public String getTopicName() { return topicName; } @@ -45,7 +55,9 @@ public String getConsumerIds() { @Override public String toString() { - return "Subscription{topicName=" + return "Subscription{subscriptionId=" + + subscriptionId + + ", topicName=" + topicName + ", consumerGroupId=" + consumerGroupId diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/payload/SubscriptionFileHandler.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/payload/SubscriptionFileHandler.java index 0ec121f96992a..8f2b3aaa860a9 100644 --- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/payload/SubscriptionFileHandler.java +++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/payload/SubscriptionFileHandler.java @@ -20,6 +20,7 @@ package org.apache.iotdb.session.subscription.payload; import org.apache.iotdb.rpc.subscription.exception.SubscriptionIncompatibleHandlerException; +import org.apache.iotdb.session.util.RetryUtils; import java.io.File; import java.io.IOException; @@ -56,8 +57,11 @@ public synchronized Path getPath() { */ public synchronized Path deleteFile() throws IOException { final Path sourcePath = getPath(); - Files.delete(sourcePath); - return sourcePath; + return RetryUtils.retryOnException( + () -> { + Files.delete(sourcePath); + return sourcePath; + }); } /** @@ -66,7 +70,7 @@ public synchronized Path deleteFile() throws IOException { * @throws IOException if an I/O error occurs */ public synchronized Path moveFile(final String target) throws IOException { - return this.moveFile(Paths.get(target)); + return RetryUtils.retryOnException(() -> this.moveFile(Paths.get(target))); } /** @@ -78,7 +82,8 @@ public synchronized Path moveFile(final Path target) throws IOException { if (!Files.exists(target.getParent())) { Files.createDirectories(target.getParent()); } - return Files.move(getPath(), target, StandardCopyOption.REPLACE_EXISTING); + return RetryUtils.retryOnException( + () -> Files.move(getPath(), target, StandardCopyOption.REPLACE_EXISTING)); } /** @@ -87,7 +92,7 @@ public synchronized Path moveFile(final Path target) throws IOException { * @throws IOException if an I/O error occurs */ public synchronized Path copyFile(final String target) throws IOException { - return this.copyFile(Paths.get(target)); + return RetryUtils.retryOnException(() -> this.copyFile(Paths.get(target))); } /** @@ -99,8 +104,13 @@ public synchronized Path copyFile(final Path target) throws IOException { if (!Files.exists(target.getParent())) { Files.createDirectories(target.getParent()); } - return Files.copy( - getPath(), target, StandardCopyOption.REPLACE_EXISTING, 
StandardCopyOption.COPY_ATTRIBUTES); + return RetryUtils.retryOnException( + () -> + Files.copy( + getPath(), + target, + StandardCopyOption.REPLACE_EXISTING, + StandardCopyOption.COPY_ATTRIBUTES)); } @Override diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/payload/SubscriptionSessionDataSet.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/payload/SubscriptionSessionDataSet.java index ac54941f5151e..80c25f202a08b 100644 --- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/payload/SubscriptionSessionDataSet.java +++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/payload/SubscriptionSessionDataSet.java @@ -105,7 +105,9 @@ public RowRecord next() { for (int columnIndex = 0; columnIndex < columnSize; ++columnIndex) { final Field field; - if (tablet.bitMaps[columnIndex].isMarked(rowIndex)) { + if (tablet.bitMaps != null + && tablet.bitMaps[columnIndex] != null + && tablet.bitMaps[columnIndex].isMarked(rowIndex)) { field = new Field(null); } else { final TSDataType dataType = tablet.getSchemas().get(columnIndex).getType(); diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/payload/SubscriptionSessionDataSetsHandler.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/payload/SubscriptionSessionDataSetsHandler.java index 9da0aa3805a98..6bca41eb6d997 100644 --- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/payload/SubscriptionSessionDataSetsHandler.java +++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/payload/SubscriptionSessionDataSetsHandler.java @@ -23,26 +23,33 @@ import org.apache.tsfile.write.record.Tablet; -import java.util.ArrayList; import java.util.Iterator; import java.util.List; public class SubscriptionSessionDataSetsHandler implements Iterable, SubscriptionMessageHandler { - private final List dataSets; - private final List tablets; public SubscriptionSessionDataSetsHandler(final List tablets) { - this.dataSets = new ArrayList<>(); this.tablets = tablets; - tablets.forEach((tablet -> this.dataSets.add(new SubscriptionSessionDataSet(tablet)))); } @Override public Iterator iterator() { - return dataSets.iterator(); + return new Iterator() { + final Iterator tabletsIterator = tablets.iterator(); + + @Override + public boolean hasNext() { + return tabletsIterator.hasNext(); + } + + @Override + public SubscriptionSessionDataSet next() { + return new SubscriptionSessionDataSet(tabletsIterator.next()); + } + }; } public Iterator tabletIterator() { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureSuspendedException.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/util/CollectionUtils.java similarity index 66% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureSuspendedException.java rename to iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/util/CollectionUtils.java index 1b6f049bd48b4..5f2cfb5ba60ba 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureSuspendedException.java +++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/util/CollectionUtils.java @@ -17,23 +17,15 @@ * under the License. 
 */
-package org.apache.iotdb.confignode.procedure.exception;
+package org.apache.iotdb.session.subscription.util;
 
-public class ProcedureSuspendedException extends ProcedureException {
+import java.util.Collection;
+import java.util.stream.Collectors;
 
-  private static final long serialVersionUID = -8328419627678496269L;
+public class CollectionUtils {
 
-  /** default constructor */
-  public ProcedureSuspendedException() {
-    super();
-  }
-
-  /**
-   * Constructor
-   *
-   * @param s message
-   */
-  public ProcedureSuspendedException(String s) {
-    super(s);
+  public static String getLimitedString(final Collection<?> collection, final int limit) {
+    return collection.stream().limit(limit).collect(Collectors.toList())
+        + (collection.size() > limit ? " ... (" + (collection.size() - limit) + " more)" : "");
   }
 }
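For reference, a quick sketch of what the new helper returns (illustrative only, not part of the patch):

    import java.util.Arrays;
    import java.util.List;

    List<Integer> ids = Arrays.asList(1, 2, 3, 4, 5);
    // Keeps at most `limit` elements and summarizes the remainder:
    String s = CollectionUtils.getLimitedString(ids, 3); // "[1, 2, 3] ... (2 more)"
    String t = CollectionUtils.getLimitedString(ids, 8); // "[1, 2, 3, 4, 5]"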
diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/util/IdentifierUtils.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/util/IdentifierUtils.java
index 9f6d09ef44e0f..6947ac9ef7b42 100644
--- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/util/IdentifierUtils.java
+++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/util/IdentifierUtils.java
@@ -24,12 +24,20 @@
 import org.apache.tsfile.common.constant.TsFileConstant;
 import org.apache.tsfile.read.common.parser.PathVisitor;
 
+import java.util.Objects;
+
 public class IdentifierUtils {
 
   /**
    * refer org.apache.iotdb.db.queryengine.plan.parser.ASTVisitor#parseIdentifier(java.lang.String)
    */
-  public static String parseIdentifier(final String src) {
+  public static String checkAndParseIdentifier(final String src) {
+    if (Objects.isNull(src)) {
+      throw new SubscriptionIdentifierSemanticException("null identifier is not supported");
+    }
+    if (src.isEmpty()) {
+      throw new SubscriptionIdentifierSemanticException("empty identifier is not supported");
+    }
     if (src.startsWith(TsFileConstant.BACK_QUOTE_STRING)
         && src.endsWith(TsFileConstant.BACK_QUOTE_STRING)) {
       return src.substring(1, src.length() - 1)
diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/util/SubscriptionPollTimer.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/util/PollTimer.java
similarity index 83%
rename from iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/util/SubscriptionPollTimer.java
rename to iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/util/PollTimer.java
index 756b1fd678f42..1dd077854e8a8 100644
--- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/util/SubscriptionPollTimer.java
+++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/util/PollTimer.java
@@ -19,14 +19,14 @@
 package org.apache.iotdb.session.subscription.util;
 
-public class SubscriptionPollTimer {
+public class PollTimer {
 
   private long startMs;
   private long currentTimeMs;
   private long deadlineMs;
   private long timeoutMs;
 
-  public SubscriptionPollTimer(long startMs, long timeoutMs) {
+  public PollTimer(final long startMs, final long timeoutMs) {
     this.update(startMs);
     this.reset(timeoutMs);
   }
@@ -35,11 +35,19 @@ public boolean isExpired() {
     return this.currentTimeMs >= this.deadlineMs;
   }
 
+  public boolean isExpired(final long deltaMs) {
+    return this.currentTimeMs >= this.deadlineMs - Math.max(deltaMs, 0);
+  }
+
   public boolean notExpired() {
     return !this.isExpired();
   }
 
-  public void reset(long timeoutMs) {
+  public boolean notExpired(final long deltaMs) {
+    return !this.isExpired(deltaMs);
+  }
+
+  public void reset(final long timeoutMs) {
     if (timeoutMs < 0L) {
       throw new IllegalArgumentException("Invalid negative timeout " + timeoutMs);
     } else {
@@ -57,7 +65,7 @@ public void update() {
     update(System.currentTimeMillis());
   }
 
-  public void update(long currentTimeMs) {
+  public void update(final long currentTimeMs) {
     this.currentTimeMs = Math.max(currentTimeMs, this.currentTimeMs);
   }
diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/util/SetPartitioner.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/util/SetPartitioner.java
new file mode 100644
index 0000000000000..69e46642de4a1
--- /dev/null
+++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/subscription/util/SetPartitioner.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.session.subscription.util;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+public class SetPartitioner {
+
+  /**
+   * Partitions the given set into the specified number of subsets.
+   *
+   * <p>Ensures that each partition contains at least one element, even if the number of elements in
+   * the set is less than the number of partitions. When the number of elements is greater than or
+   * equal to the number of partitions, elements are evenly distributed across the partitions.
+   *
+   * <p>Example:
+   *
+   * <ul>
+   *   <li>1 topic, 4 partitions: [topic1 | topic1 | topic1 | topic1]
+   *   <li>3 topics, 4 partitions: [topic1 | topic2 | topic3 | topic1]
+   *   <li>2 topics, 4 partitions: [topic1 | topic2 | topic1 | topic2]
+   *   <li>5 topics, 4 partitions: [topic1, topic4 | topic2 | topic5 | topic3]
+   *   <li>7 topics, 3 partitions: [topic1, topic6, topic7 | topic2, topic3 | topic5, topic4]
+   * </ul>
+   *
+   * @param set the given set
+   * @param partitions the number of partitions
+   * @param <T> the type of the elements in the set
+   * @return a list containing the specified number of subsets
+   */
+  public static <T> List<Set<T>> partition(final Set<T> set, final int partitions) {
+    final List<Set<T>> result = new ArrayList<>(partitions);
+    for (int i = 0; i < partitions; i++) {
+      result.add(new HashSet<>());
+    }
+
+    final List<T> elements = new ArrayList<>(set);
+    int index = 0;
+
+    // When the number of elements is less than the number of partitions, distribute elements
+    // repeatedly
+    for (int i = 0; i < partitions; i++) {
+      result.get(i).add(elements.get(index));
+      index = (index + 1) % elements.size();
+    }
+
+    // When the number of elements is greater than or equal to the number of partitions, distribute
+    // elements normally
+    for (int i = partitions; i < elements.size(); i++) {
+      result.get(i % partitions).add(elements.get(i));
+    }
+
+    return result;
+  }
+}
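A quick sketch of the distribution behavior (illustrative only, not part of the patch; a LinkedHashSet is used so that iteration order, and therefore the exact output, is deterministic):

    import java.util.Arrays;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    Set<String> topics = new LinkedHashSet<>(Arrays.asList("topic1", "topic2", "topic3"));
    // 3 topics over 4 partitions: every partition is non-empty, so topic1 is reused,
    // matching the javadoc example above.
    List<Set<String>> parts = SetPartitioner.partition(topics, 4);
    // parts == [[topic1], [topic2], [topic3], [topic1]]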
diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/util/RetryUtils.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/util/RetryUtils.java
new file mode 100644
index 0000000000000..bd767d270b530
--- /dev/null
+++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/util/RetryUtils.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.session.util;
+
+public class RetryUtils {
+
+  public interface CallableWithException<T, E extends Exception> {
+    T call() throws E;
+  }
+
+  public static final int MAX_RETRIES = 3;
+
+  // Executes the callable at most MAX_RETRIES times; the exception thrown by the
+  // last attempt is rethrown to the caller.
+  public static <T, E extends Exception> T retryOnException(
+      final CallableWithException<T, E> callable) throws E {
+    int attempt = 0;
+    while (true) {
+      try {
+        return callable.call();
+      } catch (Exception e) {
+        attempt++;
+        if (attempt >= MAX_RETRIES) {
+          throw e;
+        }
+      }
+    }
+  }
+
+  private RetryUtils() {
+    // utility class
+  }
+}
diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/util/SessionUtils.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/util/SessionUtils.java
index 88ecda0524892..31ca44aa3ffd9 100644
--- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/util/SessionUtils.java
+++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/util/SessionUtils.java
@@ -33,6 +33,8 @@
 import org.apache.tsfile.write.UnSupportedDataTypeException;
 import org.apache.tsfile.write.record.Tablet;
 import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.nio.ByteBuffer;
 import java.time.LocalDate;
@@ -43,6 +45,7 @@
 public class SessionUtils {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(SessionUtils.class);
   private static final byte TYPE_NULL = -2;
 
   public static ByteBuffer getTimeBuffer(Tablet tablet) {
@@ -77,10 +80,47 @@ public static ByteBuffer getValueBuffer(Tablet tablet) {
     return valueBuffer;
   }
 
-  public static ByteBuffer getValueBuffer(List<TSDataType> types, List<Object> values)
+  private static int calOccupationOfOneColumn(
+      TSDataType dataType, Object[] values, int columnIndex, int rowSize) {
+    int valueOccupation = 0;
+    switch (dataType) {
+      case BOOLEAN:
+        valueOccupation += rowSize;
+        break;
+      case INT32:
+      case FLOAT:
+      case DATE:
+        valueOccupation += rowSize * 4;
+        break;
+      case INT64:
+      case DOUBLE:
+      case TIMESTAMP:
+        valueOccupation += rowSize * 8;
+        break;
+      case TEXT:
+      case BLOB:
+      case STRING:
+        valueOccupation += rowSize * 4;
+        Binary[] binaries = (Binary[]) values[columnIndex];
+        for (int rowIndex = 0; rowIndex < rowSize; rowIndex++) {
+          valueOccupation +=
+              binaries[rowIndex] != null
+                  ? 
binaries[rowIndex].getLength() + : Binary.EMPTY_VALUE.getLength(); + } + break; + default: + throw new UnSupportedDataTypeException( + String.format("Data type %s is not supported.", dataType)); + } + return valueOccupation; + } + + public static ByteBuffer getValueBuffer( + List types, List values, List measurements) throws IoTDBConnectionException { ByteBuffer buffer = ByteBuffer.allocate(SessionUtils.calculateLength(types, values)); - SessionUtils.putValues(types, values, buffer); + SessionUtils.putValues(types, values, buffer, measurements); return buffer; } @@ -136,53 +176,60 @@ private static int calculateLength(List types, List values) * @param buffer buffer to insert * @throws IoTDBConnectionException */ - private static void putValues(List types, List values, ByteBuffer buffer) + private static void putValues( + List types, List values, ByteBuffer buffer, List measurements) throws IoTDBConnectionException { for (int i = 0; i < values.size(); i++) { - if (values.get(i) == null) { - ReadWriteIOUtils.write(TYPE_NULL, buffer); - continue; - } - ReadWriteIOUtils.write(types.get(i), buffer); - switch (types.get(i)) { - case BOOLEAN: - ReadWriteIOUtils.write((Boolean) values.get(i), buffer); - break; - case INT32: - ReadWriteIOUtils.write((Integer) values.get(i), buffer); - break; - case DATE: - ReadWriteIOUtils.write( - DateUtils.parseDateExpressionToInt((LocalDate) values.get(i)), buffer); - break; - case INT64: - case TIMESTAMP: - ReadWriteIOUtils.write((Long) values.get(i), buffer); - break; - case FLOAT: - ReadWriteIOUtils.write((Float) values.get(i), buffer); - break; - case DOUBLE: - ReadWriteIOUtils.write((Double) values.get(i), buffer); - break; - case TEXT: - case STRING: - byte[] bytes; - if (values.get(i) instanceof Binary) { + try { + if (values.get(i) == null) { + ReadWriteIOUtils.write(TYPE_NULL, buffer); + continue; + } + ReadWriteIOUtils.write(types.get(i), buffer); + switch (types.get(i)) { + case BOOLEAN: + ReadWriteIOUtils.write((Boolean) values.get(i), buffer); + break; + case INT32: + ReadWriteIOUtils.write((Integer) values.get(i), buffer); + break; + case DATE: + ReadWriteIOUtils.write( + DateUtils.parseDateExpressionToInt((LocalDate) values.get(i)), buffer); + break; + case INT64: + case TIMESTAMP: + ReadWriteIOUtils.write((Long) values.get(i), buffer); + break; + case FLOAT: + ReadWriteIOUtils.write((Float) values.get(i), buffer); + break; + case DOUBLE: + ReadWriteIOUtils.write((Double) values.get(i), buffer); + break; + case TEXT: + case STRING: + byte[] bytes; + if (values.get(i) instanceof Binary) { + bytes = ((Binary) values.get(i)).getValues(); + } else { + bytes = ((String) values.get(i)).getBytes(TSFileConfig.STRING_CHARSET); + } + ReadWriteIOUtils.write(bytes.length, buffer); + buffer.put(bytes); + break; + case BLOB: bytes = ((Binary) values.get(i)).getValues(); - } else { - bytes = ((String) values.get(i)).getBytes(TSFileConfig.STRING_CHARSET); - } - ReadWriteIOUtils.write(bytes.length, buffer); - buffer.put(bytes); - break; - case BLOB: - bytes = ((Binary) values.get(i)).getValues(); - ReadWriteIOUtils.write(bytes.length, buffer); - buffer.put(bytes); - break; - default: - throw new IoTDBConnectionException(MSG_UNSUPPORTED_DATA_TYPE + types.get(i)); + ReadWriteIOUtils.write(bytes.length, buffer); + buffer.put(bytes); + break; + default: + throw new IoTDBConnectionException(MSG_UNSUPPORTED_DATA_TYPE + types.get(i)); + } + } catch (Throwable e) { + LOGGER.error( + "Cannot put values for measurement {}, type={}", measurements.get(i), types.get(i), e); + 
throw e; } } buffer.flip(); diff --git a/iotdb-client/session/src/test/java/org/apache/iotdb/session/SessionTest.java b/iotdb-client/session/src/test/java/org/apache/iotdb/session/SessionTest.java index 4ecd3f436bbb9..7a377aeb90d2f 100644 --- a/iotdb-client/session/src/test/java/org/apache/iotdb/session/SessionTest.java +++ b/iotdb-client/session/src/test/java/org/apache/iotdb/session/SessionTest.java @@ -104,6 +104,18 @@ public void testBuildSession() { .username("username") .password("pwd") .build(); + session1 = + new Session.Builder() + .nodeUrls(Collections.nCopies(2, "host:port")) + .username("username") + .password("pwd") + .build(); + session1 = + new Session.Builder() + .nodeUrls(Collections.unmodifiableList(Arrays.asList("host:port1", "host:port2"))) + .username("username") + .password("pwd") + .build(); session1 = new Session.Builder() .host("host") diff --git a/iotdb-client/session/src/test/java/org/apache/iotdb/session/TabletTest.java b/iotdb-client/session/src/test/java/org/apache/iotdb/session/TabletTest.java index 689ebab468466..fdf019bb8c850 100644 --- a/iotdb-client/session/src/test/java/org/apache/iotdb/session/TabletTest.java +++ b/iotdb-client/session/src/test/java/org/apache/iotdb/session/TabletTest.java @@ -21,10 +21,13 @@ import org.apache.tsfile.enums.TSDataType; import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.utils.Binary; import org.apache.tsfile.write.record.Tablet; import org.apache.tsfile.write.schema.MeasurementSchema; import org.junit.Test; +import java.nio.charset.StandardCharsets; +import java.time.LocalDate; import java.util.ArrayList; import java.util.List; @@ -36,6 +39,15 @@ public void testSortTablet() { Session session = new Session("127.0.0.1", 1234); List schemaList = new ArrayList<>(); schemaList.add(new MeasurementSchema("s1", TSDataType.INT64, TSEncoding.RLE)); + schemaList.add(new MeasurementSchema("s2", TSDataType.TIMESTAMP)); + schemaList.add(new MeasurementSchema("s3", TSDataType.INT32)); + schemaList.add(new MeasurementSchema("s4", TSDataType.DATE)); + schemaList.add(new MeasurementSchema("s5", TSDataType.BOOLEAN)); + schemaList.add(new MeasurementSchema("s6", TSDataType.DOUBLE)); + schemaList.add(new MeasurementSchema("s7", TSDataType.BLOB)); + schemaList.add(new MeasurementSchema("s8", TSDataType.TEXT)); + schemaList.add(new MeasurementSchema("s9", TSDataType.STRING)); + ; // insert three rows data Tablet tablet = new Tablet("root.sg1.d1", schemaList, 3); long[] timestamps = tablet.timestamps; @@ -52,11 +64,32 @@ public void testSortTablet() { timestamps[0] = 2; timestamps[1] = 0; timestamps[2] = 1; - // just one column INT64 data - long[] sensor = (long[]) values[0]; - sensor[0] = 0; - sensor[1] = 1; - sensor[2] = 2; + values[0] = new long[] {0, 1, 2}; + values[1] = new long[] {0, 1, 2}; + values[2] = new int[] {0, 1, 2}; + values[3] = + new LocalDate[] {LocalDate.ofEpochDay(0), LocalDate.ofEpochDay(1), LocalDate.ofEpochDay(2)}; + values[4] = new boolean[] {true, false, true}; + values[5] = new double[] {0.0, 1.0, 2.0}; + values[6] = + new Binary[] { + new Binary("0".getBytes(StandardCharsets.UTF_8)), + new Binary("1".getBytes(StandardCharsets.UTF_8)), + new Binary("2".getBytes(StandardCharsets.UTF_8)) + }; + values[7] = + new Binary[] { + new Binary("0".getBytes(StandardCharsets.UTF_8)), + new Binary("1".getBytes(StandardCharsets.UTF_8)), + new Binary("2".getBytes(StandardCharsets.UTF_8)) + }; + values[8] = + new Binary[] { + new Binary("0".getBytes(StandardCharsets.UTF_8)), + new 
Binary("1".getBytes(StandardCharsets.UTF_8)), + new Binary("2".getBytes(StandardCharsets.UTF_8)) + }; + tablet.rowSize = 3; session.sortTablet(tablet); @@ -72,10 +105,36 @@ public void testSortTablet() { If the data equal to above tablet, test pass, otherwise test fialed */ long[] resTimestamps = tablet.timestamps; - long[] resValues = (long[]) tablet.values[0]; long[] expectedTimestamps = new long[] {0, 1, 2}; - long[] expectedValues = new long[] {1, 2, 0}; assertArrayEquals(expectedTimestamps, resTimestamps); - assertArrayEquals(expectedValues, resValues); + assertArrayEquals(new long[] {1, 2, 0}, ((long[]) tablet.values[0])); + assertArrayEquals(new long[] {1, 2, 0}, ((long[]) tablet.values[1])); + assertArrayEquals(new int[] {1, 2, 0}, ((int[]) tablet.values[2])); + assertArrayEquals( + new LocalDate[] {LocalDate.ofEpochDay(1), LocalDate.ofEpochDay(2), LocalDate.ofEpochDay(0)}, + ((LocalDate[]) tablet.values[3])); + assertArrayEquals(new boolean[] {false, true, true}, ((boolean[]) tablet.values[4])); + assertArrayEquals(new double[] {1.0, 2.0, 0.0}, ((double[]) tablet.values[5]), 0.001); + assertArrayEquals( + new Binary[] { + new Binary("1".getBytes(StandardCharsets.UTF_8)), + new Binary("2".getBytes(StandardCharsets.UTF_8)), + new Binary("0".getBytes(StandardCharsets.UTF_8)) + }, + ((Binary[]) tablet.values[6])); + assertArrayEquals( + new Binary[] { + new Binary("1".getBytes(StandardCharsets.UTF_8)), + new Binary("2".getBytes(StandardCharsets.UTF_8)), + new Binary("0".getBytes(StandardCharsets.UTF_8)) + }, + ((Binary[]) tablet.values[7])); + assertArrayEquals( + new Binary[] { + new Binary("1".getBytes(StandardCharsets.UTF_8)), + new Binary("2".getBytes(StandardCharsets.UTF_8)), + new Binary("0".getBytes(StandardCharsets.UTF_8)) + }, + ((Binary[]) tablet.values[8])); } } diff --git a/iotdb-client/session/src/test/java/org/apache/iotdb/session/util/SessionUtilsTest.java b/iotdb-client/session/src/test/java/org/apache/iotdb/session/util/SessionUtilsTest.java index 7fb93e49a2342..82f19d3d33293 100644 --- a/iotdb-client/session/src/test/java/org/apache/iotdb/session/util/SessionUtilsTest.java +++ b/iotdb-client/session/src/test/java/org/apache/iotdb/session/util/SessionUtilsTest.java @@ -35,8 +35,11 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; +import static org.junit.Assert.assertThrows; + public class SessionUtilsTest { @Test @@ -122,24 +125,42 @@ public void testGetValueBuffer2() throws IoTDBConnectionException { TSDataType.DOUBLE, TSDataType.TEXT, TSDataType.BOOLEAN); - ByteBuffer timeBuffer = SessionUtils.getValueBuffer(typeList, valueList); + List measurements = Arrays.asList("s1", "s2", "s3", "s4", "s5", "s6"); + ByteBuffer timeBuffer = SessionUtils.getValueBuffer(typeList, valueList, measurements); Assert.assertNotNull(timeBuffer); valueList = new ArrayList<>(); valueList.add(null); - typeList = Arrays.asList(TSDataType.INT32); - timeBuffer = SessionUtils.getValueBuffer(typeList, valueList); + typeList = Collections.singletonList(TSDataType.INT32); + timeBuffer = SessionUtils.getValueBuffer(typeList, valueList, measurements); Assert.assertNotNull(timeBuffer); valueList = Arrays.asList(false); typeList = Arrays.asList(TSDataType.UNKNOWN); try { - timeBuffer = SessionUtils.getValueBuffer(typeList, valueList); + SessionUtils.getValueBuffer(typeList, valueList, measurements); } catch (Exception e) { Assert.assertTrue(e instanceof IoTDBConnectionException); } } + @Test + public void 
testGetValueBufferWithWrongType() { + List valueList = Arrays.asList(12L, 13, 1.2, 0.707f, false, "false"); + List typeList = + Arrays.asList( + TSDataType.INT32, + TSDataType.INT64, + TSDataType.FLOAT, + TSDataType.DOUBLE, + TSDataType.TEXT, + TSDataType.BOOLEAN); + List measurements = Arrays.asList("s1", "s2", "s3", "s4", "s5", "s6"); + assertThrows( + ClassCastException.class, + () -> SessionUtils.getValueBuffer(typeList, valueList, measurements)); + } + @Test public void testParseSeedNodeUrls() { List nodeUrls = Arrays.asList("127.0.0.1:1234"); diff --git a/iotdb-core/ainode/.gitignore b/iotdb-core/ainode/.gitignore new file mode 100644 index 0000000000000..b7ad350dc979e --- /dev/null +++ b/iotdb-core/ainode/.gitignore @@ -0,0 +1,11 @@ +# generated by Thrift +/iotdb/thrift/ + +# generated by maven +/iotdb/conf/ + +# .whl of ainode, generated by Poetry +/dist/ + +# the config to build ainode, it will be generated automatically +pyproject.toml diff --git a/iotdb-core/ainode/README.md b/iotdb-core/ainode/README.md new file mode 100644 index 0000000000000..150ad93e499b0 --- /dev/null +++ b/iotdb-core/ainode/README.md @@ -0,0 +1,22 @@ + + +# Apache IoTDB AINode \ No newline at end of file diff --git a/iotdb-core/ainode/README_ZH.md b/iotdb-core/ainode/README_ZH.md new file mode 100644 index 0000000000000..150ad93e499b0 --- /dev/null +++ b/iotdb-core/ainode/README_ZH.md @@ -0,0 +1,22 @@ + + +# Apache IoTDB AINode \ No newline at end of file diff --git a/iotdb-core/ainode/ainode.xml b/iotdb-core/ainode/ainode.xml new file mode 100644 index 0000000000000..480c3e7221e68 --- /dev/null +++ b/iotdb-core/ainode/ainode.xml @@ -0,0 +1,62 @@ + + + + ainode-assembly + + dir + zip + + + + README.md + + + README_ZH.md + + + ${maven.multiModuleProjectDirectory}/LICENSE-binary + LICENSE + + + ${maven.multiModuleProjectDirectory}/NOTICE-binary + NOTICE + + + + + resources/conf + conf + + + resources/sbin + sbin + 0755 + + + dist + lib + + *.whl + + + + diff --git a/iotdb-core/ainode/iotdb/__init__.py b/iotdb-core/ainode/iotdb/__init__.py new file mode 100644 index 0000000000000..2a1e720805f29 --- /dev/null +++ b/iotdb-core/ainode/iotdb/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# diff --git a/iotdb-core/ainode/iotdb/ainode/__init__.py b/iotdb-core/ainode/iotdb/ainode/__init__.py new file mode 100644 index 0000000000000..2a1e720805f29 --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# diff --git a/iotdb-core/ainode/iotdb/ainode/client.py b/iotdb-core/ainode/iotdb/ainode/client.py new file mode 100644 index 0000000000000..e44cdf476760e --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/client.py @@ -0,0 +1,204 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +import time + +from thrift.Thrift import TException +from thrift.protocol import TCompactProtocol, TBinaryProtocol +from thrift.transport import TSocket, TTransport + +from iotdb.ainode.config import AINodeDescriptor +from iotdb.ainode.constant import TSStatusCode +from iotdb.ainode.log import Logger +from iotdb.ainode.util.decorator import singleton +from iotdb.ainode.util.status import verify_success +from iotdb.thrift.common.ttypes import TEndPoint, TSStatus, TAINodeLocation, TAINodeConfiguration +from iotdb.thrift.confignode import IConfigNodeRPCService +from iotdb.thrift.confignode.ttypes import (TAINodeRemoveReq, TNodeVersionInfo, + TAINodeRegisterReq, TAINodeRestartReq) + +logger = Logger() + + +@singleton +class ClientManager(object): + def __init__(self): + self._config_node_endpoint = AINodeDescriptor().get_config().get_ain_target_config_node_list() + + def borrow_config_node_client(self): + return ConfigNodeClient(config_leader=self._config_node_endpoint) + + +class ConfigNodeClient(object): + def __init__(self, config_leader: TEndPoint): + self._config_leader = config_leader + self._config_nodes = [] + self._cursor = 0 + self._transport = None + self._client = None + + self._MSG_RECONNECTION_FAIL = "Fail to connect to any config node. 
Please check status of ConfigNodes" + self._RETRY_NUM = 5 + self._RETRY_INTERVAL_MS = 1 + + self._try_to_connect() + + def _try_to_connect(self) -> None: + if self._config_leader is not None: + try: + self._connect(self._config_leader) + return + except TException: + logger.warning("The current node {} may have been down, try next node", self._config_leader) + self._config_leader = None + + if self._transport is not None: + self._transport.close() + + try_host_num = 0 + while try_host_num < len(self._config_nodes): + self._cursor = (self._cursor + 1) % len(self._config_nodes) + + try_endpoint = self._config_nodes[self._cursor] + try: + self._connect(try_endpoint) + return + except TException: + logger.warning("The current node {} may have been down, try next node", try_endpoint) + + try_host_num = try_host_num + 1 + + raise TException(self._MSG_RECONNECTION_FAIL) + + def _connect(self, target_config_node: TEndPoint) -> None: + transport = TTransport.TFramedTransport( + TSocket.TSocket(target_config_node.ip, target_config_node.port) + ) + if not transport.isOpen(): + try: + transport.open() + except TTransport.TTransportException as e: + logger.error("TTransportException: {}".format(e)) + raise e + + if AINodeDescriptor().get_config().get_ain_thrift_compression_enabled(): + protocol = TCompactProtocol.TCompactProtocol(transport) + else: + protocol = TBinaryProtocol.TBinaryProtocol(transport) + self._client = IConfigNodeRPCService.Client(protocol) + + def _wait_and_reconnect(self) -> None: + # wait to start the next try + time.sleep(self._RETRY_INTERVAL_MS) + + try: + self._try_to_connect() + except TException: + # can not connect to each config node + self._sync_latest_config_node_list() + self._try_to_connect() + + def _sync_latest_config_node_list(self) -> None: + # TODO + pass + + def _update_config_node_leader(self, status: TSStatus) -> bool: + if status.code == TSStatusCode.REDIRECTION_RECOMMEND.get_status_code(): + if status.redirectNode is not None: + self._config_leader = status.redirectNode + else: + self._config_leader = None + return True + return False + + def node_register(self, cluster_name: str, configuration: TAINodeConfiguration, + version_info: TNodeVersionInfo) -> int: + req = TAINodeRegisterReq( + clusterName=cluster_name, + aiNodeConfiguration=configuration, + versionInfo=version_info + ) + + for _ in range(0, self._RETRY_NUM): + try: + resp = self._client.registerAINode(req) + if not self._update_config_node_leader(resp.status): + verify_success(resp.status, "An error occurs when calling node_register()") + self._config_nodes = resp.configNodeList + return resp.aiNodeId + except TTransport.TException: + logger.warning("Failed to connect to ConfigNode {} from AINode when executing node_register()", + self._config_leader) + self._config_leader = None + self._wait_and_reconnect() + + raise TException(self._MSG_RECONNECTION_FAIL) + + def node_restart(self, cluster_name: str, configuration: TAINodeConfiguration, + version_info: TNodeVersionInfo) -> None: + req = TAINodeRestartReq( + clusterName=cluster_name, + aiNodeConfiguration=configuration, + versionInfo=version_info + ) + + for _ in range(0, self._RETRY_NUM): + try: + resp = self._client.restartAINode(req) + if not self._update_config_node_leader(resp.status): + verify_success(resp.status, "An error occurs when calling node_restart()") + self._config_nodes = resp.configNodeList + return resp.status + except TTransport.TException: + logger.warning("Failed to connect to ConfigNode {} from AINode when executing 
node_restart()",
+                               self._config_leader)
+                self._config_leader = None
+                self._wait_and_reconnect()
+
+        raise TException(self._MSG_RECONNECTION_FAIL)
+
+    def node_remove(self, location: TAINodeLocation):
+        req = TAINodeRemoveReq(
+            aiNodeLocation=location
+        )
+        for _ in range(0, self._RETRY_NUM):
+            try:
+                status = self._client.removeAINode(req)
+                if not self._update_config_node_leader(status):
+                    verify_success(status, "An error occurs when calling node_remove()")
+                return status
+            except TTransport.TException:
+                logger.warning("Failed to connect to ConfigNode {} from AINode when executing node_remove()",
+                               self._config_leader)
+                self._config_leader = None
+                self._wait_and_reconnect()
+        raise TException(self._MSG_RECONNECTION_FAIL)
+
+    def get_ainode_configuration(self, node_id: int) -> dict:
+        for _ in range(0, self._RETRY_NUM):
+            try:
+                resp = self._client.getAINodeConfiguration(node_id)
+                if not self._update_config_node_leader(resp.status):
+                    verify_success(resp.status, "An error occurs when calling get_ainode_configuration()")
+                return resp.aiNodeConfigurationMap
+            except TTransport.TException:
+                logger.warning("Failed to connect to ConfigNode {} from AINode when executing "
+                               "get_ainode_configuration()",
+                               self._config_leader)
+                self._config_leader = None
+                self._wait_and_reconnect()
+        raise TException(self._MSG_RECONNECTION_FAIL)
diff --git a/iotdb-core/ainode/iotdb/ainode/config.py b/iotdb-core/ainode/iotdb/ainode/config.py
new file mode 100644
index 0000000000000..af66bc48ccf38
--- /dev/null
+++ b/iotdb-core/ainode/iotdb/ainode/config.py
@@ -0,0 +1,248 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
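The ConfigNode client above repeats one pattern in node_register, node_restart, node_remove and get_ainode_configuration: call the cached leader, clear it on failure, back off, and rescan the known ConfigNodes. A minimal standalone sketch of that loop (call_with_retry, call and reconnect are illustrative names, not part of the patch):

# Illustrative sketch of the retry-with-leader-redirect loop used by ConfigNodeClient; not part of the patch.
def call_with_retry(call, reconnect, retry_num=5):
    """Invoke call() up to retry_num times, reconnecting between attempts."""
    last_error = None
    for _ in range(retry_num):
        try:
            return call()  # e.g. a thrift RPC such as removeAINode(req)
        except Exception as e:  # the real client catches thrift TTransport.TException
            last_error = e
            reconnect()  # mirrors _wait_and_reconnect(): sleep, retry the leader, then the other nodes
    raise last_error  # mirrors raise TException(_MSG_RECONNECTION_FAIL)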
+# +import os + +from iotdb.ainode.constant import (AINODE_CONF_DIRECTORY_NAME, + AINODE_CONF_FILE_NAME, + AINODE_MODELS_DIR, AINODE_LOG_DIR, AINODE_SYSTEM_DIR, AINODE_INFERENCE_RPC_ADDRESS, + AINODE_INFERENCE_RPC_PORT, AINODE_THRIFT_COMPRESSION_ENABLED, + AINODE_SYSTEM_FILE_NAME, AINODE_CLUSTER_NAME, AINODE_VERSION_INFO, AINODE_BUILD_INFO, + AINODE_CONF_GIT_FILE_NAME, AINODE_CONF_POM_FILE_NAME, AINODE_ROOT_DIR, + AINODE_ROOT_CONF_DIRECTORY_NAME) +from iotdb.ainode.exception import BadNodeUrlError +from iotdb.ainode.log import Logger +from iotdb.ainode.util.decorator import singleton +from iotdb.thrift.common.ttypes import TEndPoint + +logger = Logger() + + +class AINodeConfig(object): + def __init__(self): + # Used for connection of DataNode/ConfigNode clients + self._ain_inference_rpc_address: str = AINODE_INFERENCE_RPC_ADDRESS + self._ain_inference_rpc_port: int = AINODE_INFERENCE_RPC_PORT + + # log directory + self._ain_logs_dir: str = AINODE_LOG_DIR + + # Directory to save models + self._ain_models_dir = AINODE_MODELS_DIR + + self._ain_system_dir = AINODE_SYSTEM_DIR + + # Whether to enable compression for thrift + self._ain_thrift_compression_enabled = AINODE_THRIFT_COMPRESSION_ENABLED + + # Cache number of model storage to avoid repeated loading + self._ain_model_storage_cache_size = 30 + + # Target ConfigNode to be connected by AINode + self._ain_target_config_node_list: TEndPoint = TEndPoint("127.0.0.1", 10710) + + # use for node management + self._ainode_id = 0 + self._cluster_name = AINODE_CLUSTER_NAME + + self._version_info = AINODE_VERSION_INFO + self._build_info = AINODE_BUILD_INFO + + def get_cluster_name(self) -> str: + return self._cluster_name + + def set_cluster_name(self, cluster_name: str) -> None: + self._cluster_name = cluster_name + + def get_version_info(self) -> str: + return self._version_info + + def get_ainode_id(self) -> int: + return self._ainode_id + + def set_ainode_id(self, ainode_id: int) -> None: + self._ainode_id = ainode_id + + def get_build_info(self) -> str: + return self._build_info + + def set_build_info(self, build_info: str) -> None: + self._build_info = build_info + + def set_version_info(self, version_info: str) -> None: + self._version_info = version_info + + def get_ain_inference_rpc_address(self) -> str: + return self._ain_inference_rpc_address + + def set_ain_inference_rpc_address(self, ain_inference_rpc_address: str) -> None: + self._ain_inference_rpc_address = ain_inference_rpc_address + + def get_ain_inference_rpc_port(self) -> int: + return self._ain_inference_rpc_port + + def set_ain_inference_rpc_port(self, ain_inference_rpc_port: int) -> None: + self._ain_inference_rpc_port = ain_inference_rpc_port + + def get_ain_logs_dir(self) -> str: + return self._ain_logs_dir + + def set_ain_logs_dir(self, ain_logs_dir: str) -> None: + self._ain_logs_dir = ain_logs_dir + + def get_ain_models_dir(self) -> str: + return self._ain_models_dir + + def set_ain_models_dir(self, ain_models_dir: str) -> None: + self._ain_models_dir = ain_models_dir + + def get_ain_system_dir(self) -> str: + return self._ain_system_dir + + def set_ain_system_dir(self, ain_system_dir: str) -> None: + self._ain_system_dir = ain_system_dir + + def get_ain_thrift_compression_enabled(self) -> bool: + return self._ain_thrift_compression_enabled + + def set_ain_thrift_compression_enabled(self, ain_thrift_compression_enabled: int) -> None: + self._ain_thrift_compression_enabled = ain_thrift_compression_enabled + + def get_ain_model_storage_cache_size(self) -> int: + return 
self._ain_model_storage_cache_size + + def get_ain_target_config_node_list(self) -> TEndPoint: + return self._ain_target_config_node_list + + def set_ain_target_config_node_list(self, ain_target_config_node_list: str) -> None: + self._ain_target_config_node_list = parse_endpoint_url(ain_target_config_node_list) + + +@singleton +class AINodeDescriptor(object): + + def __init__(self): + self._config = AINodeConfig() + self._load_config_from_file() + logger.info("AINodeDescriptor is init successfully.") + + def _load_config_from_file(self) -> None: + system_properties_file = os.path.join(self._config.get_ain_system_dir(), AINODE_SYSTEM_FILE_NAME) + if os.path.exists(system_properties_file): + system_configs = load_properties(system_properties_file) + if 'ainode_id' in system_configs: + self._config.set_ainode_id(int(system_configs['ainode_id'])) + + git_file = os.path.join(AINODE_ROOT_DIR, AINODE_ROOT_CONF_DIRECTORY_NAME, AINODE_CONF_GIT_FILE_NAME) + if os.path.exists(git_file): + git_configs = load_properties(git_file) + if 'git.commit.id.abbrev' in git_configs: + build_info = git_configs['git.commit.id.abbrev'] + if 'git.dirty' in git_configs: + if git_configs['git.dirty'] == "true": + build_info += "-dev" + self._config.set_build_info(build_info) + + pom_file = os.path.join(AINODE_ROOT_DIR, AINODE_ROOT_CONF_DIRECTORY_NAME, AINODE_CONF_POM_FILE_NAME) + if os.path.exists(pom_file): + pom_configs = load_properties(pom_file) + if 'version' in pom_configs: + self._config.set_version_info(pom_configs['version']) + + conf_file = os.path.join(AINODE_CONF_DIRECTORY_NAME, AINODE_CONF_FILE_NAME) + if not os.path.exists(conf_file): + logger.info("Cannot find AINode config file '{}', use default configuration.".format(conf_file)) + return + + # noinspection PyBroadException + try: + file_configs = load_properties(conf_file) + + config_keys = file_configs.keys() + + if 'ain_inference_rpc_address' in config_keys: + self._config.set_ain_inference_rpc_address(file_configs['ain_inference_rpc_address']) + + if 'ain_inference_rpc_port' in config_keys: + self._config.set_ain_inference_rpc_port(int(file_configs['ain_inference_rpc_port'])) + + if 'ain_models_dir' in config_keys: + self._config.set_ain_models_dir(file_configs['ain_models_dir']) + + if 'ain_system_dir' in config_keys: + self._config.set_ain_system_dir(file_configs['ain_system_dir']) + + if 'ain_seed_config_node' in config_keys: + self._config.set_ain_target_config_node_list(file_configs['ain_seed_config_node']) + + if 'cluster_name' in config_keys: + self._config.set_cluster_name(file_configs['cluster_name']) + + if 'ain_thrift_compression_enabled' in config_keys: + self._config.set_ain_thrift_compression_enabled(int(file_configs['ain_thrift_compression_enabled'])) + + if 'ain_logs_dir' in config_keys: + log_dir = file_configs['ain_logs_dir'] + self._config.set_ain_logs_dir(log_dir) + Logger(log_dir=log_dir).info(f"Successfully load config from {conf_file}.") + + except BadNodeUrlError: + logger.warning("Cannot load AINode conf file, use default configuration.") + + except Exception as e: + logger.warning("Cannot load AINode conf file caused by: {}, use default configuration. ".format(e)) + + def get_config(self) -> AINodeConfig: + return self._config + + +def load_properties(filepath, sep='=', comment_char='#'): + """ + Read the file passed as parameter as a properties file. 
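Example (illustrative, not part of the patch): the line
ain_inference_rpc_port=10810 parses to {'ain_inference_rpc_port': '10810'};
every value comes back as a string, so callers cast it themselves
(e.g. int(...) for ports).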
+ """ + props = {} + with open(filepath, "rt") as f: + for line in f: + l = line.strip() + if l and not l.startswith(comment_char): + key_value = l.split(sep) + key = key_value[0].strip() + value = sep.join(key_value[1:]).strip().strip('"') + props[key] = value + return props + + +def parse_endpoint_url(endpoint_url: str) -> TEndPoint: + """ Parse TEndPoint from a given endpoint url. + Args: + endpoint_url: an endpoint url, format: ip:port + Returns: + TEndPoint + Raises: + BadNodeUrlError + """ + split = endpoint_url.split(":") + if len(split) != 2: + raise BadNodeUrlError(endpoint_url) + + ip = split[0] + try: + port = int(split[1]) + result = TEndPoint(ip, port) + return result + except ValueError: + raise BadNodeUrlError(endpoint_url) diff --git a/iotdb-core/ainode/iotdb/ainode/constant.py b/iotdb-core/ainode/iotdb/ainode/constant.py new file mode 100644 index 0000000000000..596cde0b224ac --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/constant.py @@ -0,0 +1,246 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
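The two helpers above are all that is needed to turn an iotdb-ainode.properties entry into a typed endpoint. A hypothetical round trip (the file path and property name merely mirror the defaults above; not part of the patch):

# Hypothetical usage of load_properties and parse_endpoint_url defined above; not part of the patch.
from iotdb.ainode.config import load_properties, parse_endpoint_url

props = load_properties("conf/iotdb-ainode.properties")  # assumes the file exists at this path
endpoint = parse_endpoint_url(props.get("ain_seed_config_node", "127.0.0.1:10710"))
print(endpoint.ip, endpoint.port)  # a malformed url such as "127.0.0.1" raises BadNodeUrlError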
+# +import inspect +import logging +import os +from enum import Enum +from typing import List + +AINODE_CONF_DIRECTORY_NAME = "conf" +AINODE_ROOT_CONF_DIRECTORY_NAME = "conf" +AINODE_CONF_FILE_NAME = "iotdb-ainode.properties" +AINODE_CONF_GIT_FILE_NAME = "git.properties" +AINODE_CONF_POM_FILE_NAME = "pom.properties" +AINODE_SYSTEM_FILE_NAME = "system.properties" +# inference_rpc_address +AINODE_INFERENCE_RPC_ADDRESS = "127.0.0.1" +AINODE_INFERENCE_RPC_PORT = 10810 +AINODE_MODELS_DIR = "data/ainode/models" +AINODE_SYSTEM_DIR = "data/ainode/system" +AINODE_LOG_DIR = "logs/ainode" +AINODE_THRIFT_COMPRESSION_ENABLED = False +# use for node management +AINODE_CLUSTER_NAME = "defaultCluster" +AINODE_VERSION_INFO = "UNKNOWN" +AINODE_BUILD_INFO = "UNKNOWN" +AINODE_ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))) + +# AINode log +AINODE_LOG_FILE_NAMES = ['log_ainode_all.log', + 'log_ainode_info.log', + 'log_ainode_warning.log', + 'log_ainode_error.log'] +AINODE_LOG_FILE_LEVELS = [ + logging.DEBUG, + logging.INFO, + logging.WARNING, + logging.ERROR] + +TRIAL_ID_PREFIX = "__trial_" +DEFAULT_TRIAL_ID = TRIAL_ID_PREFIX + "0" +DEFAULT_MODEL_FILE_NAME = "model.pt" +DEFAULT_CONFIG_FILE_NAME = "config.yaml" +DEFAULT_CHUNK_SIZE = 8192 + +DEFAULT_RECONNECT_TIMEOUT = 20 +DEFAULT_RECONNECT_TIMES = 3 + +STD_LEVEL = logging.INFO + + +class TSStatusCode(Enum): + SUCCESS_STATUS = 200 + REDIRECTION_RECOMMEND = 400 + AINODE_INTERNAL_ERROR = 1510 + INVALID_URI_ERROR = 1511 + INVALID_INFERENCE_CONFIG = 1512 + INFERENCE_INTERNAL_ERROR = 1520 + + def get_status_code(self) -> int: + return self.value + + +class TaskType(Enum): + FORECAST = "forecast" + + +class OptionsKey(Enum): + # common + TASK_TYPE = "task_type" + MODEL_TYPE = "model_type" + AUTO_TUNING = "auto_tuning" + INPUT_VARS = "input_vars" + + # forecast + INPUT_LENGTH = "input_length" + PREDICT_LENGTH = "predict_length" + PREDICT_INDEX_LIST = "predict_index_list" + INPUT_TYPE_LIST = "input_type_list" + + def name(self) -> str: + return self.value + + +class HyperparameterName(Enum): + # Training hyperparameter + LEARNING_RATE = "learning_rate" + EPOCHS = "epochs" + BATCH_SIZE = "batch_size" + USE_GPU = "use_gpu" + NUM_WORKERS = "num_workers" + + # Structure hyperparameter + KERNEL_SIZE = "kernel_size" + INPUT_VARS = "input_vars" + BLOCK_TYPE = "block_type" + D_MODEL = "d_model" + INNER_LAYERS = "inner_layer" + OUTER_LAYERS = "outer_layer" + + def name(self): + return self.value + + +class ForecastModelType(Enum): + DLINEAR = "dlinear" + DLINEAR_INDIVIDUAL = "dlinear_individual" + NBEATS = "nbeats" + + @classmethod + def values(cls) -> List[str]: + values = [] + for item in list(cls): + values.append(item.value) + return values + + +class ModelInputName(Enum): + DATA_X = "data_x" + TIME_STAMP_X = "time_stamp_x" + TIME_STAMP_Y = "time_stamp_y" + DEC_INP = "dec_inp" + + +class BuiltInModelType(Enum): + # forecast models + ARIMA = "_arima" + EXPONENTIAL_SMOOTHING = "_exponentialsmoothing" + NAIVE_FORECASTER = "_naiveforecaster" + STL_FORECASTER = "_stlforecaster" + + # anomaly detection models + GAUSSIAN_HMM = "_gaussianhmm" + GMM_HMM = "_gmmhmm" + STRAY = "_stray" + + @classmethod + def values(cls) -> List[str]: + values = [] + for item in list(cls): + values.append(item.value) + return values + + +class AttributeName(Enum): + # forecast Attribute + PREDICT_LENGTH = "predict_length" + + # NaiveForecaster + STRATEGY = 'strategy' + SP = 'sp' + + # STLForecaster + # SP = 'sp' + SEASONAL = 'seasonal' + SEASONAL_DEG 
= 'seasonal_deg' + TREND_DEG = 'trend_deg' + LOW_PASS_DEG = 'low_pass_deg' + SEASONAL_JUMP = 'seasonal_jump' + TREND_JUMP = 'trend_jump' + LOSS_PASS_JUMP = 'low_pass_jump' + + # ExponentialSmoothing + DAMPED_TREND = 'damped_trend' + INITIALIZATION_METHOD = 'initialization_method' + OPTIMIZED = 'optimized' + REMOVE_BIAS = 'remove_bias' + USE_BRUTE = 'use_brute' + + # Arima + ORDER = "order" + SEASONAL_ORDER = "seasonal_order" + METHOD = "method" + MAXITER = "maxiter" + SUPPRESS_WARNINGS = "suppress_warnings" + OUT_OF_SAMPLE_SIZE = "out_of_sample_size" + SCORING = "scoring" + WITH_INTERCEPT = "with_intercept" + TIME_VARYING_REGRESSION = "time_varying_regression" + ENFORCE_STATIONARITY = "enforce_stationarity" + ENFORCE_INVERTIBILITY = "enforce_invertibility" + SIMPLE_DIFFERENCING = "simple_differencing" + MEASUREMENT_ERROR = "measurement_error" + MLE_REGRESSION = "mle_regression" + HAMILTON_REPRESENTATION = "hamilton_representation" + CONCENTRATE_SCALE = "concentrate_scale" + + # GAUSSIAN_HMM + N_COMPONENTS = "n_components" + COVARIANCE_TYPE = "covariance_type" + MIN_COVAR = "min_covar" + STARTPROB_PRIOR = "startprob_prior" + TRANSMAT_PRIOR = "transmat_prior" + MEANS_PRIOR = "means_prior" + MEANS_WEIGHT = "means_weight" + COVARS_PRIOR = "covars_prior" + COVARS_WEIGHT = "covars_weight" + ALGORITHM = "algorithm" + N_ITER = "n_iter" + TOL = "tol" + PARAMS = "params" + INIT_PARAMS = "init_params" + IMPLEMENTATION = "implementation" + + # GMMHMM + # N_COMPONENTS = "n_components" + N_MIX = "n_mix" + # MIN_COVAR = "min_covar" + # STARTPROB_PRIOR = "startprob_prior" + # TRANSMAT_PRIOR = "transmat_prior" + WEIGHTS_PRIOR = "weights_prior" + + # MEANS_PRIOR = "means_prior" + # MEANS_WEIGHT = "means_weight" + # ALGORITHM = "algorithm" + # COVARIANCE_TYPE = "covariance_type" + # N_ITER = "n_iter" + # TOL = "tol" + # INIT_PARAMS = "init_params" + # PARAMS = "params" + # IMPLEMENTATION = "implementation" + + # STRAY + ALPHA = "alpha" + K = "k" + KNN_ALGORITHM = "knn_algorithm" + P = "p" + SIZE_THRESHOLD = "size_threshold" + OUTLIER_TAIL = "outlier_tail" + + def name(self) -> str: + return self.value diff --git a/iotdb-core/ainode/iotdb/ainode/exception.py b/iotdb-core/ainode/iotdb/ainode/exception.py new file mode 100644 index 0000000000000..56186ee2bef15 --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/exception.py @@ -0,0 +1,134 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
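The enums above are looked up by value rather than by identity; built-in model ids, for instance, are validated against BuiltInModelType.values(). A small sanity check under that reading (illustrative, not part of the patch):

# Illustrative check of how the enum value lists above are queried; not part of the patch.
from iotdb.ainode.constant import BuiltInModelType, TSStatusCode

assert "_arima" in BuiltInModelType.values()  # built-in ids carry a leading underscore
assert TSStatusCode.SUCCESS_STATUS.get_status_code() == 200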
+#
+import re
+
+from iotdb.ainode.constant import DEFAULT_MODEL_FILE_NAME, DEFAULT_CONFIG_FILE_NAME
+
+
+class _BaseError(Exception):
+    """Base class for exceptions in this module."""
+
+    def __init__(self):
+        self.message = None
+
+    def __str__(self) -> str:
+        return self.message
+
+
+class BadNodeUrlError(_BaseError):
+    def __init__(self, node_url: str):
+        self.message = "Bad node url: {}".format(node_url)
+
+
+class ModelNotExistError(_BaseError):
+    def __init__(self, file_path: str):
+        self.message = "Model path does not exist: {}".format(file_path)
+
+
+class BadConfigValueError(_BaseError):
+    def __init__(self, config_name: str, config_value, hint: str = ''):
+        self.message = "Bad value [{0}] for config {1}. {2}".format(config_value, config_name, hint)
+
+
+class MissingConfigError(_BaseError):
+    def __init__(self, config_name: str):
+        self.message = "Missing config: {}".format(config_name)
+
+
+class MissingOptionError(_BaseError):
+    def __init__(self, config_name: str):
+        self.message = "Missing task option: {}".format(config_name)
+
+
+class RedundantOptionError(_BaseError):
+    def __init__(self, option_name: str):
+        self.message = "Redundant task option: {}".format(option_name)
+
+
+class WrongTypeConfigError(_BaseError):
+    def __init__(self, config_name: str, expected_type: str):
+        self.message = "Wrong type for config: {0}, expected: {1}".format(config_name, expected_type)
+
+
+class UnsupportedError(_BaseError):
+    def __init__(self, msg: str):
+        self.message = "{0} is not supported in the current version".format(msg)
+
+
+class InvalidUriError(_BaseError):
+    def __init__(self, uri: str):
+        self.message = "Invalid uri: {}, there is no {} or {} under this uri.".format(uri, DEFAULT_MODEL_FILE_NAME,
+                                                                                      DEFAULT_CONFIG_FILE_NAME)
+
+
+class InvalidWindowArgumentError(_BaseError):
+    def __init__(
+            self,
+            window_interval,
+            window_step,
+            dataset_length):
+        self.message = f"Invalid inference input: window_interval {window_interval}, window_step {window_step}, dataset_length {dataset_length}"
+
+
+class InferenceModelInternalError(_BaseError):
+    def __init__(self, msg: str):
+        self.message = "Inference model internal error: {0}".format(msg)
+
+
+class BuiltInModelNotSupportError(_BaseError):
+    def __init__(self, msg: str):
+        self.message = "Built-in model not supported: {0}".format(msg)
+
+
+class WrongAttributeTypeError(_BaseError):
+    def __init__(self, attribute_name: str, expected_type: str):
+        self.message = "Wrong type for attribute: {0}, expected: {1}".format(attribute_name, expected_type)
+
+
+class NumericalRangeException(_BaseError):
+    def __init__(self, attribute_name: str, value, min_value, max_value):
+        self.message = "Attribute {0} expects a value between {1} and {2}, got {3} instead." \
+            .format(attribute_name, min_value, max_value, value)
+
+
+class StringRangeException(_BaseError):
+    def __init__(self, attribute_name: str, value: str, expect_value):
+        self.message = "Attribute {0} expects a value in {1}, got {2} instead." \
+            .format(attribute_name, expect_value, value)
+
+
+class ListRangeException(_BaseError):
+    def __init__(self, attribute_name: str, value: list, expected_type: str):
+        self.message = "Attribute {0} expects a value of type list[{1}], got {2} instead."
\ + .format(attribute_name, expected_type, value) + + +class AttributeNotSupportError(_BaseError): + def __init__(self, model_name: str, attribute_name: str): + self.message = "Attribute {0} is not supported in model {1}".format(attribute_name, model_name) + + +# This is used to extract the key message in RuntimeError instead of the traceback message +def runtime_error_extractor(error_message): + pattern = re.compile(r"RuntimeError: (.+)") + match = pattern.search(error_message) + + if match: + return match.group(1) + else: + return "" diff --git a/iotdb-core/ainode/iotdb/ainode/handler.py b/iotdb-core/ainode/iotdb/ainode/handler.py new file mode 100644 index 0000000000000..c27be605e51e7 --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/handler.py @@ -0,0 +1,43 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from iotdb.ainode.manager.cluster_manager import ClusterManager +from iotdb.ainode.manager.inference_manager import InferenceManager +from iotdb.ainode.manager.model_manager import ModelManager +from iotdb.thrift.ainode import IAINodeRPCService +from iotdb.thrift.ainode.ttypes import (TDeleteModelReq, TRegisterModelReq, + TAIHeartbeatReq, TInferenceReq, TRegisterModelResp, TInferenceResp, + TAIHeartbeatResp) +from iotdb.thrift.common.ttypes import TSStatus + + +class AINodeRPCServiceHandler(IAINodeRPCService.Iface): + def __init__(self): + self._model_manager = ModelManager() + + def registerModel(self, req: TRegisterModelReq) -> TRegisterModelResp: + return self._model_manager.register_model(req) + + def deleteModel(self, req: TDeleteModelReq) -> TSStatus: + return self._model_manager.delete_model(req) + + def inference(self, req: TInferenceReq) -> TInferenceResp: + return InferenceManager.inference(req, self._model_manager) + + def getAIHeartbeat(self, req: TAIHeartbeatReq) -> TAIHeartbeatResp: + return ClusterManager.get_heart_beat(req) diff --git a/iotdb-core/ainode/iotdb/ainode/log.py b/iotdb-core/ainode/iotdb/ainode/log.py new file mode 100644 index 0000000000000..4c796dbfcf615 --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/log.py @@ -0,0 +1,133 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +# +import inspect +import logging +import multiprocessing +import os +import random +import sys +import threading + +from iotdb.ainode.constant import STD_LEVEL, AINODE_LOG_FILE_NAMES, AINODE_LOG_FILE_LEVELS +from iotdb.ainode.util.decorator import singleton + + +class LoggerFilter(logging.Filter): + def filter(self, record): + record.msg = f"{self.custom_log_info()}: {record.msg}" + return True + + @staticmethod + def custom_log_info(): + frame = inspect.currentframe() + stack_trace = inspect.getouterframes(frame) + + pid = os.getpid() + process_name = multiprocessing.current_process().name + + stack_info = "" + frame_info = stack_trace[7] + file_name = frame_info.filename + # if file_name is not in current working directory, find the first "iotdb" in the path + for l in range(len(file_name)): + i = len(file_name) - l - 1 + if file_name[i:].startswith("iotdb/") or file_name[i:].startswith("iotdb\\"): + file_name = file_name[i:] + break + + stack_info += f"{file_name}:{frame_info.lineno}-{frame_info.function}" + + return f"[{pid}:{process_name}] {stack_info}" + + +@singleton +class Logger: + """ Logger is a singleton, it will be initialized when AINodeDescriptor is inited for the first time. + You can just use Logger() to get it anywhere. + + Args: + log_dir: log directory + + logger_format: log format + logger: global logger with custom format and level + file_handlers: file handlers for different levels + console_handler: console handler for stdout + _lock: process lock for logger. This is just a precaution, we currently do not have multiprocessing + """ + + def __init__(self, log_dir=None): + + self.logger_format = logging.Formatter(fmt='%(asctime)s %(levelname)s %(' + 'message)s', + datefmt='%Y-%m-%d %H:%M:%S') + + self.logger = logging.getLogger(str(random.random())) + self.logger.handlers.clear() + self.logger.setLevel(logging.DEBUG) + self.console_handler = logging.StreamHandler(sys.stdout) + self.console_handler.setLevel(STD_LEVEL) + self.console_handler.setFormatter(self.logger_format) + + self.logger.addHandler(self.console_handler) + + if log_dir is not None: + file_names = AINODE_LOG_FILE_NAMES + file_levels = AINODE_LOG_FILE_LEVELS + if not os.path.exists(log_dir): + os.makedirs(log_dir) + os.chmod(log_dir, 0o777) + for file_name in file_names: + log_path = log_dir + "/" + file_name + if not os.path.exists(log_path): + f = open(log_path, mode='w', encoding='utf-8') + f.close() + os.chmod(log_path, 0o777) + self.file_handlers = [] + for l in range(len(file_names)): + self.file_handlers.append(logging.FileHandler(log_dir + "/" + file_names[l], mode='a')) + self.file_handlers[l].setLevel(file_levels[l]) + self.file_handlers[l].setFormatter(self.logger_format) + + for file_handler in self.file_handlers: + self.logger.addHandler(file_handler) + else: + log_dir = "default path" + + self.logger.addFilter(LoggerFilter()) + self._lock = threading.Lock() + self.info(f"Logger init successfully. 
Log will be written to {log_dir}") + + def debug(self, *args) -> None: + self._lock.acquire() + self.logger.debug(' '.join(map(str, args))) + self._lock.release() + + def info(self, *args) -> None: + self._lock.acquire() + self.logger.info(' '.join(map(str, args))) + self._lock.release() + + def warning(self, *args) -> None: + self._lock.acquire() + self.logger.warning(' '.join(map(str, args))) + self._lock.release() + + def error(self, *args) -> None: + self._lock.acquire() + self.logger.error(' '.join(map(str, args))) + self._lock.release() diff --git a/iotdb-core/ainode/iotdb/ainode/manager/__init__.py b/iotdb-core/ainode/iotdb/ainode/manager/__init__.py new file mode 100644 index 0000000000000..2a1e720805f29 --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/manager/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# diff --git a/iotdb-core/ainode/iotdb/ainode/manager/cluster_manager.py b/iotdb-core/ainode/iotdb/ainode/manager/cluster_manager.py new file mode 100644 index 0000000000000..dff290a0b84bb --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/manager/cluster_manager.py @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
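Because Logger is wrapped in @singleton, every module-level logger = Logger() in the files above resolves to one shared instance, and the log_dir argument only takes effect on the first construction. A minimal illustration, assuming the @singleton decorator caches that first instance (not part of the patch):

# Illustrative only; assumes @singleton returns the cached first instance on later calls.
from iotdb.ainode.log import Logger

a = Logger(log_dir="logs/ainode")  # first call creates the file handlers
b = Logger()                       # same object; no new handlers are attached
assert a is b
b.info("messages are", "space-joined")  # Logger.info joins its args with ' '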
+# +import psutil + +from iotdb.thrift.ainode.ttypes import TAIHeartbeatResp, TAIHeartbeatReq +from iotdb.thrift.common.ttypes import TLoadSample + + +class ClusterManager: + @staticmethod + def get_heart_beat(req: TAIHeartbeatReq) -> TAIHeartbeatResp: + if req.needSamplingLoad: + cpu_percent = psutil.cpu_percent(interval=1) + memory_percent = psutil.virtual_memory().percent + disk_usage = psutil.disk_usage('/') + disk_free = disk_usage.free + load_sample = TLoadSample(cpuUsageRate=cpu_percent, + memoryUsageRate=memory_percent, + diskUsageRate=disk_usage.percent, + freeDiskSpace=disk_free / 1024 / 1024 / 1024) + return TAIHeartbeatResp(heartbeatTimestamp=req.heartbeatTimestamp, + status="Running", + loadSample=load_sample) + else: + return TAIHeartbeatResp(heartbeatTimestamp=req.heartbeatTimestamp, + status="Running") diff --git a/iotdb-core/ainode/iotdb/ainode/manager/inference_manager.py b/iotdb-core/ainode/iotdb/ainode/manager/inference_manager.py new file mode 100644 index 0000000000000..4c33ef6f918f7 --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/manager/inference_manager.py @@ -0,0 +1,209 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
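get_heart_beat above samples the host only when the ConfigNode sets needSamplingLoad; the TLoadSample it builds carries usage percentages plus free disk converted to GiB. The same sampling restated outside thrift (illustrative, not part of the patch):

# Standalone restatement of the load sampling in ClusterManager.get_heart_beat; not part of the patch.
import psutil

cpu_percent = psutil.cpu_percent(interval=1)      # CPU usage over a 1-second window
memory_percent = psutil.virtual_memory().percent
disk = psutil.disk_usage('/')
free_gib = disk.free / 1024 / 1024 / 1024         # bytes -> GiB, as in freeDiskSpace
print(cpu_percent, memory_percent, disk.percent, free_gib)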
+# +import pandas as pd +from torch import tensor + +from iotdb.ainode.constant import TSStatusCode +from iotdb.ainode.exception import InvalidWindowArgumentError, InferenceModelInternalError, runtime_error_extractor +from iotdb.ainode.log import Logger +from iotdb.ainode.manager.model_manager import ModelManager +from iotdb.ainode.util.serde import convert_to_binary, convert_to_df +from iotdb.ainode.util.status import get_status +from iotdb.thrift.ainode.ttypes import TInferenceReq, TInferenceResp + +logger = Logger() + + +class InferenceManager: + @staticmethod + def inference(req: TInferenceReq, model_manager: ModelManager): + logger.info(f"start inference registered model {req.modelId}") + try: + model_id, full_data, window_interval, window_step, inference_attributes = _parse_inference_request(req) + + if model_id.startswith('_'): + # built-in models + logger.info(f"start inference built-in model {model_id}") + # parse the inference attributes and create the built-in model + model = _get_built_in_model(model_id, model_manager, inference_attributes) + inference_results = _inference_with_built_in_model( + model, full_data) + else: + # user-registered models + model = _get_model(model_id, model_manager, inference_attributes) + inference_results = _inference_with_registered_model( + model, full_data, window_interval, window_step) + for i in range(len(inference_results)): + inference_results[i] = convert_to_binary(inference_results[i]) + return TInferenceResp( + get_status( + TSStatusCode.SUCCESS_STATUS), + inference_results) + except Exception as e: + logger.warning(e) + inference_results = [] + return TInferenceResp(get_status(TSStatusCode.AINODE_INTERNAL_ERROR, str(e)), inference_results) + + +def _process_data(full_data): + """ + Args: + full_data: a tuple of (data, time_stamp, type_list, column_name_list), where the data is a DataFrame with shape + (L, C), time_stamp is a DataFrame with shape(L, 1), type_list is a list of data types with length C, + column_name_list is a list of column names with length C, where L is the number of data points, C is the + number of variables, the data and time_stamp are aligned by index + Returns: + data: a tensor with shape (1, L, C) + data_length: the number of data points + Description: + the process_data module will convert the input data into a tensor with shape (1, L, C), where L is the number of + data points, C is the number of variables, the data and time_stamp are aligned by index. The module will also + convert the data type of each column to the corresponding type. 
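    Example (illustrative, not part of the patch): a (100, 3) input DataFrame
    becomes a tensor of shape (1, 100, 3); NaNs are filled with 0, TEXT columns
    are zeroed out and BOOLEAN columns are cast to int before conversion.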
+ """ + data, time_stamp, type_list, _ = full_data + data_length = time_stamp.shape[0] + data = data.fillna(0) + for i in range(len(type_list)): + if type_list[i] == "TEXT": + data[data.columns[i]] = 0 + elif type_list[i] == "BOOLEAN": + data[data.columns[i]] = data[data.columns[i]].astype("int") + data = tensor(data.values).unsqueeze(0) + return data, data_length + + +def _inference_with_registered_model(model, full_data, window_interval, window_step): + """ + Args: + model: the user-defined model + full_data: a tuple of (data, time_stamp, type_list, column_name_list), where the data is a DataFrame with shape + (L, C), time_stamp is a DataFrame with shape(L, 1), type_list is a list of data types with length C, + column_name_list is a list of column names with length C, where L is the number of data points, C is the + number of variables, the data and time_stamp are aligned by index + window_interval: the length of each sliding window + window_step: the step between two adjacent sliding windows + Returns: + outputs: a list of output DataFrames, where each DataFrame has shape (H', C'), where H' is the output window + interval, C' is the number of variables in the output DataFrame + Description: + the inference_with_registered_model function will inference with deep learning model, which is registered in + user register process. This module will split the input data into several sliding windows which has the same + shape (1, H, C), where H is the window interval, and then feed each sliding window into the model to get the + output, the output is a DataFrame with shape (H', C'), where H' is the output window interval, C' is the number + of variables in the output DataFrame. Then the inference module will concatenate all the output DataFrames into + a list. 
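    Worked example (illustrative, not part of the patch): with dataset_length = 100,
    window_interval = 24 and window_step = 12, sliding_times = (100 - 24) // 12 + 1 = 7,
    so the model sees the windows [0, 24), [12, 36), ..., [72, 96); the last four
    rows are never covered.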
+ """ + + dataset, dataset_length = _process_data(full_data) + + # check the validity of window_interval and window_step, the two arguments must be positive integers, and the + # window_interval should not be larger than the dataset length + if window_interval is None or window_step is None \ + or window_interval > dataset_length \ + or window_interval <= 0 or \ + window_step <= 0: + raise InvalidWindowArgumentError(window_interval, window_step, dataset_length) + + sliding_times = int((dataset_length - window_interval) // window_step + 1) + outputs = [] + try: + # split the input data into several sliding windows + for sliding_time in range(sliding_times): + if window_step == float('inf'): + start_index = 0 + else: + start_index = sliding_time * window_step + end_index = start_index + window_interval + # input_data: tensor, shape: (1, H, C), where H is input window interval + input_data = dataset[:, start_index:end_index, :] + # output: tensor, shape: (1, H', C'), where H' is the output window interval + output = model(input_data) + # output: DataFrame, shape: (H', C') + output = pd.DataFrame(output.squeeze(0).detach().numpy()) + outputs.append(output) + except Exception as e: + error_msg = runtime_error_extractor(str(e)) + if error_msg != "": + raise InferenceModelInternalError(error_msg) + raise InferenceModelInternalError(str(e)) + + return outputs + + +def _inference_with_built_in_model(model, full_data): + """ + Args: + model: the built-in model + full_data: a tuple of (data, time_stamp, type_list, column_name_list), where the data is a DataFrame with shape + (L, C), time_stamp is a DataFrame with shape(L, 1), type_list is a list of data types with length C, + column_name_list is a list of column names with length C, where L is the number of data points, C is the + number of variables, the data and time_stamp are aligned by index + Returns: + outputs: a list of output DataFrames, where each DataFrame has shape (H', C'), where H' is the output window + interval, C' is the number of variables in the output DataFrame + Description: + the inference_with_built_in_model function will inference with built-in model, which does not + require user registration. This module will parse the inference attributes and create the built-in model, then + feed the input data into the model to get the output, the output is a DataFrame with shape (H', C'), where H' + is the output window interval, C' is the number of variables in the output DataFrame. Then the inference module + will concatenate all the output DataFrames into a list. 
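    Note (illustrative, not part of the patch): unlike the registered-model path,
    no sliding window is applied here; the whole (L, C) input is handed to
    model.inference in a single call, so the returned list always has length 1.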
+ """ + + data, _, _, _ = full_data + output = model.inference(data) + # output: DataFrame, shape: (H', C') + output = pd.DataFrame(output) + outputs = [output] + return outputs + + +def _get_model(model_id: str, model_manager: ModelManager, inference_attributes: {}): + if inference_attributes is None or 'acceleration' not in inference_attributes: + # if the acceleration is not specified, then the acceleration will be set to default value False + acceleration = False + else: + # if the acceleration is specified, then the acceleration will be set to the specified value + acceleration = (inference_attributes['acceleration'].lower() == 'true') + return model_manager.load_model(model_id, acceleration) + + +def _get_built_in_model(model_id: str, model_manager: ModelManager, inference_attributes: {}): + return model_manager.load_built_in_model(model_id, inference_attributes) + + +def _parse_inference_request(req: TInferenceReq): + binary_dataset = req.dataset + type_list = req.typeList + column_name_list = req.columnNameList + column_name_index = req.columnNameIndexMap + data = convert_to_df(column_name_list, type_list, column_name_index, [binary_dataset]) + time_stamp, data = data[data.columns[0:1]], data[data.columns[1:]] + full_data = (data, time_stamp, type_list, column_name_list) + inference_attributes = req.inferenceAttributes + if inference_attributes is None: + inference_attributes = {} + + window_params = req.windowParams + if window_params is None: + # set default window_step to infinity and window_interval to dataset length + window_step = float('inf') + window_interval = data.shape[0] + else: + window_step = window_params.windowStep + window_interval = window_params.windowInterval + return req.modelId, full_data, window_interval, window_step, inference_attributes diff --git a/iotdb-core/ainode/iotdb/ainode/manager/model_manager.py b/iotdb-core/ainode/iotdb/ainode/manager/model_manager.py new file mode 100644 index 0000000000000..1ccdea959985f --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/manager/model_manager.py @@ -0,0 +1,84 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
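When a request carries no windowParams, _parse_inference_request above defaults to a single full-length window: window_interval = data.shape[0] and window_step = inf. A quick check that the sliding-window arithmetic then yields exactly one pass (illustrative, not part of the patch):

# Illustrative check of the default-window arithmetic used by the inference path; not part of the patch.
dataset_length = 100
window_interval = dataset_length   # default: one window spanning the whole dataset
window_step = float('inf')

sliding_times = int((dataset_length - window_interval) // window_step + 1)
assert sliding_times == 1          # (0 // inf) + 1 == 1, so the model runs exactly once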
+# +from typing import Callable + +from yaml import YAMLError + +from iotdb.ainode.constant import TSStatusCode, BuiltInModelType +from iotdb.ainode.exception import InvalidUriError, BadConfigValueError, BuiltInModelNotSupportError +from iotdb.ainode.log import Logger +from iotdb.ainode.model.built_in_model_factory import fetch_built_in_model +from iotdb.ainode.model.model_storage import ModelStorage +from iotdb.ainode.util.status import get_status +from iotdb.thrift.ainode.ttypes import TRegisterModelReq, TRegisterModelResp, TDeleteModelReq +from iotdb.thrift.common.ttypes import TSStatus + +logger = Logger() + + +class ModelManager: + def __init__(self): + self.model_storage = ModelStorage() + + def register_model(self, req: TRegisterModelReq) -> TRegisterModelResp: + logger.info(f"register model {req.modelId} from {req.uri}") + try: + configs, attributes = self.model_storage.register_model(req.modelId, req.uri) + return TRegisterModelResp(get_status(TSStatusCode.SUCCESS_STATUS), configs, attributes) + except InvalidUriError as e: + logger.warning(e) + self.model_storage.delete_model(req.modelId) + return TRegisterModelResp(get_status(TSStatusCode.INVALID_URI_ERROR, e.message)) + except BadConfigValueError as e: + logger.warning(e) + self.model_storage.delete_model(req.modelId) + return TRegisterModelResp(get_status(TSStatusCode.INVALID_INFERENCE_CONFIG, e.message)) + except YAMLError as e: + logger.warning(e) + self.model_storage.delete_model(req.modelId) + if hasattr(e, 'problem_mark'): + mark = e.problem_mark + return TRegisterModelResp(get_status(TSStatusCode.INVALID_INFERENCE_CONFIG, + f"An error occurred while parsing the yaml file, " + f"at line {mark.line + 1} column {mark.column + 1}.")) + return TRegisterModelResp( + get_status(TSStatusCode.INVALID_INFERENCE_CONFIG, f"An error occurred while parsing the yaml file")) + except Exception as e: + logger.warning(e) + self.model_storage.delete_model(req.modelId) + return TRegisterModelResp(get_status(TSStatusCode.AINODE_INTERNAL_ERROR)) + + def delete_model(self, req: TDeleteModelReq) -> TSStatus: + logger.info(f"delete model {req.modelId}") + try: + self.model_storage.delete_model(req.modelId) + return get_status(TSStatusCode.SUCCESS_STATUS) + except Exception as e: + logger.warning(e) + return get_status(TSStatusCode.AINODE_INTERNAL_ERROR, str(e)) + + def load_model(self, model_id: str, acceleration: bool = False) -> Callable: + logger.info(f"load model {model_id}") + return self.model_storage.load_model(model_id, acceleration) + + @staticmethod + def load_built_in_model(model_id: str, attributes: {}): + model_id = model_id.lower() + if model_id not in BuiltInModelType.values(): + raise BuiltInModelNotSupportError(model_id) + return fetch_built_in_model(model_id, attributes) diff --git a/iotdb-core/ainode/iotdb/ainode/model/__init__.py b/iotdb-core/ainode/iotdb/ainode/model/__init__.py new file mode 100644 index 0000000000000..2a1e720805f29 --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/model/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# diff --git a/iotdb-core/ainode/iotdb/ainode/model/built_in_model_factory.py b/iotdb-core/ainode/iotdb/ainode/model/built_in_model_factory.py new file mode 100644 index 0000000000000..82443012176b0 --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/model/built_in_model_factory.py @@ -0,0 +1,924 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +from abc import abstractmethod +from typing import List, Dict + +import numpy as np +from sklearn.preprocessing import MinMaxScaler +from sktime.annotation.hmm_learn import GaussianHMM, GMMHMM +from sktime.annotation.stray import STRAY +from sktime.forecasting.arima import ARIMA +from sktime.forecasting.exp_smoothing import ExponentialSmoothing +from sktime.forecasting.naive import NaiveForecaster +from sktime.forecasting.trend import STLForecaster + +from iotdb.ainode.constant import AttributeName, BuiltInModelType +from iotdb.ainode.exception import InferenceModelInternalError, AttributeNotSupportError +from iotdb.ainode.exception import WrongAttributeTypeError, NumericalRangeException, StringRangeException, \ + ListRangeException, BuiltInModelNotSupportError +from iotdb.ainode.log import Logger + +logger = Logger() + + +def get_model_attributes(model_id: str): + if model_id == BuiltInModelType.ARIMA.value: + attribute_map = arima_attribute_map + elif model_id == BuiltInModelType.NAIVE_FORECASTER.value: + attribute_map = naive_forecaster_attribute_map + elif model_id == BuiltInModelType.EXPONENTIAL_SMOOTHING.value: + attribute_map = exponential_smoothing_attribute_map + elif model_id == BuiltInModelType.STL_FORECASTER.value: + attribute_map = stl_forecaster_attribute_map + elif model_id == BuiltInModelType.GMM_HMM.value: + attribute_map = gmmhmm_attribute_map + elif model_id == BuiltInModelType.GAUSSIAN_HMM.value: + attribute_map = gaussian_hmm_attribute_map + elif model_id == BuiltInModelType.STRAY.value: + attribute_map = stray_attribute_map + else: + raise BuiltInModelNotSupportError(model_id) + return attribute_map + + +def fetch_built_in_model(model_id, inference_attributes): + """ + Args: + model_id: the unique id of the model + inference_attributes: a list of attributes to be inferred, in this function, the attributes will include some + parameters of the built-in model. Some parameters are optional, and if the parameters are not + specified, the default value will be used. 
+ Returns: + model: the built-in model + attributes: a dict of attributes, where the key is the attribute name, the value is the parsed value of the + attribute + Description: + the create_built_in_model function will create the built-in model, which does not require user + registration. This module will parse the inference attributes and create the built-in model. + """ + attribute_map = get_model_attributes(model_id) + + # validate the inference attributes + for attribute_name in inference_attributes: + if attribute_name not in attribute_map: + raise AttributeNotSupportError(model_id, attribute_name) + + # parse the inference attributes, attributes is a Dict[str, Any] + attributes = parse_attribute(inference_attributes, attribute_map) + + # build the built-in model + if model_id == BuiltInModelType.ARIMA.value: + model = ArimaModel(attributes) + elif model_id == BuiltInModelType.EXPONENTIAL_SMOOTHING.value: + model = ExponentialSmoothingModel(attributes) + elif model_id == BuiltInModelType.NAIVE_FORECASTER.value: + model = NaiveForecasterModel(attributes) + elif model_id == BuiltInModelType.STL_FORECASTER.value: + model = STLForecasterModel(attributes) + elif model_id == BuiltInModelType.GMM_HMM.value: + model = GMMHMMModel(attributes) + elif model_id == BuiltInModelType.GAUSSIAN_HMM.value: + model = GaussianHmmModel(attributes) + elif model_id == BuiltInModelType.STRAY.value: + model = STRAYModel(attributes) + else: + raise BuiltInModelNotSupportError(model_id) + + return model + + +class Attribute(object): + def __init__(self, name: str): + """ + Args: + name: the name of the attribute + """ + self._name = name + + @abstractmethod + def get_default_value(self): + raise NotImplementedError + + @abstractmethod + def validate_value(self, value): + raise NotImplementedError + + @abstractmethod + def parse(self, string_value: str): + raise NotImplementedError + + +class IntAttribute(Attribute): + def __init__(self, name: str, + default_value: int, + default_low: int, + default_high: int, + ): + super(IntAttribute, self).__init__(name) + self.__default_value = default_value + self.__default_low = default_low + self.__default_high = default_high + + def get_default_value(self): + return self.__default_value + + def validate_value(self, value): + if self.__default_low <= value <= self.__default_high: + return True + raise NumericalRangeException(self._name, value, self.__default_low, self.__default_high) + + def parse(self, string_value: str): + try: + int_value = int(string_value) + except: + raise WrongAttributeTypeError(self._name, "int") + return int_value + + +class FloatAttribute(Attribute): + def __init__(self, name: str, + default_value: float, + default_low: float, + default_high: float, + ): + super(FloatAttribute, self).__init__(name) + self.__default_value = default_value + self.__default_low = default_low + self.__default_high = default_high + + def get_default_value(self): + return self.__default_value + + def validate_value(self, value): + if self.__default_low <= value <= self.__default_high: + return True + raise NumericalRangeException(self._name, value, self.__default_low, self.__default_high) + + def parse(self, string_value: str): + try: + float_value = float(string_value) + except: + raise WrongAttributeTypeError(self._name, "float") + return float_value + + +class StringAttribute(Attribute): + def __init__(self, name: str, default_value: str, value_choices: List[str]): + super(StringAttribute, self).__init__(name) + self.__default_value = default_value + 
self.__value_choices = value_choices + + def get_default_value(self): + return self.__default_value + + def validate_value(self, value): + if value in self.__value_choices: + return True + raise StringRangeException(self._name, value, self.__value_choices) + + def parse(self, string_value: str): + return string_value + + +class BooleanAttribute(Attribute): + def __init__(self, name: str, default_value: bool): + super(BooleanAttribute, self).__init__(name) + self.__default_value = default_value + + def get_default_value(self): + return self.__default_value + + def validate_value(self, value): + if isinstance(value, bool): + return True + raise WrongAttributeTypeError(self._name, "bool") + + def parse(self, string_value: str): + if string_value.lower() == "true": + return True + elif string_value.lower() == "false": + return False + else: + raise WrongAttributeTypeError(self._name, "bool") + + +class ListAttribute(Attribute): + def __init__(self, name: str, default_value: List, value_type): + """ + value_type is the type of the elements in the list, e.g. int, float, str + """ + super(ListAttribute, self).__init__(name) + self.__default_value = default_value + self.__value_type = value_type + self.__type_to_str = {str: "str", int: "int", float: "float"} + + def get_default_value(self): + return self.__default_value + + def validate_value(self, value): + if not isinstance(value, list): + raise WrongAttributeTypeError(self._name, "list") + for value_item in value: + if not isinstance(value_item, self.__value_type): + raise WrongAttributeTypeError(self._name, self.__value_type) + return True + + def parse(self, string_value: str): + try: + list_value = eval(string_value) + except: + raise WrongAttributeTypeError(self._name, "list") + if not isinstance(list_value, list): + raise WrongAttributeTypeError(self._name, "list") + for i in range(len(list_value)): + try: + list_value[i] = self.__value_type(list_value[i]) + except: + raise ListRangeException(self._name, list_value, self.__type_to_str[self.__value_type]) + return list_value + + +class TupleAttribute(Attribute): + def __init__(self, name: str, default_value: tuple, value_type): + """ + value_type is the type of the elements in the list, e.g. 
int, float, str + """ + super(TupleAttribute, self).__init__(name) + self.__default_value = default_value + self.__value_type = value_type + self.__type_to_str = {str: "str", int: "int", float: "float"} + + def get_default_value(self): + return self.__default_value + + def validate_value(self, value): + if not isinstance(value, tuple): + raise WrongAttributeTypeError(self._name, "tuple") + for value_item in value: + if not isinstance(value_item, self.__value_type): + raise WrongAttributeTypeError(self._name, self.__value_type) + return True + + def parse(self, string_value: str): + try: + tuple_value = eval(string_value) + except: + raise WrongAttributeTypeError(self._name, "tuple") + if not isinstance(tuple_value, tuple): + raise WrongAttributeTypeError(self._name, "tuple") + list_value = list(tuple_value) + for i in range(len(list_value)): + try: + list_value[i] = self.__value_type(list_value[i]) + except: + raise ListRangeException(self._name, list_value, self.__type_to_str[self.__value_type]) + tuple_value = tuple(list_value) + return tuple_value + + +def parse_attribute(input_attributes: Dict[str, str], attribute_map: Dict[str, Attribute]): + """ + Args: + input_attributes: a dict of attributes, where the key is the attribute name, the value is the string value of + the attribute + attribute_map: a dict of hyperparameters, where the key is the attribute name, the value is the Attribute + object + Returns: + a dict of attributes, where the key is the attribute name, the value is the parsed value of the attribute + """ + attributes = {} + for attribute_name in attribute_map: + # user specified the attribute + if attribute_name in input_attributes: + attribute = attribute_map[attribute_name] + value = attribute.parse(input_attributes[attribute_name]) + attribute.validate_value(value) + attributes[attribute_name] = value + # user did not specify the attribute, use the default value + else: + try: + attributes[attribute_name] = attribute_map[attribute_name].get_default_value() + except NotImplementedError as e: + logger.error(f"attribute {attribute_name} is not implemented.") + raise e + return attributes + + +# built-in sktime model attributes +# NaiveForecaster +naive_forecaster_attribute_map = { + AttributeName.PREDICT_LENGTH.value: IntAttribute( + name=AttributeName.PREDICT_LENGTH.value, + default_value=1, + default_low=1, + default_high=5000 + ), + AttributeName.STRATEGY.value: StringAttribute( + name=AttributeName.STRATEGY.value, + default_value="last", + value_choices=["last", "mean"], + ), + AttributeName.SP.value: IntAttribute( + name=AttributeName.SP.value, + default_value=1, + default_low=1, + default_high=5000 + ), +} +# ExponentialSmoothing +exponential_smoothing_attribute_map = { + AttributeName.PREDICT_LENGTH.value: IntAttribute( + name=AttributeName.PREDICT_LENGTH.value, + default_value=1, + default_low=1, + default_high=5000 + ), + AttributeName.DAMPED_TREND.value: BooleanAttribute( + name=AttributeName.DAMPED_TREND.value, + default_value=False, + ), + AttributeName.INITIALIZATION_METHOD.value: StringAttribute( + name=AttributeName.INITIALIZATION_METHOD.value, + default_value="estimated", + value_choices=["estimated", "heuristic", "legacy-heuristic", "known"], + ), + AttributeName.OPTIMIZED.value: BooleanAttribute( + name=AttributeName.OPTIMIZED.value, + default_value=True, + ), + AttributeName.REMOVE_BIAS.value: BooleanAttribute( + name=AttributeName.REMOVE_BIAS.value, + default_value=False, + ), + AttributeName.USE_BRUTE.value: BooleanAttribute( + 
name=AttributeName.USE_BRUTE.value, + default_value=False, + ) +} +# Arima +arima_attribute_map = { + AttributeName.PREDICT_LENGTH.value: IntAttribute( + name=AttributeName.PREDICT_LENGTH.value, + default_value=1, + default_low=1, + default_high=5000 + ), + AttributeName.ORDER.value: TupleAttribute( + name=AttributeName.ORDER.value, + default_value=(1, 0, 0), + value_type=int + ), + AttributeName.SEASONAL_ORDER.value: TupleAttribute( + name=AttributeName.SEASONAL_ORDER.value, + default_value=(0, 0, 0, 0), + value_type=int + ), + AttributeName.METHOD.value: StringAttribute( + name=AttributeName.METHOD.value, + default_value="lbfgs", + value_choices=["lbfgs", "bfgs", "newton", "nm", "cg", "ncg", "powell"], + ), + AttributeName.MAXITER.value: IntAttribute( + name=AttributeName.MAXITER.value, + default_value=1, + default_low=1, + default_high=5000 + ), + AttributeName.SUPPRESS_WARNINGS.value: BooleanAttribute( + name=AttributeName.SUPPRESS_WARNINGS.value, + default_value=True, + ), + AttributeName.OUT_OF_SAMPLE_SIZE.value: IntAttribute( + name=AttributeName.OUT_OF_SAMPLE_SIZE.value, + default_value=0, + default_low=0, + default_high=5000 + ), + AttributeName.SCORING.value: StringAttribute( + name=AttributeName.SCORING.value, + default_value="mse", + value_choices=["mse", "mae", "rmse", "mape", "smape", "rmsle", "r2"], + ), + AttributeName.WITH_INTERCEPT.value: BooleanAttribute( + name=AttributeName.WITH_INTERCEPT.value, + default_value=True, + ), + AttributeName.TIME_VARYING_REGRESSION.value: BooleanAttribute( + name=AttributeName.TIME_VARYING_REGRESSION.value, + default_value=False, + ), + AttributeName.ENFORCE_STATIONARITY.value: BooleanAttribute( + name=AttributeName.ENFORCE_STATIONARITY.value, + default_value=True, + ), + AttributeName.ENFORCE_INVERTIBILITY.value: BooleanAttribute( + name=AttributeName.ENFORCE_INVERTIBILITY.value, + default_value=True, + ), + AttributeName.SIMPLE_DIFFERENCING.value: BooleanAttribute( + name=AttributeName.SIMPLE_DIFFERENCING.value, + default_value=False, + ), + AttributeName.MEASUREMENT_ERROR.value: BooleanAttribute( + name=AttributeName.MEASUREMENT_ERROR.value, + default_value=False, + ), + AttributeName.MLE_REGRESSION.value: BooleanAttribute( + name=AttributeName.MLE_REGRESSION.value, + default_value=True, + ), + AttributeName.HAMILTON_REPRESENTATION.value: BooleanAttribute( + name=AttributeName.HAMILTON_REPRESENTATION.value, + default_value=False, + ), + AttributeName.CONCENTRATE_SCALE.value: BooleanAttribute( + name=AttributeName.CONCENTRATE_SCALE.value, + default_value=False, + ) +} +# STLForecaster +stl_forecaster_attribute_map = { + AttributeName.PREDICT_LENGTH.value: IntAttribute( + name=AttributeName.PREDICT_LENGTH.value, + default_value=1, + default_low=1, + default_high=5000 + ), + AttributeName.SP.value: IntAttribute( + name=AttributeName.SP.value, + default_value=2, + default_low=1, + default_high=5000 + ), + AttributeName.SEASONAL.value: IntAttribute( + name=AttributeName.SEASONAL.value, + default_value=7, + default_low=1, + default_high=5000 + ), + AttributeName.SEASONAL_DEG.value: IntAttribute( + name=AttributeName.SEASONAL_DEG.value, + default_value=1, + default_low=0, + default_high=5000 + ), + AttributeName.TREND_DEG.value: IntAttribute( + name=AttributeName.TREND_DEG.value, + default_value=1, + default_low=0, + default_high=5000 + ), + AttributeName.LOW_PASS_DEG.value: IntAttribute( + name=AttributeName.LOW_PASS_DEG.value, + default_value=1, + default_low=0, + default_high=5000 + ), + AttributeName.SEASONAL_JUMP.value: IntAttribute( + 
name=AttributeName.SEASONAL_JUMP.value, + default_value=1, + default_low=0, + default_high=5000 + ), + AttributeName.TREND_JUMP.value: IntAttribute( + name=AttributeName.TREND_JUMP.value, + default_value=1, + default_low=0, + default_high=5000 + ), + AttributeName.LOSS_PASS_JUMP.value: IntAttribute( + name=AttributeName.LOSS_PASS_JUMP.value, + default_value=1, + default_low=0, + default_high=5000 + ), +} + +# GAUSSIAN_HMM +gaussian_hmm_attribute_map = { + AttributeName.N_COMPONENTS.value: IntAttribute( + name=AttributeName.N_COMPONENTS.value, + default_value=1, + default_low=1, + default_high=5000 + ), + AttributeName.COVARIANCE_TYPE.value: StringAttribute( + name=AttributeName.COVARIANCE_TYPE.value, + default_value="diag", + value_choices=["spherical", "diag", "full", "tied"], + ), + AttributeName.MIN_COVAR.value: FloatAttribute( + name=AttributeName.MIN_COVAR.value, + default_value=1e-3, + default_low=-1e10, + default_high=1e10, + ), + AttributeName.STARTPROB_PRIOR.value: FloatAttribute( + name=AttributeName.STARTPROB_PRIOR.value, + default_value=1.0, + default_low=-1e10, + default_high=1e10, + ), + AttributeName.TRANSMAT_PRIOR.value: FloatAttribute( + name=AttributeName.TRANSMAT_PRIOR.value, + default_value=1.0, + default_low=-1e10, + default_high=1e10, + ), + AttributeName.MEANS_PRIOR.value: FloatAttribute( + name=AttributeName.MEANS_PRIOR.value, + default_value=0.0, + default_low=-1e10, + default_high=1e10, + ), + AttributeName.MEANS_WEIGHT.value: FloatAttribute( + name=AttributeName.MEANS_WEIGHT.value, + default_value=0.0, + default_low=-1e10, + default_high=1e10, + ), + AttributeName.COVARS_PRIOR.value: FloatAttribute( + name=AttributeName.COVARS_PRIOR.value, + default_value=1e-2, + default_low=-1e10, + default_high=1e10, + ), + AttributeName.COVARS_WEIGHT.value: FloatAttribute( + name=AttributeName.COVARS_WEIGHT.value, + default_value=1.0, + default_low=-1e10, + default_high=1e10, + ), + AttributeName.ALGORITHM.value: StringAttribute( + name=AttributeName.ALGORITHM.value, + default_value="viterbi", + value_choices=["viterbi", "map"], + ), + AttributeName.N_ITER.value: IntAttribute( + name=AttributeName.N_ITER.value, + default_value=10, + default_low=1, + default_high=5000 + ), + AttributeName.TOL.value: FloatAttribute( + name=AttributeName.TOL.value, + default_value=1e-2, + default_low=-1e10, + default_high=1e10, + ), + AttributeName.PARAMS.value: StringAttribute( + name=AttributeName.PARAMS.value, + default_value="stmc", + value_choices=["stmc", "stm"], + ), + AttributeName.INIT_PARAMS.value: StringAttribute( + name=AttributeName.INIT_PARAMS.value, + default_value="stmc", + value_choices=["stmc", "stm"], + ), + AttributeName.IMPLEMENTATION.value: StringAttribute( + name=AttributeName.IMPLEMENTATION.value, + default_value="log", + value_choices=["log", "scaling"], + ) +} + +# GMMHMM +gmmhmm_attribute_map = { + AttributeName.N_COMPONENTS.value: IntAttribute( + name=AttributeName.N_COMPONENTS.value, + default_value=1, + default_low=1, + default_high=5000 + ), + AttributeName.N_MIX.value: IntAttribute( + name=AttributeName.N_MIX.value, + default_value=1, + default_low=1, + default_high=5000 + ), + AttributeName.MIN_COVAR.value: FloatAttribute( + name=AttributeName.MIN_COVAR.value, + default_value=1e-3, + default_low=-1e10, + default_high=1e10, + ), + AttributeName.STARTPROB_PRIOR.value: FloatAttribute( + name=AttributeName.STARTPROB_PRIOR.value, + default_value=1.0, + default_low=-1e10, + default_high=1e10, + ), + AttributeName.TRANSMAT_PRIOR.value: FloatAttribute( + 
name=AttributeName.TRANSMAT_PRIOR.value, + default_value=1.0, + default_low=-1e10, + default_high=1e10, + ), + AttributeName.WEIGHTS_PRIOR.value: FloatAttribute( + name=AttributeName.WEIGHTS_PRIOR.value, + default_value=1.0, + default_low=-1e10, + default_high=1e10, + ), + AttributeName.MEANS_PRIOR.value: FloatAttribute( + name=AttributeName.MEANS_PRIOR.value, + default_value=0.0, + default_low=-1e10, + default_high=1e10, + ), + AttributeName.MEANS_WEIGHT.value: FloatAttribute( + name=AttributeName.MEANS_WEIGHT.value, + default_value=0.0, + default_low=-1e10, + default_high=1e10, + ), + AttributeName.ALGORITHM.value: StringAttribute( + name=AttributeName.ALGORITHM.value, + default_value="viterbi", + value_choices=["viterbi", "map"], + ), + AttributeName.COVARIANCE_TYPE.value: StringAttribute( + name=AttributeName.COVARIANCE_TYPE.value, + default_value="diag", + value_choices=["sperical", "diag", "full", "tied"], + ), + AttributeName.N_ITER.value: IntAttribute( + name=AttributeName.N_ITER.value, + default_value=10, + default_low=1, + default_high=5000 + ), + AttributeName.TOL.value: FloatAttribute( + name=AttributeName.TOL.value, + default_value=1e-2, + default_low=-1e10, + default_high=1e10, + ), + AttributeName.INIT_PARAMS.value: StringAttribute( + name=AttributeName.INIT_PARAMS.value, + default_value="stmcw", + value_choices=["s", "t", "m", "c", "w", "st", "sm", "sc", "sw", "tm", "tc", "tw", "mc", "mw", "cw", "stm", + "stc", "stw", "smc", "smw", "scw", "tmc", "tmw", "tcw", "mcw", "stmc", "stmw", "stcw", "smcw", + "tmcw", "stmcw"] + ), + AttributeName.PARAMS.value: StringAttribute( + name=AttributeName.PARAMS.value, + default_value="stmcw", + value_choices=["s", "t", "m", "c", "w", "st", "sm", "sc", "sw", "tm", "tc", "tw", "mc", "mw", "cw", "stm", + "stc", "stw", "smc", "smw", "scw", "tmc", "tmw", "tcw", "mcw", "stmc", "stmw", "stcw", "smcw", + "tmcw", "stmcw"] + ), + AttributeName.IMPLEMENTATION.value: StringAttribute( + name=AttributeName.IMPLEMENTATION.value, + default_value="log", + value_choices=["log", "scaling"], + ) +} + +# STRAY +stray_attribute_map = { + AttributeName.ALPHA.value: FloatAttribute( + name=AttributeName.ALPHA.value, + default_value=0.01, + default_low=-1e10, + default_high=1e10, + ), + AttributeName.K.value: IntAttribute( + name=AttributeName.K.value, + default_value=10, + default_low=1, + default_high=5000 + ), + AttributeName.KNN_ALGORITHM.value: StringAttribute( + name=AttributeName.KNN_ALGORITHM.value, + default_value="brute", + value_choices=["brute", "kd_tree", "ball_tree", "auto"], + ), + AttributeName.P.value: FloatAttribute( + name=AttributeName.P.value, + default_value=0.5, + default_low=-1e10, + default_high=1e10, + ), + AttributeName.SIZE_THRESHOLD.value: IntAttribute( + name=AttributeName.SIZE_THRESHOLD.value, + default_value=50, + default_low=1, + default_high=5000 + ), + AttributeName.OUTLIER_TAIL.value: StringAttribute( + name=AttributeName.OUTLIER_TAIL.value, + default_value="max", + value_choices=["min", "max"], + ), +} + + +class BuiltInModel(object): + def __init__(self, attributes): + self._attributes = attributes + self._model = None + + @abstractmethod + def inference(self, data): + raise NotImplementedError + + +class ArimaModel(BuiltInModel): + def __init__(self, attributes): + super(ArimaModel, self).__init__(attributes) + self._model = ARIMA( + order=attributes['order'], + seasonal_order=attributes['seasonal_order'], + method=attributes['method'], + suppress_warnings=attributes['suppress_warnings'], + maxiter=attributes['maxiter'], + 
out_of_sample_size=attributes['out_of_sample_size'], + scoring=attributes['scoring'], + with_intercept=attributes['with_intercept'], + time_varying_regression=attributes['time_varying_regression'], + enforce_stationarity=attributes['enforce_stationarity'], + enforce_invertibility=attributes['enforce_invertibility'], + simple_differencing=attributes['simple_differencing'], + measurement_error=attributes['measurement_error'], + mle_regression=attributes['mle_regression'], + hamilton_representation=attributes['hamilton_representation'], + concentrate_scale=attributes['concentrate_scale'] + ) + + def inference(self, data): + try: + predict_length = self._attributes['predict_length'] + self._model.fit(data) + output = self._model.predict(fh=range(predict_length)) + output = np.array(output, dtype=np.float64) + return output + except Exception as e: + raise InferenceModelInternalError(str(e)) + + +class ExponentialSmoothingModel(BuiltInModel): + def __init__(self, attributes): + super(ExponentialSmoothingModel, self).__init__(attributes) + self._model = ExponentialSmoothing( + damped_trend=attributes['damped_trend'], + initialization_method=attributes['initialization_method'], + optimized=attributes['optimized'], + remove_bias=attributes['remove_bias'], + use_brute=attributes['use_brute'] + ) + + def inference(self, data): + try: + predict_length = self._attributes['predict_length'] + self._model.fit(data) + output = self._model.predict(fh=range(predict_length)) + output = np.array(output, dtype=np.float64) + return output + except Exception as e: + raise InferenceModelInternalError(str(e)) + + +class NaiveForecasterModel(BuiltInModel): + def __init__(self, attributes): + super(NaiveForecasterModel, self).__init__(attributes) + self._model = NaiveForecaster( + strategy=attributes['strategy'], + sp=attributes['sp'] + ) + + def inference(self, data): + try: + predict_length = self._attributes['predict_length'] + self._model.fit(data) + output = self._model.predict(fh=range(predict_length)) + output = np.array(output, dtype=np.float64) + return output + except Exception as e: + raise InferenceModelInternalError(str(e)) + + +class STLForecasterModel(BuiltInModel): + def __init__(self, attributes): + super(STLForecasterModel, self).__init__(attributes) + self._model = STLForecaster( + sp=attributes['sp'], + seasonal=attributes['seasonal'], + seasonal_deg=attributes['seasonal_deg'], + trend_deg=attributes['trend_deg'], + low_pass_deg=attributes['low_pass_deg'], + seasonal_jump=attributes['seasonal_jump'], + trend_jump=attributes['trend_jump'], + low_pass_jump=attributes['low_pass_jump'] + ) + + def inference(self, data): + try: + predict_length = self._attributes['predict_length'] + self._model.fit(data) + output = self._model.predict(fh=range(predict_length)) + output = np.array(output, dtype=np.float64) + return output + except Exception as e: + raise InferenceModelInternalError(str(e)) + + +class GMMHMMModel(BuiltInModel): + def __init__(self, attributes): + super(GMMHMMModel, self).__init__(attributes) + self._model = GMMHMM( + n_components=attributes['n_components'], + n_mix=attributes['n_mix'], + min_covar=attributes['min_covar'], + startprob_prior=attributes['startprob_prior'], + transmat_prior=attributes['transmat_prior'], + means_prior=attributes['means_prior'], + means_weight=attributes['means_weight'], + weights_prior=attributes['weights_prior'], + algorithm=attributes['algorithm'], + covariance_type=attributes['covariance_type'], + n_iter=attributes['n_iter'], + tol=attributes['tol'], + 
params=attributes['params'], + init_params=attributes['init_params'], + implementation=attributes['implementation'] + ) + + def inference(self, data): + try: + self._model.fit(data) + output = self._model.predict(data) + output = np.array(output, dtype=np.int32) + return output + except Exception as e: + raise InferenceModelInternalError(str(e)) + + +class GaussianHmmModel(BuiltInModel): + def __init__(self, attributes): + super(GaussianHmmModel, self).__init__(attributes) + self._model = GaussianHMM( + n_components=attributes['n_components'], + covariance_type=attributes['covariance_type'], + min_covar=attributes['min_covar'], + startprob_prior=attributes['startprob_prior'], + transmat_prior=attributes['transmat_prior'], + means_prior=attributes['means_prior'], + means_weight=attributes['means_weight'], + covars_prior=attributes['covars_prior'], + covars_weight=attributes['covars_weight'], + algorithm=attributes['algorithm'], + n_iter=attributes['n_iter'], + tol=attributes['tol'], + params=attributes['params'], + init_params=attributes['init_params'], + implementation=attributes['implementation'] + ) + + def inference(self, data): + try: + self._model.fit(data) + output = self._model.predict(data) + output = np.array(output, dtype=np.int32) + return output + except Exception as e: + raise InferenceModelInternalError(str(e)) + + +class STRAYModel(BuiltInModel): + def __init__(self, attributes): + super(STRAYModel, self).__init__(attributes) + self._model = STRAY( + alpha=attributes['alpha'], + k=attributes['k'], + knn_algorithm=attributes['knn_algorithm'], + p=attributes['p'], + size_threshold=attributes['size_threshold'], + outlier_tail=attributes['outlier_tail'] + ) + + def inference(self, data): + try: + data = MinMaxScaler().fit_transform(data) + output = self._model.fit_transform(data) + # change the output to int + output = np.array(output, dtype=np.int32) + return output + except Exception as e: + raise InferenceModelInternalError(str(e)) diff --git a/iotdb-core/ainode/iotdb/ainode/model/model_factory.py b/iotdb-core/ainode/iotdb/ainode/model/model_factory.py new file mode 100644 index 0000000000000..a163cf1fa8442 --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/model/model_factory.py @@ -0,0 +1,235 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
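+#
+# This factory resolves a model URI to a (model.pt, config.yaml) pair: http(s)
+# URIs are downloaded with retries, anything else (including file:// and ~) is
+# treated as a local directory and copied. A hedged usage sketch; all paths
+# here are illustrative, not defaults of the module:
+#
+# configs, attributes = fetch_model_by_uri(
+# 'https://example.org/models/mymodel/', # or '/data/models/mymodel'
+# '/data/ainode/models/mymodel/model.pt',
+# '/data/ainode/models/mymodel/config.yaml')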
+# +import os +import shutil +from urllib.parse import urlparse, urljoin + +import yaml +from requests import Session +from requests.adapters import HTTPAdapter + +from iotdb.ainode.constant import DEFAULT_RECONNECT_TIMES, DEFAULT_RECONNECT_TIMEOUT, DEFAULT_CHUNK_SIZE, \ + DEFAULT_CONFIG_FILE_NAME, DEFAULT_MODEL_FILE_NAME +from iotdb.ainode.exception import InvalidUriError, BadConfigValueError +from iotdb.ainode.log import Logger +from iotdb.ainode.util.serde import get_data_type_byte_from_str +from iotdb.thrift.ainode.ttypes import TConfigs + +HTTP_PREFIX = "http://" +HTTPS_PREFIX = "https://" + +logger = Logger() + + +def _parse_uri(uri): + """ + Args: + uri (str): uri to parse + Returns: + is_network_path (bool): True if the url is a network path, False otherwise + parsed_uri (str): parsed uri to get related file + """ + + parse_result = urlparse(uri) + is_network_path = parse_result.scheme in ('http', 'https') + if is_network_path: + return True, uri + + # handle file:// in uri + if parse_result.scheme == 'file': + uri = uri[7:] + + # handle ~ in uri + uri = os.path.expanduser(uri) + return False, uri + + +def _download_file(url: str, storage_path: str) -> None: + """ + Args: + url: url of file to download + storage_path: path to save the file + Returns: + None + """ + logger.debug(f"download file from {url} to {storage_path}") + + session = Session() + adapter = HTTPAdapter(max_retries=DEFAULT_RECONNECT_TIMES) + session.mount(HTTP_PREFIX, adapter) + session.mount(HTTPS_PREFIX, adapter) + + response = session.get(url, timeout=DEFAULT_RECONNECT_TIMEOUT, stream=True) + response.raise_for_status() + + with open(storage_path, 'wb') as file: + for chunk in response.iter_content(chunk_size=DEFAULT_CHUNK_SIZE): + if chunk: + file.write(chunk) + + logger.debug(f"download file from {url} to {storage_path} success") + + +def _register_model_from_network(uri: str, model_storage_path: str, + config_storage_path: str) -> [TConfigs, str]: + """ + Args: + uri: network dir path of model to register, where model.pt and config.yaml are required, + e.g. https://huggingface.co/user/modelname/resolve/main/ + model_storage_path: path to save model.pt + config_storage_path: path to save config.yaml + Returns: + configs: TConfigs + attributes: str + """ + # concat uri to get complete url + uri = uri if uri.endswith("/") else uri + "/" + target_model_path = urljoin(uri, DEFAULT_MODEL_FILE_NAME) + target_config_path = urljoin(uri, DEFAULT_CONFIG_FILE_NAME) + + # download config file + _download_file(target_config_path, config_storage_path) + + # read and parse config dict from config.yaml + with open(config_storage_path, 'r', encoding='utf-8') as file: + config_dict = yaml.safe_load(file) + configs, attributes = _parse_inference_config(config_dict) + + # if config.yaml is correct, download model file + _download_file(target_model_path, model_storage_path) + return configs, attributes + + +def _register_model_from_local(uri: str, model_storage_path: str, + config_storage_path: str) -> [TConfigs, str]: + """ + Args: + uri: local dir path of model to register, where model.pt and config.yaml are required, + e.g. 
/Users/admin/Desktop/model + model_storage_path: path to save model.pt + config_storage_path: path to save config.yaml + Returns: + configs: TConfigs + attributes: str + """ + # concat uri to get complete path + target_model_path = os.path.join(uri, DEFAULT_MODEL_FILE_NAME) + target_config_path = os.path.join(uri, DEFAULT_CONFIG_FILE_NAME) + + # check if file exist + exist_model_file = os.path.exists(target_model_path) + exist_config_file = os.path.exists(target_config_path) + + configs = None + attributes = None + if exist_model_file and exist_config_file: + # copy config.yaml + logger.debug(f"copy file from {target_config_path} to {config_storage_path}") + shutil.copy(target_config_path, config_storage_path) + logger.debug(f"copy file from {target_config_path} to {config_storage_path} success") + + # read and parse config dict from config.yaml + with open(config_storage_path, 'r', encoding='utf-8') as file: + config_dict = yaml.safe_load(file) + configs, attributes = _parse_inference_config(config_dict) + + # if config.yaml is correct, copy model file + logger.debug(f"copy file from {target_model_path} to {model_storage_path}") + shutil.copy(target_model_path, model_storage_path) + logger.debug(f"copy file from {target_model_path} to {model_storage_path} success") + + elif not exist_model_file or not exist_config_file: + raise InvalidUriError(uri) + + return configs, attributes + + +def _parse_inference_config(config_dict): + """ + Args: + config_dict: dict + - configs: dict + - input_shape (list): input shape of the model and needs to be two-dimensional array like [96, 2] + - output_shape (list): output shape of the model and needs to be two-dimensional array like [96, 2] + - input_type (list): input type of the model and each element needs to be in ['bool', 'int32', 'int64', 'float32', 'float64', 'text'], default float64 + - output_type (list): output type of the model and each element needs to be in ['bool', 'int32', 'int64', 'float32', 'float64', 'text'], default float64 + - attributes: dict + Returns: + configs: TConfigs + attributes: str + """ + configs = config_dict['configs'] + + # check if input_shape and output_shape are two-dimensional array + if not (isinstance(configs['input_shape'], list) and len(configs['input_shape']) == 2): + raise BadConfigValueError('input_shape', configs['input_shape'], + 'input_shape should be a two-dimensional array.') + if not (isinstance(configs['output_shape'], list) and len(configs['output_shape']) == 2): + raise BadConfigValueError('output_shape', configs['output_shape'], + 'output_shape should be a two-dimensional array.') + + # check if input_shape and output_shape are positive integer + input_shape_is_positive_number = isinstance(configs['input_shape'][0], int) and isinstance( + configs['input_shape'][1], int) and configs['input_shape'][0] > 0 and configs['input_shape'][1] > 0 + if not input_shape_is_positive_number: + raise BadConfigValueError('input_shape', configs['input_shape'], + 'element in input_shape should be positive integer.') + + output_shape_is_positive_number = isinstance(configs['output_shape'][0], int) and isinstance( + configs['output_shape'][1], int) and configs['output_shape'][0] > 0 and configs['output_shape'][1] > 0 + if not output_shape_is_positive_number: + raise BadConfigValueError('output_shape', configs['output_shape'], + 'element in output_shape should be positive integer.') + + # check if input_type and output_type are one-dimensional array with right length + if 'input_type' in configs and not ( + 
isinstance(configs['input_type'], list) and len(configs['input_type']) == configs['input_shape'][1]): + raise BadConfigValueError('input_type', configs['input_type'], + 'input_type should be a one-dimensional array and length of it should be equal to input_shape[1].') + + if 'output_type' in configs and not ( + isinstance(configs['output_type'], list) and len(configs['output_type']) == configs['output_shape'][1]): + raise BadConfigValueError('output_type', configs['output_type'], + 'output_type should be a one-dimensional array and length of it should be equal to output_shape[1].') + + # parse input_type and output_type to byte + if 'input_type' in configs: + input_type = [get_data_type_byte_from_str(x) for x in configs['input_type']] + else: + input_type = [get_data_type_byte_from_str('float32')] * configs['input_shape'][1] + + if 'output_type' in configs: + output_type = [get_data_type_byte_from_str(x) for x in configs['output_type']] + else: + output_type = [get_data_type_byte_from_str('float32')] * configs['output_shape'][1] + + # parse attributes + attributes = "" + if 'attributes' in config_dict: + attributes = str(config_dict['attributes']) + + return TConfigs(configs['input_shape'], configs['output_shape'], input_type, output_type), attributes + + +def fetch_model_by_uri(uri: str, model_storage_path: str, config_storage_path: str): + is_network_path, uri = _parse_uri(uri) + + if is_network_path: + return _register_model_from_network(uri, model_storage_path, config_storage_path) + else: + return _register_model_from_local(uri, model_storage_path, config_storage_path) diff --git a/iotdb-core/ainode/iotdb/ainode/model/model_storage.py b/iotdb-core/ainode/iotdb/ainode/model/model_storage.py new file mode 100644 index 0000000000000..9a1df5f9c2000 --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/model/model_storage.py @@ -0,0 +1,113 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
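+#
+# ModelStorage below keeps loaded TorchScript modules in an LRU cache keyed by
+# file path, and stripes access by model id through a pool of read/write locks:
+# load_model takes the read lock, delete_model the write lock. When
+# acceleration is requested, a cached uncompiled module is upgraded in place
+# with torch.compile; if compilation fails, the code logs a warning and falls
+# back to the uncompiled module.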
+# + +import os +import shutil +from collections.abc import Callable + +import torch +import torch._dynamo +from pylru import lrucache + +from iotdb.ainode.config import AINodeDescriptor +from iotdb.ainode.constant import (DEFAULT_MODEL_FILE_NAME, + DEFAULT_CONFIG_FILE_NAME) +from iotdb.ainode.exception import ModelNotExistError +from iotdb.ainode.log import Logger +from iotdb.ainode.model.model_factory import fetch_model_by_uri +from iotdb.ainode.util.lock import ModelLockPool + +logger = Logger() + + +class ModelStorage(object): + def __init__(self): + self._model_dir = os.path.join(os.getcwd(), AINodeDescriptor().get_config().get_ain_models_dir()) + if not os.path.exists(self._model_dir): + try: + os.makedirs(self._model_dir) + except PermissionError as e: + logger.error(e) + raise e + self._lock_pool = ModelLockPool() + self._model_cache = lrucache(AINodeDescriptor().get_config().get_ain_model_storage_cache_size()) + + def register_model(self, model_id: str, uri: str): + """ + Args: + model_id: id of model to register + uri: network dir path or local dir path of model to register, where model.pt and config.yaml are required, + e.g. https://huggingface.co/user/modelname/resolve/main/ or /Users/admin/Desktop/model + Returns: + configs: TConfigs + attributes: str + """ + storage_path = os.path.join(self._model_dir, f'{model_id}') + # create storage dir if not exist + if not os.path.exists(storage_path): + os.makedirs(storage_path) + model_storage_path = os.path.join(storage_path, DEFAULT_MODEL_FILE_NAME) + config_storage_path = os.path.join(storage_path, DEFAULT_CONFIG_FILE_NAME) + return fetch_model_by_uri(uri, model_storage_path, config_storage_path) + + def load_model(self, model_id: str, acceleration: bool) -> Callable: + """ + Returns: + model: a ScriptModule contains model architecture and parameters, which can be deployed cross-platform + """ + ain_models_dir = os.path.join(self._model_dir, f'{model_id}') + model_path = os.path.join(ain_models_dir, DEFAULT_MODEL_FILE_NAME) + with self._lock_pool.get_lock(model_id).read_lock(): + if model_path in self._model_cache: + model = self._model_cache[model_path] + if isinstance(model, torch._dynamo.eval_frame.OptimizedModule) or not acceleration: + return model + else: + model = torch.compile(model) + self._model_cache[model_path] = model + return model + else: + if not os.path.exists(model_path): + raise ModelNotExistError(model_path) + else: + model = torch.jit.load(model_path) + if acceleration: + try: + model = torch.compile(model) + except Exception as e: + logger.warning(f"acceleration failed, fallback to normal mode: {str(e)}") + self._model_cache[model_path] = model + return model + + def delete_model(self, model_id: str) -> None: + """ + Args: + model_id: id of model to delete + Returns: + None + """ + storage_path = os.path.join(self._model_dir, f'{model_id}') + with self._lock_pool.get_lock(model_id).write_lock(): + if os.path.exists(storage_path): + for file_name in os.listdir(storage_path): + self._remove_from_cache(os.path.join(storage_path, file_name)) + shutil.rmtree(storage_path) + + def _remove_from_cache(self, file_path: str) -> None: + if file_path in self._model_cache: + del self._model_cache[file_path] diff --git a/iotdb-core/ainode/iotdb/ainode/script.py b/iotdb-core/ainode/iotdb/ainode/script.py new file mode 100644 index 0000000000000..e06a3fe77e045 --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/script.py @@ -0,0 +1,177 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor 
license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +import os +import shutil +import sys +from datetime import datetime + +import psutil + +from iotdb.ainode.client import ClientManager +from iotdb.ainode.config import AINodeDescriptor +from iotdb.ainode.constant import TSStatusCode, AINODE_SYSTEM_FILE_NAME +from iotdb.ainode.exception import MissingConfigError +from iotdb.ainode.log import Logger +from iotdb.ainode.service import RPCService +from iotdb.thrift.common.ttypes import TAINodeLocation, TEndPoint, TAINodeConfiguration, TNodeResource +from iotdb.thrift.confignode.ttypes import TNodeVersionInfo + +logger = Logger() + + +def _generate_configuration() -> TAINodeConfiguration: + location = TAINodeLocation(AINodeDescriptor().get_config().get_ainode_id(), + TEndPoint(AINodeDescriptor().get_config().get_ain_inference_rpc_address(), + AINodeDescriptor().get_config().get_ain_inference_rpc_port())) + resource = TNodeResource( + int(psutil.cpu_count()), + int(psutil.virtual_memory()[0]) + ) + + return TAINodeConfiguration(location, resource) + + +def _generate_version_info() -> TNodeVersionInfo: + return TNodeVersionInfo(AINodeDescriptor().get_config().get_version_info(), + AINodeDescriptor().get_config().get_build_info()) + + +def _check_path_permission(): + system_path = AINodeDescriptor().get_config().get_ain_system_dir() + if not os.path.exists(system_path): + try: + os.makedirs(system_path) + os.chmod(system_path, 0o777) + except PermissionError as e: + logger.error(e) + raise e + + +def start_ainode(): + _check_path_permission() + system_properties_file = os.path.join(AINodeDescriptor().get_config().get_ain_system_dir(), AINODE_SYSTEM_FILE_NAME) + if not os.path.exists(system_properties_file): + # If the system.properties file does not exist, the AINode will register to ConfigNode. 
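+ # On first start this writes a system.properties file of the form
+ # (values below are illustrative):
+ #
+ # #2024-01-01 12:00:00.000000
+ # ainode_id=1
+ # cluster_name=defaultCluster
+ # iotdb_version=...
+ # commit_id=...
+ # ain_rpc_address=127.0.0.1
+ # ain_rpc_port=10810
+ # config_node_list=...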
+ try:
+ logger.info('IoTDB-AINode is registering to ConfigNode...')
+ ainode_id = ClientManager().borrow_config_node_client().node_register(
+ AINodeDescriptor().get_config().get_cluster_name(),
+ _generate_configuration(),
+ _generate_version_info())
+ AINodeDescriptor().get_config().set_ainode_id(ainode_id)
+ system_properties = {
+ 'ainode_id': ainode_id,
+ 'cluster_name': AINodeDescriptor().get_config().get_cluster_name(),
+ 'iotdb_version': AINodeDescriptor().get_config().get_version_info(),
+ 'commit_id': AINodeDescriptor().get_config().get_build_info(),
+ 'ain_rpc_address': AINodeDescriptor().get_config().get_ain_inference_rpc_address(),
+ 'ain_rpc_port': AINodeDescriptor().get_config().get_ain_inference_rpc_port(),
+ 'config_node_list': AINodeDescriptor().get_config().get_ain_target_config_node_list(),
+ }
+ with open(system_properties_file, 'w') as f:
+ f.write('#' + str(datetime.now()) + '\n')
+ for key, value in system_properties.items():
+ f.write(key + '=' + str(value) + '\n')
+
+ except Exception as e:
+ logger.error('IoTDB-AINode failed to register to ConfigNode: {}'.format(e))
+ raise e
+ else:
+ # If the system.properties file does exist, the AINode will just restart.
+ try:
+ logger.info('IoTDB-AINode is restarting...')
+ ClientManager().borrow_config_node_client().node_restart(
+ AINodeDescriptor().get_config().get_cluster_name(),
+ _generate_configuration(),
+ _generate_version_info())
+
+ except Exception as e:
+ logger.error('IoTDB-AINode failed to restart: {}'.format(e))
+ raise e
+
+ rpc_service = RPCService()
+ rpc_service.start()
+ rpc_service.join(1)
+ if rpc_service.exit_code != 0:
+ return
+
+ logger.info('IoTDB-AINode has successfully started.')
+
+
+def remove_ainode(arguments):
+ # Delete the current node
+ if len(arguments) == 2:
+ target_ainode_id = AINodeDescriptor().get_config().get_ainode_id()
+ target_rpc_address = AINodeDescriptor().get_config().get_ain_inference_rpc_address()
+ target_rpc_port = AINodeDescriptor().get_config().get_ain_inference_rpc_port()
+
+ # Delete the node with a given id
+ elif len(arguments) == 3:
+ target_ainode_id = int(arguments[2])
+ ainode_configuration_map = ClientManager().borrow_config_node_client().get_ainode_configuration(
+ target_ainode_id)
+
+ end_point = ainode_configuration_map[target_ainode_id].location.internalEndPoint
+ if not end_point:
+ raise MissingConfigError("NodeId: {} not found in cluster ".format(target_ainode_id))
+
+ target_rpc_address = end_point.ip
+ target_rpc_port = end_point.port
+
+ logger.info('Got target AINode id: {}'.format(target_ainode_id))
+
+ else:
+ raise MissingConfigError("Invalid command")
+
+ location = TAINodeLocation(target_ainode_id, TEndPoint(target_rpc_address, target_rpc_port))
+ status = ClientManager().borrow_config_node_client().node_remove(location)
+
+ if status.code == TSStatusCode.SUCCESS_STATUS.get_status_code():
+ logger.info('IoTDB-AINode has been successfully removed.')
+ if os.path.exists(AINodeDescriptor().get_config().get_ain_models_dir()):
+ shutil.rmtree(AINodeDescriptor().get_config().get_ain_models_dir())
+
+
+def main():
+ arguments = sys.argv
+ # load config
+ AINodeDescriptor()
+ if len(arguments) == 1:
+ logger.info("Command line argument must be specified.")
+ return
+ command = arguments[1]
+ if command == 'start':
+ try:
+ logger.info('IoTDB-AINode is starting...')
+ start_ainode()
+ except Exception as e:
+ logger.error("Start AINode failed, because of: {}".format(e))
+ sys.exit(1)
+ elif command == 'remove':
+ try:
+ logger.info("Removing AINode...")
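+ # 'remove' with no extra argument removes this node; 'remove <ainode_id>'
+ # resolves the target node's RPC location via ConfigNode first (see remove_ainode above)
+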
remove_ainode(arguments) + except Exception as e: + logger.error("Remove AINode failed, because of: {}".format(e)) + sys.exit(1) + else: + logger.warning("Unknown argument: {}.".format(command)) + + +if __name__ == '__main__': + main() diff --git a/iotdb-core/ainode/iotdb/ainode/service.py b/iotdb-core/ainode/iotdb/ainode/service.py new file mode 100644 index 0000000000000..54954dd7d5001 --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/service.py @@ -0,0 +1,53 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +import threading + +from thrift.protocol import TCompactProtocol, TBinaryProtocol +from thrift.server import TServer +from thrift.transport import TSocket, TTransport + +from iotdb.ainode.config import AINodeDescriptor +from iotdb.ainode.handler import AINodeRPCServiceHandler +from iotdb.ainode.log import Logger +from iotdb.thrift.ainode import IAINodeRPCService + +logger = Logger() + + +class RPCService(threading.Thread): + def __init__(self): + self.exit_code = 0 + super().__init__() + processor = IAINodeRPCService.Processor(handler=AINodeRPCServiceHandler()) + transport = TSocket.TServerSocket(host=AINodeDescriptor().get_config().get_ain_inference_rpc_address(), + port=AINodeDescriptor().get_config().get_ain_inference_rpc_port()) + transport_factory = TTransport.TFramedTransportFactory() + if AINodeDescriptor().get_config().get_ain_thrift_compression_enabled(): + protocol_factory = TCompactProtocol.TCompactProtocolFactory() + else: + protocol_factory = TBinaryProtocol.TBinaryProtocolFactory() + + self.__pool_server = TServer.TThreadPoolServer(processor, transport, transport_factory, protocol_factory) + + def run(self) -> None: + logger.info("The RPC service thread begin to run...") + try: + self.__pool_server.serve() + except Exception as e: + self.exit_code = 1 + logger.error(e) diff --git a/iotdb-core/ainode/iotdb/ainode/util/__init__.py b/iotdb-core/ainode/iotdb/ainode/util/__init__.py new file mode 100644 index 0000000000000..2a1e720805f29 --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/util/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +# diff --git a/iotdb-core/ainode/iotdb/ainode/util/decorator.py b/iotdb-core/ainode/iotdb/ainode/util/decorator.py new file mode 100644 index 0000000000000..33b9f4835ac8a --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/util/decorator.py @@ -0,0 +1,26 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +def singleton(cls): + instances = {} + + def get_instance(*args, **kwargs): + if cls not in instances: + instances[cls] = cls(*args, **kwargs) + return instances[cls] + + return get_instance diff --git a/iotdb-core/ainode/iotdb/ainode/util/lock.py b/iotdb-core/ainode/iotdb/ainode/util/lock.py new file mode 100644 index 0000000000000..91abbedad249f --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/util/lock.py @@ -0,0 +1,84 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
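+#
+# ReadWriteLock below is a counting readers-writer lock: the first reader
+# acquires the internal write lock and the last reader releases it, so a writer
+# blocks until all readers are gone (and vice versa). Intended usage is through
+# the context managers; a sketch with an illustrative model id:
+#
+# lock = ModelLockPool().get_lock('example_model')
+# with lock.read_lock():
+# pass # concurrent readers, e.g. model loads
+# with lock.write_lock():
+# pass # exclusive writer, e.g. model deletion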
+# +import hashlib +import threading + + +class ReadWriteLock: + def __init__(self): + self._reader_num = 0 + self._read_lock = threading.Lock() + self._write_lock = threading.Lock() + + def acquire_read(self): + with self._read_lock: + if self._reader_num == 0: + self._write_lock.acquire() + self._reader_num += 1 + + def release_read(self): + with self._read_lock: + self._reader_num -= 1 + if self._reader_num == 0: + self._write_lock.release() + + def acquire_write(self): + self._write_lock.acquire() + + def release_write(self): + self._write_lock.release() + + class ReadLockContext: + def __init__(self, rw_lock): + self.rw_lock = rw_lock + + def __enter__(self): + self.rw_lock.acquire_read() + + def __exit__(self, exc_type, exc_value, traceback): + self.rw_lock.release_read() + + class WriteLockContext: + def __init__(self, rw_lock): + self.rw_lock = rw_lock + + def __enter__(self): + self.rw_lock.acquire_write() + + def __exit__(self, exc_type, exc_value, traceback): + self.rw_lock.release_write() + + def read_lock(self): + return self.ReadLockContext(self) + + def write_lock(self): + return self.WriteLockContext(self) + + +def hash_model_id(model_id): + return int(hashlib.md5(str(model_id).encode()).hexdigest(), 16) + + +class ModelLockPool: + def __init__(self, pool_size=16): + self._pool = [ReadWriteLock() for _ in range(pool_size)] + self._pool_size = pool_size + + def get_lock(self, model_id): + pool_index = hash_model_id(model_id) % self._pool_size + return self._pool[pool_index] diff --git a/iotdb-core/ainode/iotdb/ainode/util/serde.py b/iotdb-core/ainode/iotdb/ainode/util/serde.py new file mode 100644 index 0000000000000..4338dcdfefc85 --- /dev/null +++ b/iotdb-core/ainode/iotdb/ainode/util/serde.py @@ -0,0 +1,564 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
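+#
+# Round-trip sketch (illustrative; assumes a value-only DataFrame, since
+# convert_to_binary() below writes the time column itself):
+#
+#     df = pd.DataFrame({"s0": np.array([1.0, 2.0], dtype=np.float64)})
+#     binary = convert_to_binary(df)
+#     restored = convert_to_df(["s0"], ["DOUBLE"], None, [binary])
+#
+# Here `binary` begins with b'\x00\x00\x00\x01' (one value column), b'\x04'
+# (DOUBLE), and b'\x00\x00\x00\x02' (two positions), followed by the column
+# encodings and the time/value column bytes.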
+# +import struct +from enum import Enum + +import numpy as np +import pandas as pd + +from iotdb.ainode.exception import BadConfigValueError + + +class TSDataType(Enum): + BOOLEAN = 0 + INT32 = 1 + INT64 = 2 + FLOAT = 3 + DOUBLE = 4 + TEXT = 5 + + # this method is implemented to avoid the issue reported by: + # https://bugs.python.org/issue30545 + def __eq__(self, other) -> bool: + return self.value == other.value + + def __hash__(self): + return self.value + + def np_dtype(self): + return { + TSDataType.BOOLEAN: np.dtype(">?"), + TSDataType.FLOAT: np.dtype(">f4"), + TSDataType.DOUBLE: np.dtype(">f8"), + TSDataType.INT32: np.dtype(">i4"), + TSDataType.INT64: np.dtype(">i8"), + TSDataType.TEXT: np.dtype("str"), + }[self] + + +TIMESTAMP_STR = "Time" +START_INDEX = 2 + + +# convert dataFrame to tsBlock in binary +# input shouldn't contain time column +def convert_to_binary(data_frame: pd.DataFrame): + data_shape = data_frame.shape + value_column_size = data_shape[1] + position_count = data_shape[0] + keys = data_frame.keys() + + binary = value_column_size.to_bytes(4, byteorder="big") + + for data_type in data_frame.dtypes: + binary += _get_type_in_byte(data_type) + + # position count + binary += position_count.to_bytes(4, byteorder="big") + + # column encoding + binary += b'\x02' + for data_type in data_frame.dtypes: + binary += _get_encoder(data_type) + + # write columns, the column in index 0 must be timeColumn + binary += bool.to_bytes(False, 1, byteorder="big") + for i in range(position_count): + value = 0 + v = struct.pack(">i", value) + binary += v + binary += v + + for i in range(value_column_size): + # the value can't be null + binary += bool.to_bytes(False, 1, byteorder="big") + col = data_frame[keys[i]] + for j in range(position_count): + value = col[j] + if value.dtype.byteorder != '>': + value = value.byteswap() + binary += value.tobytes() + + return binary + + +# convert tsBlock in binary to dataFrame +def convert_to_df(name_list, type_list, name_index, binary_list): + column_name_list = [TIMESTAMP_STR] + column_type_list = [TSDataType.INT64] + column_ordinal_dict = {TIMESTAMP_STR: 1} + + if name_index is not None: + column_type_deduplicated_list = [ + None for _ in range(len(name_index)) + ] + for i in range(len(name_list)): + name = name_list[i] + column_name_list.append(name) + column_type_list.append(TSDataType[type_list[i]]) + if name not in column_ordinal_dict: + index = name_index[name] + column_ordinal_dict[name] = index + START_INDEX + column_type_deduplicated_list[index] = TSDataType[type_list[i]] + else: + index = START_INDEX + column_type_deduplicated_list = [] + for i in range(len(name_list)): + name = name_list[i] + column_name_list.append(name) + column_type_list.append(TSDataType[type_list[i]]) + if name not in column_ordinal_dict: + column_ordinal_dict[name] = index + index += 1 + column_type_deduplicated_list.append( + TSDataType[type_list[i]] + ) + + binary_size = len(binary_list) + binary_index = 0 + result = {} + for column_name in column_name_list: + result[column_name] = None + + while binary_index < binary_size: + buffer = binary_list[binary_index] + binary_index += 1 + time_column_values, column_values, null_indicators, _ = deserialize(buffer) + time_array = np.frombuffer( + time_column_values, np.dtype(np.longlong).newbyteorder(">") + ) + if time_array.dtype.byteorder == ">": + time_array = time_array.byteswap().newbyteorder("<") + + if result[TIMESTAMP_STR] is None: + result[TIMESTAMP_STR] = time_array + else: + result[TIMESTAMP_STR] = np.concatenate( 
+                    (result[TIMESTAMP_STR], time_array), axis=0
+                )
+        total_length = len(time_array)
+
+        for i in range(len(column_values)):
+            column_name = column_name_list[i + 1]
+
+            location = column_ordinal_dict[column_name] - START_INDEX
+            if location < 0:
+                continue
+
+            data_type = column_type_deduplicated_list[location]
+            value_buffer = column_values[location]
+            value_buffer_len = len(value_buffer)
+
+            if data_type == TSDataType.DOUBLE:
+                data_array = np.frombuffer(
+                    value_buffer, np.dtype(np.double).newbyteorder(">")
+                )
+            elif data_type == TSDataType.FLOAT:
+                data_array = np.frombuffer(
+                    value_buffer, np.dtype(np.float32).newbyteorder(">")
+                )
+            elif data_type == TSDataType.BOOLEAN:
+                data_array = []
+                for index in range(len(value_buffer)):
+                    data_array.append(value_buffer[index])
+                data_array = np.array(data_array).astype("bool")
+            elif data_type == TSDataType.INT32:
+                data_array = np.frombuffer(
+                    value_buffer, np.dtype(np.int32).newbyteorder(">")
+                )
+            elif data_type == TSDataType.INT64:
+                data_array = np.frombuffer(
+                    value_buffer, np.dtype(np.int64).newbyteorder(">")
+                )
+            elif data_type == TSDataType.TEXT:
+                index = 0
+                data_array = []
+                while index < value_buffer_len:
+                    value_bytes = value_buffer[index]
+                    value = value_bytes.decode("utf-8")
+                    data_array.append(value)
+                    index += 1
+                data_array = np.array(data_array, dtype=object)
+            else:
+                raise RuntimeError("unsupported data type {}.".format(data_type))
+
+            if data_array.dtype.byteorder == ">":
+                data_array = data_array.byteswap().newbyteorder("<")
+
+            null_indicator = null_indicators[location]
+            if len(data_array) < total_length or (data_type == TSDataType.BOOLEAN and null_indicator is not None):
+                if data_type == TSDataType.INT32 or data_type == TSDataType.INT64:
+                    tmp_array = np.full(total_length, np.nan, np.float32)
+                elif data_type == TSDataType.FLOAT or data_type == TSDataType.DOUBLE:
+                    tmp_array = np.full(total_length, np.nan, data_array.dtype)
+                elif data_type == TSDataType.BOOLEAN:
+                    tmp_array = np.full(total_length, np.nan, np.float32)
+                elif data_type == TSDataType.TEXT:
+                    tmp_array = np.full(total_length, np.nan, dtype=data_array.dtype)
+                else:
+                    raise Exception("Unsupported dataType in deserialization")
+
+                if null_indicator is not None:
+                    indexes = [not v for v in null_indicator]
+                    if data_type == TSDataType.BOOLEAN:
+                        tmp_array[indexes] = data_array[indexes]
+                    else:
+                        tmp_array[indexes] = data_array
+
+                if data_type == TSDataType.INT32:
+                    tmp_array = pd.Series(tmp_array).astype("Int32")
+                elif data_type == TSDataType.INT64:
+                    tmp_array = pd.Series(tmp_array).astype("Int64")
+                elif data_type == TSDataType.BOOLEAN:
+                    tmp_array = pd.Series(tmp_array).astype("boolean")
+
+                data_array = tmp_array
+
+            if result[column_name] is None:
+                result[column_name] = data_array
+            else:
+                if isinstance(result[column_name], pd.Series):
+                    if not isinstance(data_array, pd.Series):
+                        if data_type == TSDataType.INT32:
+                            data_array = pd.Series(data_array).astype("Int32")
+                        elif data_type == TSDataType.INT64:
+                            data_array = pd.Series(data_array).astype("Int64")
+                        elif data_type == TSDataType.BOOLEAN:
+                            data_array = pd.Series(data_array).astype("boolean")
+                        else:
+                            raise RuntimeError("Series Error")
+                    result[column_name] = result[column_name].append(data_array)
+                else:
+                    result[column_name] = np.concatenate(
+                        (result[column_name], data_array), axis=0
+                    )
+    for k, v in result.items():
+        if v is None:
+            result[k] = []
+    df = pd.DataFrame(result)
+    df = df.reset_index(drop=True)
+    return df
+
+
+def _get_encoder(data_type: pd.Series):
+    if data_type == "bool":
+        return b'\x00'
+    elif data_type == "int32" or data_type == "float32":
+        return b'\x01'
+    elif data_type == "int64" or data_type == "float64":
+        return b'\x02'
+    elif data_type == "text":
+        return b'\x03'
+    else:
+        raise BadConfigValueError('data_type', data_type,
+                                  "data_type should be in ['bool', 'int32', 'int64', 'float32', 'float64', 'text']")
+
+
+def _get_type_in_byte(data_type: pd.Series):
+    if data_type == 'bool':
+        return b'\x00'
+    elif data_type == 'int32':
+        return b'\x01'
+    elif data_type == 'int64':
+        return b'\x02'
+    elif data_type == 'float32':
+        return b'\x03'
+    elif data_type == 'float64':
+        return b'\x04'
+    elif data_type == 'text':
+        return b'\x05'
+    else:
+        raise BadConfigValueError('data_type', data_type,
+                                  "data_type should be in ['bool', 'int32', 'int64', 'float32', 'float64', 'text']")
+
+
+# Serialized tsBlock:
+#    +-------------+---------------+---------+------------+----------+---------+
+#    | val col cnt | val col types | pos cnt | encodings  | time col | val col |
+#    +-------------+---------------+---------+------------+----------+---------+
+#    | int32       | list[byte]    | int32   | list[byte] | bytes    | byte    |
+#    +-------------+---------------+---------+------------+----------+---------+
+
+def deserialize(buffer):
+    value_column_count, buffer = read_int_from_buffer(buffer)
+    data_types, buffer = read_column_types(buffer, value_column_count)
+
+    position_count, buffer = read_int_from_buffer(buffer)
+    column_encodings, buffer = read_column_encoding(buffer, value_column_count + 1)
+
+    time_column_values, buffer = read_time_column(buffer, position_count)
+    column_values = [None] * value_column_count
+    null_indicators = [None] * value_column_count
+    for i in range(value_column_count):
+        column_value, null_indicator, buffer = read_column(column_encodings[i + 1], buffer, data_types[i],
+                                                           position_count)
+        column_values[i] = column_value
+        null_indicators[i] = null_indicator
+
+    return time_column_values, column_values, null_indicators, position_count
+
+
+# General Methods
+
+def read_int_from_buffer(buffer):
+    res, buffer = read_from_buffer(buffer, 4)
+    return int.from_bytes(res, "big"), buffer
+
+
+def read_byte_from_buffer(buffer):
+    return read_from_buffer(buffer, 1)
+
+
+def read_from_buffer(buffer, size):
+    res = buffer[:size]
+    buffer = buffer[size:]
+    return res, buffer
+
+
+# Read ColumnType
+
+def read_column_types(buffer, value_column_count):
+    data_types = []
+    for _ in range(value_column_count):
+        res, buffer = read_byte_from_buffer(buffer)
+        data_types.append(get_data_type(res))
+    return data_types, buffer
+
+
+def get_data_type(value):
+    if value == b'\x00':
+        return TSDataType.BOOLEAN
+    elif value == b'\x01':
+        return TSDataType.INT32
+    elif value == b'\x02':
+        return TSDataType.INT64
+    elif value == b'\x03':
+        return TSDataType.FLOAT
+    elif value == b'\x04':
+        return TSDataType.DOUBLE
+    elif value == b'\x05':
+        return TSDataType.TEXT
+
+
+def get_data_type_byte_from_str(value):
+    '''
+    Args:
+        value (str): data type in ['bool', 'int32', 'int64', 'float32', 'float64', 'text']
+    Returns:
+        byte: corresponding data type in [b'\x00', b'\x01', b'\x02', b'\x03', b'\x04', b'\x05']
+    '''
+    if value not in ['bool', 'int32', 'int64', 'float32', 'float64', 'text']:
+        raise BadConfigValueError('data_type', value,
+                                  "data_type should be in ['bool', 'int32', 'int64', 'float32', 'float64', 'text']")
+    if value == "bool":
+        return TSDataType.BOOLEAN.value
+    elif value == "int32":
+        return TSDataType.INT32.value
+    elif value == "int64":
+        return TSDataType.INT64.value
+    elif value == "float32":
+        return TSDataType.FLOAT.value
+    elif value == "float64":
+        return TSDataType.DOUBLE.value
+    elif value == "text":
+        return TSDataType.TEXT.value
+
+
+# Read ColumnEncodings
+
+def read_column_encoding(buffer, size):
+    encodings = []
+    for _ in range(size):
+        res, buffer = read_byte_from_buffer(buffer)
+        encodings.append(res)
+    return encodings, buffer
+
+
+# Read Column
+
+def deserialize_null_indicators(buffer, size):
+    may_have_null, buffer = read_byte_from_buffer(buffer)
+    if may_have_null != b'\x00':
+        return deserialize_from_boolean_array(buffer, size)
+    return None, buffer
+
+
+# Serialized data layout:
+#    +---------------+-----------------+-------------+
+#    | may have null | null indicators |    values   |
+#    +---------------+-----------------+-------------+
+#    | byte          | list[byte]      | list[int64] |
+#    +---------------+-----------------+-------------+
+
+def read_time_column(buffer, size):
+    null_indicators, buffer = deserialize_null_indicators(buffer, size)
+    if null_indicators is None:
+        values, buffer = read_from_buffer(
+            buffer, size * 8
+        )
+    else:
+        raise Exception("TimeColumn should not contain null values")
+    return values, buffer
+
+
+def read_int64_column(buffer, data_type, position_count):
+    null_indicators, buffer = deserialize_null_indicators(buffer, position_count)
+    if null_indicators is None:
+        size = position_count
+    else:
+        size = null_indicators.count(False)
+
+    if TSDataType.INT64 == data_type or TSDataType.DOUBLE == data_type:
+        values, buffer = read_from_buffer(buffer, size * 8)
+        return values, null_indicators, buffer
+    else:
+        raise Exception("Invalid data type: {}".format(data_type))
+
+
+# Serialized data layout:
+#    +---------------+-----------------+-------------+
+#    | may have null | null indicators |    values   |
+#    +---------------+-----------------+-------------+
+#    | byte          | list[byte]      | list[int32] |
+#    +---------------+-----------------+-------------+
+
+def read_int32_column(buffer, data_type, position_count):
+    null_indicators, buffer = deserialize_null_indicators(buffer, position_count)
+    if null_indicators is None:
+        size = position_count
+    else:
+        size = null_indicators.count(False)
+
+    if TSDataType.INT32 == data_type or TSDataType.FLOAT == data_type:
+        values, buffer = read_from_buffer(buffer, size * 4)
+        return values, null_indicators, buffer
+    else:
+        raise Exception("Invalid data type: {}".format(data_type))
+
+
+# Serialized data layout:
+#    +---------------+-----------------+-------------+
+#    | may have null | null indicators |    values   |
+#    +---------------+-----------------+-------------+
+#    | byte          | list[byte]      | list[byte]  |
+#    +---------------+-----------------+-------------+
+
+def read_byte_column(buffer, data_type, position_count):
+    if data_type != TSDataType.BOOLEAN:
+        raise Exception("Invalid data type: {}".format(data_type))
+    null_indicators, buffer = deserialize_null_indicators(buffer, position_count)
+    res, buffer = deserialize_from_boolean_array(buffer, position_count)
+    return res, null_indicators, buffer
+
+
+def deserialize_from_boolean_array(buffer, size):
+    packed_boolean_array, buffer = read_from_buffer(buffer, (size + 7) // 8)
+    current_byte = 0
+    output = [None] * size
+    position = 0
+    # read null bits 8 at a time
+    while position < (size & ~0b111):
+        value = packed_boolean_array[current_byte]
+        output[position] = ((value & 0b1000_0000) != 0)
+        output[position + 1] = ((value & 0b0100_0000) != 0)
+        output[position + 2] = ((value & 0b0010_0000) != 0)
+        output[position + 3] = ((value & 0b0001_0000) != 0)
+        output[position + 4] = ((value & 0b0000_1000) != 0)
+        output[position + 5] = ((value & 0b0000_0100) != 0)
+        output[position + 6] = ((value & 0b0000_0010) != 0)
+        output[position + 7] = ((value & 0b0000_0001) != 0)
+
+        position += 8
+        current_byte += 1
+    # read last null bits
+    if (size & 0b111) > 0:
+        value = packed_boolean_array[-1]
+        mask = 0b1000_0000
+        position = size & ~0b111
+        while position < size:
+            output[position] = ((value & mask) != 0)
+            mask >>= 1
+            position += 1
+    return output, buffer
+
+
+# Serialized data layout:
+#    +---------------+-----------------+-------------+
+#    | may have null | null indicators |    values   |
+#    +---------------+-----------------+-------------+
+#    | byte          | list[byte]      | list[entry] |
+#    +---------------+-----------------+-------------+
+#
+# Each entry is represented as:
+#    +--------------+-------+
+#    | value length | value |
+#    +--------------+-------+
+#    | int32        | bytes |
+#    +--------------+-------+
+
+def read_binary_column(buffer, data_type, position_count):
+    if data_type != TSDataType.TEXT:
+        raise Exception("Invalid data type: {}".format(data_type))
+    null_indicators, buffer = deserialize_null_indicators(buffer, position_count)
+
+    if null_indicators is None:
+        size = position_count
+    else:
+        size = null_indicators.count(False)
+    values = [None] * size
+    for i in range(size):
+        length, buffer = read_int_from_buffer(buffer)
+        res, buffer = read_from_buffer(buffer, length)
+        values[i] = res
+    return values, null_indicators, buffer
+
+
+def read_column(encoding, buffer, data_type, position_count):
+    if encoding == b'\x00':
+        return read_byte_column(buffer, data_type, position_count)
+    elif encoding == b'\x01':
+        return read_int32_column(buffer, data_type, position_count)
+    elif encoding == b'\x02':
+        return read_int64_column(buffer, data_type, position_count)
+    elif encoding == b'\x03':
+        return read_binary_column(buffer, data_type, position_count)
+    elif encoding == b'\x04':
+        return read_run_length_column(buffer, data_type, position_count)
+    else:
+        raise Exception("Unsupported encoding: {}".format(encoding))
+
+
+# Serialized data layout:
+#    +-----------+-------------------------+
+#    | encoding  | serialized inner column |
+#    +-----------+-------------------------+
+#    | byte      | list[byte]              |
+#    +-----------+-------------------------+
+
+def read_run_length_column(buffer, data_type, position_count):
+    encoding, buffer = read_byte_from_buffer(buffer)
+    column, null_indicators, buffer = read_column(encoding, buffer, data_type, 1)
+
+    # a run-length column may carry no null indicators; guard against None
+    if null_indicators is not None:
+        null_indicators = null_indicators * position_count
+    return repeat(column, data_type, position_count), null_indicators, buffer
+
+
+def repeat(buffer, data_type, position_count):
+    if data_type == TSDataType.BOOLEAN or data_type == TSDataType.TEXT:
+        return buffer * position_count
+    else:
+        # bytes are immutable: accumulate with +=; a bare join() discards its result
+        res = bytes(0)
+        for _ in range(position_count):
+            res += buffer
+        return res
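+
+
+# Worked example (illustrative): deserialize_from_boolean_array(b'\xa0', 3)
+# unpacks most-significant bits first and returns ([True, False, True], b'').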
diff --git a/iotdb-core/ainode/iotdb/ainode/util/status.py b/iotdb-core/ainode/iotdb/ainode/util/status.py
new file mode 100644
index 0000000000000..1bcbef7a806c0
--- /dev/null
+++ b/iotdb-core/ainode/iotdb/ainode/util/status.py
@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from iotdb.ainode.constant import TSStatusCode
+from iotdb.ainode.log import Logger
+from iotdb.thrift.common.ttypes import TSStatus
+
+
+def get_status(status_code: TSStatusCode, message: str = None) -> TSStatus:
+    status = TSStatus(status_code.get_status_code())
+    status.message = message
+    return status
+
+
+def verify_success(status: TSStatus, err_msg: str) -> None:
+    if status.code != TSStatusCode.SUCCESS_STATUS.get_status_code():
+        Logger().warning("{}, error status is {}".format(err_msg, status))
+        raise RuntimeError("{}: {}".format(status.code, status.message))
diff --git a/iotdb-core/ainode/poetry.lock b/iotdb-core/ainode/poetry.lock
new file mode 100644
index 0000000000000..fb8655ca22012
--- /dev/null
+++ b/iotdb-core/ainode/poetry.lock
@@ -0,0 +1,1442 @@
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+
+[[package]]
+name = "alembic"
+version = "1.13.2"
+description = "A database migration tool for SQLAlchemy."
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "alembic-1.13.2-py3-none-any.whl", hash = "sha256:6b8733129a6224a9a711e17c99b08462dbf7cc9670ba8f2e2ae9af860ceb1953"},
+    {file = "alembic-1.13.2.tar.gz", hash = "sha256:1ff0ae32975f4fd96028c39ed9bb3c867fe3af956bd7bb37343b54c9fe7445ef"},
+]
+
+[package.dependencies]
+importlib-metadata = {version = "*", markers = "python_version < \"3.9\""}
+importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
+Mako = "*"
+SQLAlchemy = ">=1.3.0"
+typing-extensions = ">=4"
+
+[package.extras]
+tz = ["backports.zoneinfo"]
+
+[[package]]
+name = "certifi"
+version = "2024.7.4"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"},
+    {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"},
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.3.2"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = 
"sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "colorlog" +version = "6.8.2" +description = "Add colours to the output of Python's logging module." +optional = false +python-versions = ">=3.6" +files = [ + {file = "colorlog-6.8.2-py3-none-any.whl", hash = "sha256:4dcbb62368e2800cb3c5abd348da7e53f6c362dda502ec27c560b2e58a66bd33"}, + {file = "colorlog-6.8.2.tar.gz", hash = "sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} + +[package.extras] +development = ["black", "flake8", "mypy", "pytest", "types-colorama"] + +[[package]] +name = "cython" +version = "3.0.11" +description = "The Cython compiler for writing C extensions in the Python language." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +files = [ + {file = "Cython-3.0.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:44292aae17524abb4b70a25111fe7dec1a0ad718711d47e3786a211d5408fdaa"}, + {file = "Cython-3.0.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a75d45fbc20651c1b72e4111149fed3b33d270b0a4fb78328c54d965f28d55e1"}, + {file = "Cython-3.0.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d89a82937ce4037f092e9848a7bbcc65bc8e9fc9aef2bb74f5c15e7d21a73080"}, + {file = "Cython-3.0.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a8ea2e7e2d3bc0d8630dafe6c4a5a89485598ff8a61885b74f8ed882597efd5"}, + {file = "Cython-3.0.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cee29846471ce60226b18e931d8c1c66a158db94853e3e79bc2da9bd22345008"}, + {file = "Cython-3.0.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eeb6860b0f4bfa402de8929833fe5370fa34069c7ebacb2d543cb017f21fb891"}, + {file = "Cython-3.0.11-cp310-cp310-win32.whl", hash = "sha256:3699391125ab344d8d25438074d1097d9ba0fb674d0320599316cfe7cf5f002a"}, + {file = "Cython-3.0.11-cp310-cp310-win_amd64.whl", hash = "sha256:d02f4ebe15aac7cdacce1a628e556c1983f26d140fd2e0ac5e0a090e605a2d38"}, + {file = "Cython-3.0.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75ba1c70b6deeaffbac123856b8d35f253da13552207aa969078611c197377e4"}, + {file = "Cython-3.0.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af91497dc098718e634d6ec8f91b182aea6bb3690f333fc9a7777bc70abe8810"}, + {file = "Cython-3.0.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3999fb52d3328a6a5e8c63122b0a8bd110dfcdb98dda585a3def1426b991cba7"}, + {file = "Cython-3.0.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d566a4e09b8979be8ab9f843bac0dd216c81f5e5f45661a9b25cd162ed80508c"}, + {file = "Cython-3.0.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:46aec30f217bdf096175a1a639203d44ac73a36fe7fa3dd06bd012e8f39eca0f"}, + {file = "Cython-3.0.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ddd1fe25af330f4e003421636746a546474e4ccd8f239f55d2898d80983d20ed"}, + {file = "Cython-3.0.11-cp311-cp311-win32.whl", hash = "sha256:221de0b48bf387f209003508e602ce839a80463522fc6f583ad3c8d5c890d2c1"}, + {file = "Cython-3.0.11-cp311-cp311-win_amd64.whl", hash = "sha256:3ff8ac1f0ecd4f505db4ab051e58e4531f5d098b6ac03b91c3b902e8d10c67b3"}, + {file = "Cython-3.0.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:11996c40c32abf843ba652a6d53cb15944c88d91f91fc4e6f0028f5df8a8f8a1"}, + {file = "Cython-3.0.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63f2c892e9f9c1698ecfee78205541623eb31cd3a1b682668be7ac12de94aa8e"}, + {file = "Cython-3.0.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b14c24f1dc4c4c9d997cca8d1b7fb01187a218aab932328247dcf5694a10102"}, + {file = "Cython-3.0.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c8eed5c015685106db15dd103fd040948ddca9197b1dd02222711815ea782a27"}, + {file = "Cython-3.0.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780f89c95b8aec1e403005b3bf2f0a2afa060b3eba168c86830f079339adad89"}, + {file = "Cython-3.0.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:a690f2ff460682ea985e8d38ec541be97e0977fa0544aadc21efc116ff8d7579"}, + {file = "Cython-3.0.11-cp312-cp312-win32.whl", hash = "sha256:2252b5aa57621848e310fe7fa6f7dce5f73aa452884a183d201a8bcebfa05a00"}, + {file = "Cython-3.0.11-cp312-cp312-win_amd64.whl", hash = "sha256:da394654c6da15c1d37f0b7ec5afd325c69a15ceafee2afba14b67a5df8a82c8"}, + {file = "Cython-3.0.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4341d6a64d47112884e0bcf31e6c075268220ee4cd02223047182d4dda94d637"}, + {file = "Cython-3.0.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:351955559b37e6c98b48aecb178894c311be9d731b297782f2b78d111f0c9015"}, + {file = "Cython-3.0.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c02361af9bfa10ff1ccf967fc75159e56b1c8093caf565739ed77a559c1f29f"}, + {file = "Cython-3.0.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6823aef13669a32caf18bbb036de56065c485d9f558551a9b55061acf9c4c27f"}, + {file = "Cython-3.0.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6fb68cef33684f8cc97987bee6ae919eee7e18ee6a3ad7ed9516b8386ef95ae6"}, + {file = "Cython-3.0.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:790263b74432cb997740d73665f4d8d00b9cd1cecbdd981d93591ddf993d4f12"}, + {file = "Cython-3.0.11-cp313-cp313-win32.whl", hash = "sha256:e6dd395d1a704e34a9fac00b25f0036dce6654c6b898be6f872ac2bb4f2eda48"}, + {file = "Cython-3.0.11-cp313-cp313-win_amd64.whl", hash = "sha256:52186101d51497519e99b60d955fd5cb3bf747c67f00d742e70ab913f1e42d31"}, + {file = "Cython-3.0.11-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c69d5cad51388522b98a99b4be1b77316de85b0c0523fa865e0ea58bbb622e0a"}, + {file = "Cython-3.0.11-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8acdc87e9009110adbceb7569765eb0980129055cc954c62f99fe9f094c9505e"}, + {file = "Cython-3.0.11-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1dd47865f4c0a224da73acf83d113f93488d17624e2457dce1753acdfb1cc40c"}, + {file = "Cython-3.0.11-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:301bde949b4f312a1c70e214b0c3bc51a3f955d466010d2f68eb042df36447b0"}, + {file = "Cython-3.0.11-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:f3953d2f504176f929862e5579cfc421860c33e9707f585d70d24e1096accdf7"}, + {file = "Cython-3.0.11-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:3f2b062f6df67e8a56c75e500ca330cf62c85ac26dd7fd006f07ef0f83aebfa3"}, + {file = "Cython-3.0.11-cp36-cp36m-win32.whl", hash = "sha256:c3d68751668c66c7a140b6023dba5d5d507f72063407bb609d3a5b0f3b8dfbe4"}, + {file = "Cython-3.0.11-cp36-cp36m-win_amd64.whl", hash = "sha256:bcd29945fafd12484cf37b1d84f12f0e7a33ba3eac5836531c6bd5283a6b3a0c"}, + {file = "Cython-3.0.11-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4e9a8d92978b15a0c7ca7f98447c6c578dc8923a0941d9d172d0b077cb69c576"}, + {file = "Cython-3.0.11-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:421017466e9260aca86823974e26e158e6358622f27c0f4da9c682f3b6d2e624"}, + {file = "Cython-3.0.11-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d80a7232938d523c1a12f6b1794ab5efb1ae77ad3fde79de4bb558d8ab261619"}, + {file = "Cython-3.0.11-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfa550d9ae39e827a6e7198076df763571cb53397084974a6948af558355e028"}, + {file = 
"Cython-3.0.11-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:aedceb6090a60854b31bf9571dc55f642a3fa5b91f11b62bcef167c52cac93d8"}, + {file = "Cython-3.0.11-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:473d35681d9f93ce380e6a7c8feb2d65fc6333bd7117fbc62989e404e241dbb0"}, + {file = "Cython-3.0.11-cp37-cp37m-win32.whl", hash = "sha256:3379c6521e25aa6cd7703bb7d635eaca75c0f9c7f1b0fdd6dd15a03bfac5f68d"}, + {file = "Cython-3.0.11-cp37-cp37m-win_amd64.whl", hash = "sha256:14701edb3107a5d9305a82d9d646c4f28bfecbba74b26cc1ee2f4be08f602057"}, + {file = "Cython-3.0.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:598699165cfa7c6d69513ee1bffc9e1fdd63b00b624409174c388538aa217975"}, + {file = "Cython-3.0.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0583076c4152b417a3a8a5d81ec02f58c09b67d3f22d5857e64c8734ceada8c"}, + {file = "Cython-3.0.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52205347e916dd65d2400b977df4c697390c3aae0e96275a438cc4ae85dadc08"}, + {file = "Cython-3.0.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:989899a85f0d9a57cebb508bd1f194cb52f0e3f7e22ac259f33d148d6422375c"}, + {file = "Cython-3.0.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:53b6072a89049a991d07f42060f65398448365c59c9cb515c5925b9bdc9d71f8"}, + {file = "Cython-3.0.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f988f7f8164a6079c705c39e2d75dbe9967e3dacafe041420d9af7b9ee424162"}, + {file = "Cython-3.0.11-cp38-cp38-win32.whl", hash = "sha256:a1f4cbc70f6b7f0c939522118820e708e0d490edca42d852fa8004ec16780be2"}, + {file = "Cython-3.0.11-cp38-cp38-win_amd64.whl", hash = "sha256:187685e25e037320cae513b8cc4bf9dbc4465c037051aede509cbbf207524de2"}, + {file = "Cython-3.0.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0fc6fdd6fa493be7bdda22355689d5446ac944cd71286f6f44a14b0d67ee3ff5"}, + {file = "Cython-3.0.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b1d1f6f94cc5d42a4591f6d60d616786b9cd15576b112bc92a23131fcf38020"}, + {file = "Cython-3.0.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4ab2b92a3e6ed552adbe9350fd2ef3aa0cc7853cf91569f9dbed0c0699bbeab"}, + {file = "Cython-3.0.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:104d6f2f2c827ccc5e9e42c80ef6773a6aa94752fe6bc5b24a4eab4306fb7f07"}, + {file = "Cython-3.0.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:13062ce556a1e98d2821f7a0253b50569fdc98c36efd6653a65b21e3f8bbbf5f"}, + {file = "Cython-3.0.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:525d09b3405534763fa73bd78c8e51ac8264036ce4c16d37dfd1555a7da6d3a7"}, + {file = "Cython-3.0.11-cp39-cp39-win32.whl", hash = "sha256:b8c7e514075696ca0f60c337f9e416e61d7ccbc1aa879a56c39181ed90ec3059"}, + {file = "Cython-3.0.11-cp39-cp39-win_amd64.whl", hash = "sha256:8948802e1f5677a673ea5d22a1e7e273ca5f83e7a452786ca286eebf97cee67c"}, + {file = "Cython-3.0.11-py2.py3-none-any.whl", hash = "sha256:0e25f6425ad4a700d7f77cd468da9161e63658837d1bc34861a9861a4ef6346d"}, + {file = "cython-3.0.11.tar.gz", hash = "sha256:7146dd2af8682b4ca61331851e6aebce9fe5158e75300343f80c07ca80b1faff"}, +] + +[[package]] +name = "dynaconf" +version = "3.2.6" +description = "The dynamic configurator for your Python Project" +optional = false +python-versions = ">=3.8" +files = [ + {file = "dynaconf-3.2.6-py2.py3-none-any.whl", hash = 
"sha256:3911c740d717df4576ed55f616c7cbad6e06bc8ef23ffca444b6e2a12fb1c34c"}, + {file = "dynaconf-3.2.6.tar.gz", hash = "sha256:74cc1897396380bb957730eb341cc0976ee9c38bbcb53d3307c50caed0aedfb8"}, +] + +[package.extras] +all = ["configobj", "hvac", "redis", "ruamel.yaml"] +configobj = ["configobj"] +ini = ["configobj"] +redis = ["redis"] +test = ["configobj", "django", "flask (>=0.12)", "hvac (>=1.1.0)", "pytest", "pytest-cov", "pytest-mock", "pytest-xdist", "python-dotenv", "radon", "redis", "toml"] +toml = ["toml"] +vault = ["hvac"] +yaml = ["ruamel.yaml"] + +[[package]] +name = "filelock" +version = "3.15.4" +description = "A platform independent file lock." +optional = false +python-versions = ">=3.8" +files = [ + {file = "filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7"}, + {file = "filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb"}, +] + +[package.extras] +docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"] +typing = ["typing-extensions (>=4.8)"] + +[[package]] +name = "fsspec" +version = "2024.6.1" +description = "File-system specification" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fsspec-2024.6.1-py3-none-any.whl", hash = "sha256:3cb443f8bcd2efb31295a5b9fdb02aee81d8452c80d28f97a6d0959e6cee101e"}, + {file = "fsspec-2024.6.1.tar.gz", hash = "sha256:fad7d7e209dd4c1208e3bbfda706620e0da5142bebbd9c384afb95b07e798e49"}, +] + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +dev = ["pre-commit", "ruff"] +doc = ["numpydoc", "sphinx", "sphinx-design", "sphinx-rtd-theme", "yarl"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +test = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "numpy", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "requests"] +test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask-expr", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"] +test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"] +tqdm = ["tqdm"] + +[[package]] +name = "greenlet" +version = "3.0.3" +description = "Lightweight in-process concurrent 
programming" +optional = false +python-versions = ">=3.7" +files = [ + {file = "greenlet-3.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405"}, + {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f"}, + {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb"}, + {file = "greenlet-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9"}, + {file = "greenlet-3.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22"}, + {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3"}, + {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d"}, + {file = "greenlet-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728"}, + {file = "greenlet-3.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf"}, + {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305"}, + {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6"}, + {file = "greenlet-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2"}, + {file = "greenlet-3.0.3-cp37-cp37m-macosx_11_0_universal2.whl", hash = "sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41"}, + {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7"}, + {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6"}, + {file = "greenlet-3.0.3-cp37-cp37m-win32.whl", hash = "sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d"}, + {file = "greenlet-3.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67"}, + {file = "greenlet-3.0.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4"}, + {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5"}, + 
{file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da"}, + {file = "greenlet-3.0.3-cp38-cp38-win32.whl", hash = "sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3"}, + {file = "greenlet-3.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf"}, + {file = "greenlet-3.0.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b"}, + {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6"}, + {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113"}, + {file = "greenlet-3.0.3-cp39-cp39-win32.whl", hash = "sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e"}, + {file = "greenlet-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067"}, + {file = "greenlet-3.0.3.tar.gz", hash = "sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491"}, +] + +[package.extras] +docs = ["Sphinx", "furo"] +test = ["objgraph", "psutil"] + +[[package]] +name = "hmmlearn" +version = "0.3.2" +description = "Hidden Markov Models in Python with scikit-learn like API" +optional = false +python-versions = ">=3.8" +files = [ + {file = "hmmlearn-0.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:428eb8a60d5455bacf391d54abff876931ea5cf234cd10e0a4a921546652b466"}, + {file = "hmmlearn-0.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1e54a15b3fb47e1ba72c3e40dce9368b3aa2f3f8d4d94ed6684b999cd9e1528"}, + {file = "hmmlearn-0.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19124c22b7579d76b07be8c2128153de9071281bff7a0bc29cc3bd83baf0c4cf"}, + {file = "hmmlearn-0.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:011b1aaa67551151b9442670b58fa692e135ea6fd5dc76022e595964594a3654"}, + {file = "hmmlearn-0.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:3c4d643a40c21732df9eea70d1a0ed256a6aa28ed631762c062c6cce2ee6cb47"}, + {file = "hmmlearn-0.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:02df950f8ee4de9226bef55c1a7a947f706720990bf38e5e6da84b7418af9095"}, + {file = "hmmlearn-0.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ced6acc12ffadb3d2b03fdf0e52bc8211977202db170f5b0f62014199cffc224"}, + {file = "hmmlearn-0.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:16d6b05065963155a0294b0ef3643f01a1897c0e00ab258e5155537e05027a2f"}, + {file = "hmmlearn-0.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16535843dfa38f619355f4f082d1e37622198a8df315c932f406d31f5786821b"}, + {file = "hmmlearn-0.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:c8c2511fb5c7ed31c98fe46b150991c20abb5d95e3c450bb449adfe792f4f9ee"}, + {file = "hmmlearn-0.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:29106ab096c1bd9a8dba8de5e6a8da763d899e4294de5db4a47be2fd28c73a41"}, + {file = "hmmlearn-0.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5ffe17028c881b0213803db604a7051384179877ee74ded76c02f797ccfe2c34"}, + {file = "hmmlearn-0.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25572410c59f4d278b56c1bdc8196f9efe4672b4bf135e3593c6e879cea94518"}, + {file = "hmmlearn-0.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b30a8b380916ccc299984594a55829131d2da1d66bbd6861bfc548e5d9b6984"}, + {file = "hmmlearn-0.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:b042bbb90488ccaaf5616b2ecd7687b17d733016df68192ad6f2a8d58bb96291"}, + {file = "hmmlearn-0.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:42308b50c2eb68ab541573c196c1123c64a50d0dba2b30b1e21e355084041738"}, + {file = "hmmlearn-0.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:47ef458bbae23f1b80bf923eca8a838778e550aa9397a39655aa97304fbeba69"}, + {file = "hmmlearn-0.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8a92724d2c7b7f8c2b8f54c8bfe061b75dc018d81041172ce435d24456232ed"}, + {file = "hmmlearn-0.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e99980c9a92ed4fd62307d6a91e3ff136ee7233f9f02a62038b68473741204e1"}, + {file = "hmmlearn-0.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:4375627550c0282587ec404cb1e2b59d5bff70522bfac3dc57f1a2912e4af9c4"}, + {file = "hmmlearn-0.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:81cc7f0cf33d8d114423347f00d45cd2fba926a66dd563e97bb0c9b3790e3ae2"}, + {file = "hmmlearn-0.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9c55c09ef8dee1deae31a51de9a99ceea0f86cfeb5eba879b1b6c7722d29eb95"}, + {file = "hmmlearn-0.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722a0f116c11160c97362f343d7ba0a78b03dd647cb0e2240127520d5d8d1580"}, + {file = "hmmlearn-0.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01b10877f21f6faab8b01ff63e92e93f614097d03a9fb7fc27a26a16227cd980"}, + {file = "hmmlearn-0.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:160c0018f71b6cbe79644ea743888c6e2d1bec6b09bec5e9b389e1796b57eec5"}, + {file = "hmmlearn-0.3.2.tar.gz", hash = "sha256:edaf485fdb1ea88da9ac642b2006c63d9950dd15d4d132f7205305d383e6f745"}, +] + +[package.dependencies] +numpy = ">=1.10" +scikit-learn = ">=0.16,<0.22.0 || >0.22.0" +scipy = ">=0.19" + +[package.extras] +docs = ["matplotlib", "pydata-sphinx-theme", "sphinx (>=2.0)", "sphinx-gallery"] +tests = ["pytest"] + +[[package]] +name = "idna" +version = "3.8" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +files = [ + {file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"}, + {file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"}, +] + +[[package]] +name = "importlib-metadata" +version = "8.4.0" +description = "Read metadata from Python 
packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1"}, + {file = "importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"}, +] + +[package.dependencies] +zipp = ">=0.5" + +[package.extras] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +perf = ["ipython"] +test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] + +[[package]] +name = "importlib-resources" +version = "6.4.4" +description = "Read resources from Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_resources-6.4.4-py3-none-any.whl", hash = "sha256:dda242603d1c9cd836c3368b1174ed74cb4049ecd209e7a1a0104620c18c5c11"}, + {file = "importlib_resources-6.4.4.tar.gz", hash = "sha256:20600c8b7361938dc0bb2d5ec0297802e575df486f5a544fa414da65e13721f7"}, +] + +[package.dependencies] +zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["jaraco.test (>=5.4)", "pytest (>=6,!=8.1.*)", "zipp (>=3.17)"] +type = ["pytest-mypy"] + +[[package]] +name = "jinja2" +version = "3.1.4" +description = "A very fast and expressive template engine." +optional = false +python-versions = ">=3.7" +files = [ + {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, + {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "joblib" +version = "1.4.2" +description = "Lightweight pipelining with Python functions" +optional = false +python-versions = ">=3.8" +files = [ + {file = "joblib-1.4.2-py3-none-any.whl", hash = "sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6"}, + {file = "joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e"}, +] + +[[package]] +name = "mako" +version = "1.3.5" +description = "A super-fast templating language that borrows the best ideas from the existing templating languages." +optional = false +python-versions = ">=3.8" +files = [ + {file = "Mako-1.3.5-py3-none-any.whl", hash = "sha256:260f1dbc3a519453a9c856dedfe4beb4e50bd5a26d96386cb6c80856556bb91a"}, + {file = "Mako-1.3.5.tar.gz", hash = "sha256:48dbc20568c1d276a2698b36d968fa76161bf127194907ea6fc594fa81f943bc"}, +] + +[package.dependencies] +MarkupSafe = ">=0.9.2" + +[package.extras] +babel = ["Babel"] +lingua = ["lingua"] +testing = ["pytest"] + +[[package]] +name = "markupsafe" +version = "2.1.5" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, + {file = 
"MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, + {file = 
"MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, + {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, +] + +[[package]] +name = "mpmath" +version = "1.3.0" +description = "Python library for arbitrary-precision floating-point arithmetic" +optional = false +python-versions = "*" +files = [ + {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, + {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, +] + +[package.extras] +develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] +docs = ["sphinx"] +gmpy = ["gmpy2 (>=2.1.0a4)"] +tests = ["pytest (>=4.6)"] + +[[package]] +name = "networkx" +version = "3.1" +description = "Python package for creating and manipulating graphs and networks" +optional = false +python-versions = ">=3.8" +files = [ + {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, + {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, +] + +[package.extras] +default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"] +developer = ["mypy (>=1.1)", 
"pre-commit (>=3.2)"] +doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"] +test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] + +[[package]] +name = "numpy" +version = "1.24.4" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"}, + {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"}, + {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"}, + {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"}, + {file = "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"}, + {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"}, + {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"}, + {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"}, + {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"}, + {file = "numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"}, + {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"}, +] + +[[package]] +name = "optuna" +version = "3.6.1" +description = "A hyperparameter optimization framework" +optional = false +python-versions = ">=3.7" +files = [ + {file = "optuna-3.6.1-py3-none-any.whl", hash = "sha256:b32e0490bd6552790b70ec94de77dd2855057c9e229cd9f4da48fe8a31c7f1cc"}, + {file = "optuna-3.6.1.tar.gz", hash = "sha256:146e530b57b4b9afd7526b3e642fbe65491f7e292b405913355f8e438e361ecf"}, +] + +[package.dependencies] +alembic = ">=1.5.0" +colorlog = "*" +numpy = "*" +packaging = ">=20.0" +PyYAML = "*" +sqlalchemy = ">=1.3.0" +tqdm = "*" + +[package.extras] +benchmark = ["asv (>=0.5.0)", "botorch", "cma", "virtualenv"] +checking = ["black", "blackdoc", "flake8", "isort", "mypy", "mypy-boto3-s3", "types-PyYAML", "types-redis", "types-setuptools", "types-tqdm", "typing-extensions (>=3.10.0.0)"] +document = ["ase", "cmaes (>=0.10.0)", "fvcore", "lightgbm", "matplotlib (!=3.6.0)", "pandas", "pillow", "plotly (>=4.9.0)", "scikit-learn", "sphinx", "sphinx-copybutton", "sphinx-gallery", "sphinx-plotly-directive", "sphinx-rtd-theme (>=1.2.0)", "torch", "torchvision"] +optional = ["boto3", "cmaes (>=0.10.0)", "google-cloud-storage", "matplotlib (!=3.6.0)", "pandas", "plotly (>=4.9.0)", "redis", "scikit-learn (>=0.24.2)", "scipy", "torch"] +test = ["coverage", "fakeredis[lua]", "kaleido", "moto", "pytest", "scipy (>=1.9.2)", "torch"] + +[[package]] +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, +] + +[[package]] +name = "pandas" +version = "1.5.3" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3749077d86e3a2f0ed51367f30bf5b82e131cc0f14260c4d3e499186fccc4406"}, + {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:972d8a45395f2a2d26733eb8d0f629b2f90bebe8e8eddbb8829b180c09639572"}, + {file = "pandas-1.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50869a35cbb0f2e0cd5ec04b191e7b12ed688874bd05dd777c19b28cbea90996"}, + {file = 
"pandas-1.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3ac844a0fe00bfaeb2c9b51ab1424e5c8744f89860b138434a363b1f620f354"}, + {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0a56cef15fd1586726dace5616db75ebcfec9179a3a55e78f72c5639fa2a23"}, + {file = "pandas-1.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:478ff646ca42b20376e4ed3fa2e8d7341e8a63105586efe54fa2508ee087f328"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6973549c01ca91ec96199e940495219c887ea815b2083722821f1d7abfa2b4dc"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c39a8da13cede5adcd3be1182883aea1c925476f4e84b2807a46e2775306305d"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f76d097d12c82a535fda9dfe5e8dd4127952b45fea9b0276cb30cca5ea313fbc"}, + {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e474390e60ed609cec869b0da796ad94f420bb057d86784191eefc62b65819ae"}, + {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f2b952406a1588ad4cad5b3f55f520e82e902388a6d5a4a91baa8d38d23c7f6"}, + {file = "pandas-1.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:bc4c368f42b551bf72fac35c5128963a171b40dce866fb066540eeaf46faa003"}, + {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:14e45300521902689a81f3f41386dc86f19b8ba8dd5ac5a3c7010ef8d2932813"}, + {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9842b6f4b8479e41968eced654487258ed81df7d1c9b7b870ceea24ed9459b31"}, + {file = "pandas-1.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:26d9c71772c7afb9d5046e6e9cf42d83dd147b5cf5bcb9d97252077118543792"}, + {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fbcb19d6fceb9e946b3e23258757c7b225ba450990d9ed63ccceeb8cae609f7"}, + {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:565fa34a5434d38e9d250af3c12ff931abaf88050551d9fbcdfafca50d62babf"}, + {file = "pandas-1.5.3-cp38-cp38-win32.whl", hash = "sha256:87bd9c03da1ac870a6d2c8902a0e1fd4267ca00f13bc494c9e5a9020920e1d51"}, + {file = "pandas-1.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:41179ce559943d83a9b4bbacb736b04c928b095b5f25dd2b7389eda08f46f373"}, + {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c74a62747864ed568f5a82a49a23a8d7fe171d0c69038b38cedf0976831296fa"}, + {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c4c00e0b0597c8e4f59e8d461f797e5d70b4d025880516a8261b2817c47759ee"}, + {file = "pandas-1.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a50d9a4336a9621cab7b8eb3fb11adb82de58f9b91d84c2cd526576b881a0c5a"}, + {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd05f7783b3274aa206a1af06f0ceed3f9b412cf665b7247eacd83be41cf7bf0"}, + {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f69c4029613de47816b1bb30ff5ac778686688751a5e9c99ad8c7031f6508e5"}, + {file = "pandas-1.5.3-cp39-cp39-win32.whl", hash = "sha256:7cec0bee9f294e5de5bbfc14d0573f65526071029d036b753ee6507d2a21480a"}, + {file = "pandas-1.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:dfd681c5dc216037e0b0a2c821f5ed99ba9f03ebcf119c7dac0e9a7b960b9ec9"}, + {file = "pandas-1.5.3.tar.gz", hash = "sha256:74a3fd7e5a7ec052f183273dc7b0acd3a863edf7520f5d3a1765c04ffdb3b0b1"}, +] + 
+[package.dependencies] +numpy = [ + {version = ">=1.20.3", markers = "python_version < \"3.10\""}, + {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, + {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""}, +] +python-dateutil = ">=2.8.1" +pytz = ">=2020.1" + +[package.extras] +test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"] + +[[package]] +name = "patsy" +version = "0.5.6" +description = "A Python package for describing statistical models and for building design matrices." +optional = false +python-versions = "*" +files = [ + {file = "patsy-0.5.6-py2.py3-none-any.whl", hash = "sha256:19056886fd8fa71863fa32f0eb090267f21fb74be00f19f5c70b2e9d76c883c6"}, + {file = "patsy-0.5.6.tar.gz", hash = "sha256:95c6d47a7222535f84bff7f63d7303f2e297747a598db89cf5c67f0c0c7d2cdb"}, +] + +[package.dependencies] +numpy = ">=1.4" +six = "*" + +[package.extras] +test = ["pytest", "pytest-cov", "scipy"] + +[[package]] +name = "pmdarima" +version = "2.0.4" +description = "Python's forecast::auto.arima equivalent" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pmdarima-2.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8e6c16672e0f122e63ab1d7282a362c762783264a07636bbc9d4ae3d58ac7605"}, + {file = "pmdarima-2.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c9a839a477100331c47aa8a841c198ecb0b3fab4aff958622d31fec86d2aea76"}, + {file = "pmdarima-2.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ccd9e2186ba1ef45006f6c88dc9ecd6121ddb8914114bebcfa0d42899b40ced7"}, + {file = "pmdarima-2.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:165bdf787f5dafd5faab543d20524413b765d9cb6f020f0e1846551e7678414a"}, + {file = "pmdarima-2.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:cbde33812c37f441ba70d9e7b0479c758d56ad77a8dcbdead1fb8baad9aee806"}, + {file = "pmdarima-2.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8d961c99b445e53eadf6627c61bf800f403bd247b7ec2f62c6dfe8a0b1bcbf0b"}, + {file = "pmdarima-2.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:78e815c51074411bbe8433a912af4cc8dac394d9330cfedf57dd5ce08efe4a65"}, + {file = "pmdarima-2.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8022484256492bc425f0053b3a0346ab0a877f0f668664be738fe07f6b423c08"}, + {file = "pmdarima-2.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:46677b68efffde66aa1291799b39b91420961967fa2b6e041e26bb263782d38d"}, + {file = "pmdarima-2.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:bc594dd981bca5217b4448c96e82dbd60553b07263517a5cb0510b4bfe66ded1"}, + {file = "pmdarima-2.0.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:62a4ff308fbb5074a66c0877ba6b472d0fc406cbc0b5a2dba7e8fa800c9dd8ca"}, + {file = "pmdarima-2.0.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7e215ec6130b917843d26e0698637f976e4a6cc9dbd521bf44547668d08f058a"}, + {file = "pmdarima-2.0.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d837bc00266a234d9292914f19a67e04ca8361d9c309b70fc8f16c1ed863551"}, + {file = "pmdarima-2.0.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8790bff665a5ebaa36ddbbfd0e8ea51ad2e5809270dc74d462d5c2498b26220f"}, + {file = "pmdarima-2.0.4-cp312-cp312-win_amd64.whl", hash = "sha256:a8bf7913bdbd0e286489b2111080a0f51f5d9d3ee5e05b7011a691207047ddcf"}, + {file = 
"pmdarima-2.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5bce63dca165948a2311eff0c200e57370829855fce6e7d3fe930497f2f9fc04"}, + {file = "pmdarima-2.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c1ed01cbb399d9cdbe3f12f2ef505a144826769ec3ad75f6075cadbe8447a13"}, + {file = "pmdarima-2.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:73060d28ffabeae7374ab48ba7882b29ae6998de3e3e34f5484c64e0baefda0d"}, + {file = "pmdarima-2.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:776e21e076393c6fe86895ffe71e6769c9b3fe0dffb92d6f6558657580cf0a42"}, + {file = "pmdarima-2.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ec865e8fb07378c470f3e41a77d6705875a674cefbe6c0185e9bc070e642da5c"}, + {file = "pmdarima-2.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:670ca1f93ed4f8239e7fbd7d1dd156108899e15e8fb0717b2b3fa605fa6ace35"}, + {file = "pmdarima-2.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab1ee166f511d2497d6b357bf0cac84326efff25349eb777aea5b030ed6bf8bb"}, + {file = "pmdarima-2.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d9ead43510adfe3c0d4000e37427bfcf11ba6cc3b26091368a847c5da010e052"}, + {file = "pmdarima-2.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:c1213f3fa1e3ced9796f4092f9bd4be1881205f77bc2f5a1494695134a92000e"}, + {file = "pmdarima-2.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6e0e3c90e7a91b44599f08cd9c62880466860e76bd1b0ca2d2ff8e72834a1a7f"}, + {file = "pmdarima-2.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8fe612384e4989f010dacdefbff874231f5b7351dfb84bcbfe9ed8d718fc4115"}, + {file = "pmdarima-2.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db842d97905b171c867aae1a492b8c958ec1bae987c3ee41c561a06d99a19efd"}, + {file = "pmdarima-2.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a03936d681c720b0e44565d099de2619b762cb9d443f3043de9a78888aff6bb2"}, + {file = "pmdarima-2.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:13ba7061d4e9d48f21e1c393bfaa3d6b31f60a8c97ddbe8d455fa675c594f9e4"}, + {file = "pmdarima-2.0.4.tar.gz", hash = "sha256:b87f9d9f5b7dc2ddbd053687c2264e26ac98fd4118e843c7e9bc3dd7343e5c1a"}, +] + +[package.dependencies] +Cython = ">=0.29,<0.29.18 || >0.29.18,<0.29.31 || >0.29.31" +joblib = ">=0.11" +numpy = ">=1.21.2" +packaging = ">=17.1" +pandas = ">=0.19" +scikit-learn = ">=0.22" +scipy = ">=1.3.2" +setuptools = ">=38.6.0,<50.0.0 || >50.0.0" +statsmodels = ">=0.13.2" +urllib3 = "*" + +[[package]] +name = "psutil" +version = "5.9.8" +description = "Cross-platform lib for process and system monitoring in Python." 
+optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "psutil-5.9.8-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:26bd09967ae00920df88e0352a91cff1a78f8d69b3ecabbfe733610c0af486c8"}, + {file = "psutil-5.9.8-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:05806de88103b25903dff19bb6692bd2e714ccf9e668d050d144012055cbca73"}, + {file = "psutil-5.9.8-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:611052c4bc70432ec770d5d54f64206aa7203a101ec273a0cd82418c86503bb7"}, + {file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:50187900d73c1381ba1454cf40308c2bf6f34268518b3f36a9b663ca87e65e36"}, + {file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:02615ed8c5ea222323408ceba16c60e99c3f91639b07da6373fb7e6539abc56d"}, + {file = "psutil-5.9.8-cp27-none-win32.whl", hash = "sha256:36f435891adb138ed3c9e58c6af3e2e6ca9ac2f365efe1f9cfef2794e6c93b4e"}, + {file = "psutil-5.9.8-cp27-none-win_amd64.whl", hash = "sha256:bd1184ceb3f87651a67b2708d4c3338e9b10c5df903f2e3776b62303b26cb631"}, + {file = "psutil-5.9.8-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81"}, + {file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421"}, + {file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4"}, + {file = "psutil-5.9.8-cp36-cp36m-win32.whl", hash = "sha256:7d79560ad97af658a0f6adfef8b834b53f64746d45b403f225b85c5c2c140eee"}, + {file = "psutil-5.9.8-cp36-cp36m-win_amd64.whl", hash = "sha256:27cc40c3493bb10de1be4b3f07cae4c010ce715290a5be22b98493509c6299e2"}, + {file = "psutil-5.9.8-cp37-abi3-win32.whl", hash = "sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0"}, + {file = "psutil-5.9.8-cp37-abi3-win_amd64.whl", hash = "sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf"}, + {file = "psutil-5.9.8-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8"}, + {file = "psutil-5.9.8.tar.gz", hash = "sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c"}, +] + +[package.extras] +test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] + +[[package]] +name = "pylru" +version = "1.2.1" +description = "A least recently used (LRU) cache implementation" +optional = false +python-versions = "*" +files = [ + {file = "pylru-1.2.1-py3-none-any.whl", hash = "sha256:b7c75b0676e2fbae647823bc209e23998772867d3679f1583c7350a9b02a59f0"}, + {file = "pylru-1.2.1.tar.gz", hash = "sha256:47ad140a63ab9389648dadfbb4330700e0ffeeb28ec04664ee47d37ed133b0f4"}, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "pytz" +version = "2024.1" +description = "World timezone definitions, modern 
and historical" +optional = false +python-versions = "*" +files = [ + {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, + {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = 
"PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.8" +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "scikit-base" +version = "0.6.2" +description = "Base classes for sklearn-like parametric objects" +optional = false +python-versions = "<3.13,>=3.8" +files = [ + {file = "scikit-base-0.6.2.tar.gz", hash = "sha256:ac7c1dd9b1006e1e466d8269074f7fb02b7f5143c615f7fdf6a1e0c7565431fe"}, + {file = "scikit_base-0.6.2-py3-none-any.whl", hash = "sha256:91af9fdc6f8e35cbb1a89436f127681a64957645be0039a38d343381abef9154"}, +] + +[package.extras] +all-extras = ["numpy", "pandas"] +binder = ["jupyter"] +dev = ["pre-commit", "pytest", "pytest-cov", "scikit-learn (>=0.24.0)"] +docs = ["Sphinx (!=7.2.0,<8.0.0)", "jupyter", "myst-parser", "nbsphinx (>=0.8.6)", "numpydoc", "pydata-sphinx-theme", "sphinx-design (<0.6.0)", "sphinx-gallery (<0.16.0)", "sphinx-issues (<4.0.0)", "sphinx-panels", "tabulate"] +linters = ["black", "doc8", "flake8", "flake8-bugbear", "flake8-builtins", "flake8-comprehensions", "flake8-print", "flake8-quotes", "isort", "mypy", "nbqa", "pandas-vet", "pep8-naming", "pydocstyle"] +test = ["coverage", "numpy", "pandas", "pytest", "pytest-cov", "safety", "scikit-learn (>=0.24.0)", "scipy"] + +[[package]] +name = "scikit-learn" +version = "1.3.2" +description = "A set of python modules for machine learning and data mining" +optional = false +python-versions = ">=3.8" +files = [ + {file = "scikit-learn-1.3.2.tar.gz", hash = "sha256:a2f54c76accc15a34bfb9066e6c7a56c1e7235dda5762b990792330b52ccfb05"}, + {file = "scikit_learn-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e326c0eb5cf4d6ba40f93776a20e9a7a69524c4db0757e7ce24ba222471ee8a1"}, + {file = 
"scikit_learn-1.3.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:535805c2a01ccb40ca4ab7d081d771aea67e535153e35a1fd99418fcedd1648a"}, + {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1215e5e58e9880b554b01187b8c9390bf4dc4692eedeaf542d3273f4785e342c"}, + {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ee107923a623b9f517754ea2f69ea3b62fc898a3641766cb7deb2f2ce450161"}, + {file = "scikit_learn-1.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:35a22e8015048c628ad099da9df5ab3004cdbf81edc75b396fd0cff8699ac58c"}, + {file = "scikit_learn-1.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6fb6bc98f234fda43163ddbe36df8bcde1d13ee176c6dc9b92bb7d3fc842eb66"}, + {file = "scikit_learn-1.3.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:18424efee518a1cde7b0b53a422cde2f6625197de6af36da0b57ec502f126157"}, + {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3271552a5eb16f208a6f7f617b8cc6d1f137b52c8a1ef8edf547db0259b2c9fb"}, + {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4144a5004a676d5022b798d9e573b05139e77f271253a4703eed295bde0433"}, + {file = "scikit_learn-1.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:67f37d708f042a9b8d59551cf94d30431e01374e00dc2645fa186059c6c5d78b"}, + {file = "scikit_learn-1.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8db94cd8a2e038b37a80a04df8783e09caac77cbe052146432e67800e430c028"}, + {file = "scikit_learn-1.3.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:61a6efd384258789aa89415a410dcdb39a50e19d3d8410bd29be365bcdd512d5"}, + {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb06f8dce3f5ddc5dee1715a9b9f19f20d295bed8e3cd4fa51e1d050347de525"}, + {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b2de18d86f630d68fe1f87af690d451388bb186480afc719e5f770590c2ef6c"}, + {file = "scikit_learn-1.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:0402638c9a7c219ee52c94cbebc8fcb5eb9fe9c773717965c1f4185588ad3107"}, + {file = "scikit_learn-1.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a19f90f95ba93c1a7f7924906d0576a84da7f3b2282ac3bfb7a08a32801add93"}, + {file = "scikit_learn-1.3.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:b8692e395a03a60cd927125eef3a8e3424d86dde9b2370d544f0ea35f78a8073"}, + {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15e1e94cc23d04d39da797ee34236ce2375ddea158b10bee3c343647d615581d"}, + {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:785a2213086b7b1abf037aeadbbd6d67159feb3e30263434139c98425e3dcfcf"}, + {file = "scikit_learn-1.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:64381066f8aa63c2710e6b56edc9f0894cc7bf59bd71b8ce5613a4559b6145e0"}, + {file = "scikit_learn-1.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6c43290337f7a4b969d207e620658372ba3c1ffb611f8bc2b6f031dc5c6d1d03"}, + {file = "scikit_learn-1.3.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:dc9002fc200bed597d5d34e90c752b74df516d592db162f756cc52836b38fe0e"}, + {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d08ada33e955c54355d909b9c06a4789a729977f165b8bae6f225ff0a60ec4a"}, + {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:763f0ae4b79b0ff9cca0bf3716bcc9915bdacff3cebea15ec79652d1cc4fa5c9"}, + {file = "scikit_learn-1.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:ed932ea780517b00dae7431e031faae6b49b20eb6950918eb83bd043237950e0"}, +] + +[package.dependencies] +joblib = ">=1.1.1" +numpy = ">=1.17.3,<2.0" +scipy = ">=1.5.0" +threadpoolctl = ">=2.0.0" + +[package.extras] +benchmark = ["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.10.1)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] +examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"] +tests = ["black (>=23.3.0)", "matplotlib (>=3.1.3)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.16.2)"] + +[[package]] +name = "scipy" +version = "1.9.3" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "scipy-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1884b66a54887e21addf9c16fb588720a8309a57b2e258ae1c7986d4444d3bc0"}, + {file = "scipy-1.9.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:83b89e9586c62e787f5012e8475fbb12185bafb996a03257e9675cd73d3736dd"}, + {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a72d885fa44247f92743fc20732ae55564ff2a519e8302fb7e18717c5355a8b"}, + {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d01e1dd7b15bd2449c8bfc6b7cc67d630700ed655654f0dfcf121600bad205c9"}, + {file = "scipy-1.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:68239b6aa6f9c593da8be1509a05cb7f9efe98b80f43a5861cd24c7557e98523"}, + {file = "scipy-1.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b41bc822679ad1c9a5f023bc93f6d0543129ca0f37c1ce294dd9d386f0a21096"}, + {file = "scipy-1.9.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:90453d2b93ea82a9f434e4e1cba043e779ff67b92f7a0e85d05d286a3625df3c"}, + {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83c06e62a390a9167da60bedd4575a14c1f58ca9dfde59830fc42e5197283dab"}, + {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abaf921531b5aeaafced90157db505e10345e45038c39e5d9b6c7922d68085cb"}, + {file = "scipy-1.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:06d2e1b4c491dc7d8eacea139a1b0b295f74e1a1a0f704c375028f8320d16e31"}, + {file = "scipy-1.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5a04cd7d0d3eff6ea4719371cbc44df31411862b9646db617c99718ff68d4840"}, + {file = "scipy-1.9.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:545c83ffb518094d8c9d83cce216c0c32f8c04aaf28b92cc8283eda0685162d5"}, + {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d54222d7a3ba6022fdf5773931b5d7c56efe41ede7f7128c7b1637700409108"}, + {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cff3a5295234037e39500d35316a4c5794739433528310e117b8a9a0c76d20fc"}, + {file = "scipy-1.9.3-cp38-cp38-win_amd64.whl", hash = 
"sha256:2318bef588acc7a574f5bfdff9c172d0b1bf2c8143d9582e05f878e580a3781e"}, + {file = "scipy-1.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d644a64e174c16cb4b2e41dfea6af722053e83d066da7343f333a54dae9bc31c"}, + {file = "scipy-1.9.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:da8245491d73ed0a994ed9c2e380fd058ce2fa8a18da204681f2fe1f57f98f95"}, + {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4db5b30849606a95dcf519763dd3ab6fe9bd91df49eba517359e450a7d80ce2e"}, + {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c68db6b290cbd4049012990d7fe71a2abd9ffbe82c0056ebe0f01df8be5436b0"}, + {file = "scipy-1.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:5b88e6d91ad9d59478fafe92a7c757d00c59e3bdc3331be8ada76a4f8d683f58"}, + {file = "scipy-1.9.3.tar.gz", hash = "sha256:fbc5c05c85c1a02be77b1ff591087c83bc44579c6d2bd9fb798bb64ea5e1a027"}, +] + +[package.dependencies] +numpy = ">=1.18.5,<1.26.0" + +[package.extras] +dev = ["flake8", "mypy", "pycodestyle", "typing_extensions"] +doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-panels (>=0.5.2)", "sphinx-tabs"] +test = ["asv", "gmpy2", "mpmath", "pytest", "pytest-cov", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + +[[package]] +name = "setuptools" +version = "74.0.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "setuptools-74.0.0-py3-none-any.whl", hash = "sha256:0274581a0037b638b9fc1c6883cc71c0210865aaa76073f7882376b641b84e8f"}, + {file = "setuptools-74.0.0.tar.gz", hash = "sha256:a85e96b8be2b906f3e3e789adec6a9323abf79758ecfa3065bd740d81158b11e"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"] +core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.text (>=3.7)", "more-itertools (>=8.8)", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.11.*)", "pytest-mypy"] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "sktime" +version = "0.24.2" +description = "A unified framework for machine learning 
with time series" +optional = false +python-versions = "<3.13,>=3.8" +files = [ + {file = "sktime-0.24.2-py3-none-any.whl", hash = "sha256:4c47d08fc11eddb77c8f5ca64509802a4972b33e1518aa801c4501472100012d"}, + {file = "sktime-0.24.2.tar.gz", hash = "sha256:587450908035482b8e5bba7344424706e200496c951d8baf4835148333f74168"}, +] + +[package.dependencies] +numpy = ">=1.21,<1.27" +packaging = "*" +pandas = ">=1.1,<2.2.0" +scikit-base = "<0.7.0" +scikit-learn = ">=0.24,<1.4.0" +scipy = ">=1.2,<2.0.0" + +[package.extras] +alignment = ["dtw-python (>=1.3,<1.4)", "numba (>=0.53,<0.59)"] +all-extras = ["arch (>=5.6,<6.3.0)", "cloudpickle", "dash (!=2.9.0)", "dask", "dtw-python", "esig (==0.9.7)", "filterpy (>=1.4.5)", "gluonts (>=0.9)", "h5py", "hmmlearn (>=0.2.7)", "holidays", "keras-self-attention", "kotsu (>=0.3.1)", "matplotlib (>=3.3.2)", "mne", "numba (>=0.53,<0.59)", "pmdarima (>=1.8,!=1.8.1,<3.0.0)", "prophet (>=1.1)", "pycatch22 (<0.4.4)", "pykalman-bardo (>=0.9.7,<0.10)", "pyod (>=0.8)", "scikit-optimize", "scikit-posthocs (>=0.6.5)", "seaborn (>=0.11)", "seasonal", "skpro (>=2,<2.2.0)", "statsforecast (>=0.5.2,<1.7.0)", "statsmodels (>=0.12.1)", "stumpy (>=1.5.1)", "tbats (>=1.1)", "tensorflow", "tsfresh (>=0.17)", "tslearn (>=0.5.2,<0.6.0)", "xarray"] +all-extras-pandas2 = ["arch (>=5.6,<6.3.0)", "cloudpickle", "dash (!=2.9.0)", "dask (<2023.12.2)", "dtw-python", "esig (==0.9.7)", "filterpy (>=1.4.5)", "gluonts (>=0.9)", "h5py", "hmmlearn (>=0.2.7)", "holidays", "keras-self-attention", "kotsu (>=0.3.1)", "matplotlib (>=3.3.2)", "mne", "numba (>=0.53,<0.59)", "pmdarima (>=1.8,!=1.8.1,<3.0.0)", "prophet (>=1.1)", "pycatch22 (<0.4.4)", "pykalman-bardo (>=0.9.7,<0.10)", "pyod (>=0.8)", "scikit-posthocs (>=0.6.5)", "seaborn (>=0.11)", "seasonal", "skpro (>=2,<2.2.0)", "statsforecast (>=0.5.2,<1.7.0)", "statsmodels (>=0.12.1)", "stumpy (>=1.5.1)", "tbats (>=1.1)", "tensorflow", "tsfresh (>=0.17)", "tslearn (>=0.5.2,<0.6.0)", "xarray"] +annotation = ["hmmlearn (>=0.2.7,<0.4)", "numba (>=0.53,<0.59)", "pyod (>=0.8,<1.2)"] +binder = ["jupyter", "pandas (<2.0.0)"] +classification = ["esig (>=0.9.7,<0.10)", "numba (>=0.53,<0.59)", "tensorflow (>=2,<=2.14)", "tsfresh (>=0.17,<0.21)"] +clustering = ["numba (>=0.53,<0.59)", "tslearn (>=0.5.2,<0.6.3)"] +cython-extras = ["mrseql", "mrsqm", "numba (<0.59)"] +dev = ["backoff", "httpx", "pre-commit", "pytest", "pytest-cov", "pytest-randomly", "pytest-timeout", "pytest-xdist", "wheel"] +dl = ["tensorflow (>=2,<=2.14)", "torch"] +docs = ["Sphinx (!=7.2.0,<8.0.0)", "jupyter", "myst-parser", "nbsphinx (>=0.8.6)", "numpydoc", "pydata-sphinx-theme", "sphinx-copybutton", "sphinx-design (<0.6.0)", "sphinx-gallery (<0.16.0)", "sphinx-issues (<4.0.0)", "tabulate"] +forecasting = ["arch (>=5.6,<6.3)", "pmdarima (>=1.8,!=1.8.1,<2.1)", "prophet (>=1.1,<1.2)", "skpro (>=2,<2.2)", "statsforecast (>=0.5.2,<1.7)", "statsmodels (>=0.12.1,<0.15)", "tbats (>=1.1,<1.2)"] +mlflow = ["mlflow"] +mlflow-tests = ["boto3", "botocore", "mlflow", "moto"] +networks = ["keras-self-attention (>=0.51,<0.52)", "tensorflow (>=2,<=2.14)"] +pandas1 = ["pandas (<2.0.0)"] +param-est = ["seasonal (>=0.3.1,<0.4)", "statsmodels (>=0.12.1,<0.15)"] +regression = ["numba (>=0.53,<0.59)", "tensorflow (>=2,<=2.14)"] +tests = ["pytest (>=7.4,<7.5)", "pytest-cov (>=4.1,<4.2)", "pytest-randomly (>=3.15,<3.16)", "pytest-timeout (>=2.1,<2.3)", "pytest-xdist (>=3.3,<3.6)"] +transformations = ["esig (>=0.9.7,<0.10)", "filterpy (>=1.4.5,<1.5)", "holidays (>=0.29,<0.40)", "mne (>=1.5,<1.6)", "numba 
(>=0.53,<0.59)", "pycatch22 (>=0.4,<0.4.5)", "pykalman-bardo (>=0.9.7,<0.10)", "statsmodels (>=0.12.1,<0.15)", "stumpy (>=1.5.1,<1.13)", "tsfresh (>=0.17,<0.21)"] + +[[package]] +name = "sqlalchemy" +version = "2.0.32" +description = "Database Abstraction Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "SQLAlchemy-2.0.32-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0c9045ecc2e4db59bfc97b20516dfdf8e41d910ac6fb667ebd3a79ea54084619"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1467940318e4a860afd546ef61fefb98a14d935cd6817ed07a228c7f7c62f389"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5954463675cb15db8d4b521f3566a017c8789222b8316b1e6934c811018ee08b"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:167e7497035c303ae50651b351c28dc22a40bb98fbdb8468cdc971821b1ae533"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b27dfb676ac02529fb6e343b3a482303f16e6bc3a4d868b73935b8792edb52d0"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bf2360a5e0f7bd75fa80431bf8ebcfb920c9f885e7956c7efde89031695cafb8"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-win32.whl", hash = "sha256:306fe44e754a91cd9d600a6b070c1f2fadbb4a1a257b8781ccf33c7067fd3e4d"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-win_amd64.whl", hash = "sha256:99db65e6f3ab42e06c318f15c98f59a436f1c78179e6a6f40f529c8cc7100b22"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:21b053be28a8a414f2ddd401f1be8361e41032d2ef5884b2f31d31cb723e559f"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b178e875a7a25b5938b53b006598ee7645172fccafe1c291a706e93f48499ff5"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723a40ee2cc7ea653645bd4cf024326dea2076673fc9d3d33f20f6c81db83e1d"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:295ff8689544f7ee7e819529633d058bd458c1fd7f7e3eebd0f9268ebc56c2a0"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:49496b68cd190a147118af585173ee624114dfb2e0297558c460ad7495f9dfe2"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:acd9b73c5c15f0ec5ce18128b1fe9157ddd0044abc373e6ecd5ba376a7e5d961"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-win32.whl", hash = "sha256:9365a3da32dabd3e69e06b972b1ffb0c89668994c7e8e75ce21d3e5e69ddef28"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-win_amd64.whl", hash = "sha256:8bd63d051f4f313b102a2af1cbc8b80f061bf78f3d5bd0843ff70b5859e27924"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6bab3db192a0c35e3c9d1560eb8332463e29e5507dbd822e29a0a3c48c0a8d92"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:19d98f4f58b13900d8dec4ed09dd09ef292208ee44cc9c2fe01c1f0a2fe440e9"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd33c61513cb1b7371fd40cf221256456d26a56284e7d19d1f0b9f1eb7dd7e8"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d6ba0497c1d066dd004e0f02a92426ca2df20fac08728d03f67f6960271feec"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:2b6be53e4fde0065524f1a0a7929b10e9280987b320716c1509478b712a7688c"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:916a798f62f410c0b80b63683c8061f5ebe237b0f4ad778739304253353bc1cb"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-win32.whl", hash = "sha256:31983018b74908ebc6c996a16ad3690301a23befb643093fcfe85efd292e384d"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-win_amd64.whl", hash = "sha256:4363ed245a6231f2e2957cccdda3c776265a75851f4753c60f3004b90e69bfeb"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b8afd5b26570bf41c35c0121801479958b4446751a3971fb9a480c1afd85558e"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c750987fc876813f27b60d619b987b057eb4896b81117f73bb8d9918c14f1cad"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ada0102afff4890f651ed91120c1120065663506b760da4e7823913ebd3258be"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:78c03d0f8a5ab4f3034c0e8482cfcc415a3ec6193491cfa1c643ed707d476f16"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:3bd1cae7519283ff525e64645ebd7a3e0283f3c038f461ecc1c7b040a0c932a1"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-win32.whl", hash = "sha256:01438ebcdc566d58c93af0171c74ec28efe6a29184b773e378a385e6215389da"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-win_amd64.whl", hash = "sha256:4979dc80fbbc9d2ef569e71e0896990bc94df2b9fdbd878290bd129b65ab579c"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c742be912f57586ac43af38b3848f7688863a403dfb220193a882ea60e1ec3a"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:62e23d0ac103bcf1c5555b6c88c114089587bc64d048fef5bbdb58dfd26f96da"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:251f0d1108aab8ea7b9aadbd07fb47fb8e3a5838dde34aa95a3349876b5a1f1d"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ef18a84e5116340e38eca3e7f9eeaaef62738891422e7c2a0b80feab165905f"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3eb6a97a1d39976f360b10ff208c73afb6a4de86dd2a6212ddf65c4a6a2347d5"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0c1c9b673d21477cec17ab10bc4decb1322843ba35b481585facd88203754fc5"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-win32.whl", hash = "sha256:c41a2b9ca80ee555decc605bd3c4520cc6fef9abde8fd66b1cf65126a6922d65"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-win_amd64.whl", hash = "sha256:8a37e4d265033c897892279e8adf505c8b6b4075f2b40d77afb31f7185cd6ecd"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:52fec964fba2ef46476312a03ec8c425956b05c20220a1a03703537824b5e8e1"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:328429aecaba2aee3d71e11f2477c14eec5990fb6d0e884107935f7fb6001632"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85a01b5599e790e76ac3fe3aa2f26e1feba56270023d6afd5550ed63c68552b3"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaf04784797dcdf4c0aa952c8d234fa01974c4729db55c45732520ce12dd95b4"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:4488120becf9b71b3ac718f4138269a6be99a42fe023ec457896ba4f80749525"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:14e09e083a5796d513918a66f3d6aedbc131e39e80875afe81d98a03312889e6"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-win32.whl", hash = "sha256:0d322cc9c9b2154ba7e82f7bf25ecc7c36fbe2d82e2933b3642fc095a52cfc78"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-win_amd64.whl", hash = "sha256:7dd8583df2f98dea28b5cd53a1beac963f4f9d087888d75f22fcc93a07cf8d84"}, + {file = "SQLAlchemy-2.0.32-py3-none-any.whl", hash = "sha256:e567a8793a692451f706b363ccf3c45e056b67d90ead58c3bc9471af5d212202"}, + {file = "SQLAlchemy-2.0.32.tar.gz", hash = "sha256:c1b88cc8b02b6a5f0efb0345a03672d4c897dc7d92585176f88c67346f565ea8"}, +] + +[package.dependencies] +greenlet = {version = "!=0.4.17", markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} +typing-extensions = ">=4.6.0" + +[package.extras] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] +aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] +aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] +asyncio = ["greenlet (!=0.4.17)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] +mssql = ["pyodbc"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx_oracle (>=8)"] +oracle-oracledb = ["oracledb (>=1.0.1)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] +postgresql-pg8000 = ["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] +sqlcipher = ["sqlcipher3_binary"] + +[[package]] +name = "statsmodels" +version = "0.14.1" +description = "Statistical computations and models for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "statsmodels-0.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:43af9c0b07c9d72f275cf14ea54a481a3f20911f0b443181be4769def258fdeb"}, + {file = "statsmodels-0.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a16975ab6ad505d837ba9aee11f92a8c5b49c4fa1ff45b60fe23780b19e5705e"}, + {file = "statsmodels-0.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e278fe74da5ed5e06c11a30851eda1af08ef5af6be8507c2c45d2e08f7550dde"}, + {file = "statsmodels-0.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0564d92cb05b219b4538ed09e77d96658a924a691255e1f7dd23ee338df441b"}, + {file = "statsmodels-0.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5385e22e72159a09c099c4fb975f350a9f3afeb57c1efce273b89dcf1fe44c0f"}, + {file = "statsmodels-0.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:0a8aae75a2e08ebd990e5fa394f8e32738b55785cb70798449a3f4207085e667"}, + {file = "statsmodels-0.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b69a63ad6c979a6e4cde11870ffa727c76a318c225a7e509f031fbbdfb4e416a"}, + {file = "statsmodels-0.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:7562cb18a90a114f39fab6f1c25b9c7b39d9cd5f433d0044b430ca9d44a8b52c"}, + {file = "statsmodels-0.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3abaca4b963259a2bf349c7609cfbb0ce64ad5fb3d92d6f08e21453e4890248"}, + {file = "statsmodels-0.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0f727fe697f6406d5f677b67211abe5a55101896abdfacdb3f38410405f6ad8"}, + {file = "statsmodels-0.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b6838ac6bdb286daabb5e91af90fd4258f09d0cec9aace78cc441cb2b17df428"}, + {file = "statsmodels-0.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:709bfcef2dbe66f705b17e56d1021abad02243ee1a5d1efdb90f9bad8b06a329"}, + {file = "statsmodels-0.14.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f32a7cd424cf33304a54daee39d32cccf1d0265e652c920adeaeedff6d576457"}, + {file = "statsmodels-0.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f8c30181c084173d662aaf0531867667be2ff1bee103b84feb64f149f792dbd2"}, + {file = "statsmodels-0.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2de2b97413913d52ad6342dece2d653e77f78620013b7705fad291d4e4266ccb"}, + {file = "statsmodels-0.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3420f88289c593ba2bca33619023059c476674c160733bd7d858564787c83d3"}, + {file = "statsmodels-0.14.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c008e16096f24f0514e53907890ccac6589a16ad6c81c218f2ee6752fdada555"}, + {file = "statsmodels-0.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:bc0351d279c4e080f0ce638a3d886d312aa29eade96042e3ba0a73771b1abdfb"}, + {file = "statsmodels-0.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf293ada63b2859d95210165ad1dfcd97bd7b994a5266d6fbeb23659d8f0bf68"}, + {file = "statsmodels-0.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44ca8cb88fa3d3a4ffaff1fb8eb0e98bbf83fc936fcd9b9eedee258ecc76696a"}, + {file = "statsmodels-0.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d5373d176239993c095b00d06036690a50309a4e00c2da553b65b840f956ae6"}, + {file = "statsmodels-0.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a532dfe899f8b6632cd8caa0b089b403415618f51e840d1817a1e4b97e200c73"}, + {file = "statsmodels-0.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:4fe0a60695952b82139ae8750952786a700292f9e0551d572d7685070944487b"}, + {file = "statsmodels-0.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:04293890f153ffe577e60a227bd43babd5f6c1fc50ea56a3ab1862ae85247a95"}, + {file = "statsmodels-0.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e70a2e93d54d40b2cb6426072acbc04f35501b1ea2569f6786964adde6ca572"}, + {file = "statsmodels-0.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab3a73d16c0569adbba181ebb967e5baaa74935f6d2efe86ac6fc5857449b07d"}, + {file = "statsmodels-0.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eefa5bcff335440ee93e28745eab63559a20cd34eea0375c66d96b016de909b3"}, + {file = "statsmodels-0.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:bc43765710099ca6a942b5ffa1bac7668965052542ba793dd072d26c83453572"}, + {file = "statsmodels-0.14.1.tar.gz", hash = "sha256:2260efdc1ef89f39c670a0bd8151b1d0843567781bcafec6cda0534eb47a94f6"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.18,<2", markers = "python_version != \"3.10\" or platform_system != \"Windows\" or platform_python_implementation == \"PyPy\""}, + {version = 
">=1.22.3,<2", markers = "python_version == \"3.10\" and platform_system == \"Windows\" and platform_python_implementation != \"PyPy\""}, +] +packaging = ">=21.3" +pandas = ">=1.0,<2.1.0 || >2.1.0" +patsy = ">=0.5.4" +scipy = ">=1.4,<1.9.2 || >1.9.2" + +[package.extras] +build = ["cython (>=0.29.33)"] +develop = ["colorama", "cython (>=0.29.33)", "cython (>=0.29.33,<4.0.0)", "flake8", "isort", "joblib", "matplotlib (>=3)", "oldest-supported-numpy (>=2022.4.18)", "pytest (>=7.3.0)", "pytest-cov", "pytest-randomly", "pytest-xdist", "pywinpty", "setuptools-scm[toml] (>=8.0,<9.0)"] +docs = ["ipykernel", "jupyter-client", "matplotlib", "nbconvert", "nbformat", "numpydoc", "pandas-datareader", "sphinx"] + +[[package]] +name = "sympy" +version = "1.13.2" +description = "Computer algebra system (CAS) in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "sympy-1.13.2-py3-none-any.whl", hash = "sha256:c51d75517712f1aed280d4ce58506a4a88d635d6b5dd48b39102a7ae1f3fcfe9"}, + {file = "sympy-1.13.2.tar.gz", hash = "sha256:401449d84d07be9d0c7a46a64bd54fe097667d5e7181bfe67ec777be9e01cb13"}, +] + +[package.dependencies] +mpmath = ">=1.1.0,<1.4" + +[package.extras] +dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"] + +[[package]] +name = "threadpoolctl" +version = "3.5.0" +description = "threadpoolctl" +optional = false +python-versions = ">=3.8" +files = [ + {file = "threadpoolctl-3.5.0-py3-none-any.whl", hash = "sha256:56c1e26c150397e58c4926da8eeee87533b1e32bef131bd4bf6a2f45f3185467"}, + {file = "threadpoolctl-3.5.0.tar.gz", hash = "sha256:082433502dd922bf738de0d8bcc4fdcbf0979ff44c42bd40f5af8a282f6fa107"}, +] + +[[package]] +name = "thrift" +version = "0.13.0" +description = "Python bindings for the Apache Thrift RPC system" +optional = false +python-versions = "*" +files = [ + {file = "thrift-0.13.0.tar.gz", hash = "sha256:9af1c86bf73433afc6010ed376a6c6aca2b54099cc0d61895f640870a9ae7d89"}, +] + +[package.dependencies] +six = ">=1.7.2" + +[package.extras] +all = ["tornado (>=4.0)", "twisted"] +tornado = ["tornado (>=4.0)"] +twisted = ["twisted"] + +[[package]] +name = "torch" +version = "2.1.0" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "torch-2.1.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:bf57f8184b2c317ef81fb33dc233ce4d850cd98ef3f4a38be59c7c1572d175db"}, + {file = "torch-2.1.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:a04a0296d47f28960f51c18c5489a8c3472f624ec3b5bcc8e2096314df8c3342"}, + {file = "torch-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:0bd691efea319b14ef239ede16d8a45c246916456fa3ed4f217d8af679433cc6"}, + {file = "torch-2.1.0-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:101c139152959cb20ab370fc192672c50093747906ee4ceace44d8dd703f29af"}, + {file = "torch-2.1.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:a6b7438a90a870e4cdeb15301519ae6c043c883fcd224d303c5b118082814767"}, + {file = "torch-2.1.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:2224622407ca52611cbc5b628106fde22ed8e679031f5a99ce286629fc696128"}, + {file = "torch-2.1.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:8132efb782cd181cc2dcca5e58effbe4217cdb2581206ac71466d535bf778867"}, + {file = "torch-2.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:5c3bfa91ce25ba10116c224c59d5b64cdcce07161321d978bd5a1f15e1ebce72"}, + {file = "torch-2.1.0-cp311-none-macosx_10_9_x86_64.whl", hash = 
"sha256:601b0a2a9d9233fb4b81f7d47dca9680d4f3a78ca3f781078b6ad1ced8a90523"}, + {file = "torch-2.1.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:3cd1dedff13884d890f18eea620184fb4cd8fd3c68ce3300498f427ae93aa962"}, + {file = "torch-2.1.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:fb7bf0cc1a3db484eb5d713942a93172f3bac026fcb377a0cd107093d2eba777"}, + {file = "torch-2.1.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:761822761fffaa1c18a62c5deb13abaa780862577d3eadc428f1daa632536905"}, + {file = "torch-2.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:458a6d6d8f7d2ccc348ac4d62ea661b39a3592ad15be385bebd0a31ced7e00f4"}, + {file = "torch-2.1.0-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:c8bf7eaf9514465e5d9101e05195183470a6215bb50295c61b52302a04edb690"}, + {file = "torch-2.1.0-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:05661c32ec14bc3a157193d0f19a7b19d8e61eb787b33353cad30202c295e83b"}, + {file = "torch-2.1.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:556d8dd3e0c290ed9d4d7de598a213fb9f7c59135b4fee144364a8a887016a55"}, + {file = "torch-2.1.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:de7d63c6ecece118684415a3dbd4805af4a4c1ee1490cccf7405d8c240a481b4"}, + {file = "torch-2.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:2419cf49aaf3b2336c7aa7a54a1b949fa295b1ae36f77e2aecb3a74e3a947255"}, + {file = "torch-2.1.0-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:6ad491e70dbe4288d17fdbfc7fbfa766d66cbe219bc4871c7a8096f4a37c98df"}, + {file = "torch-2.1.0-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:421739685eba5e0beba42cb649740b15d44b0d565c04e6ed667b41148734a75b"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" +jinja2 = "*" +networkx = "*" +sympy = "*" +typing-extensions = "*" + +[package.extras] +opt-einsum = ["opt-einsum (>=3.3)"] + +[[package]] +name = "tqdm" +version = "4.66.5" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"}, + {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "urllib3" +version = "2.2.2" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, + {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "zipp" +version = "3.20.1" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.8" +files = [ + {file = "zipp-3.20.1-py3-none-any.whl", hash = "sha256:9960cd8967c8f85a56f920d5d507274e74f9ff813a0ab8889a5b5be2daf44064"}, + {file = "zipp-3.20.1.tar.gz", hash = "sha256:c22b14cc4763c5a5b04134207736c107db42e9d3ef2d9779d465f5f1bcba572b"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + +[metadata] +lock-version = "2.0" +python-versions = ">=3.8, <3.13" +content-hash = "1fb188409bff9f4938040aa7c6b0e4c7a50aaef7b36e39b3060324b9b85429bb" diff --git a/iotdb-core/ainode/pom.xml b/iotdb-core/ainode/pom.xml new file mode 100644 index 0000000000000..ba4f8bb7014a5 --- /dev/null +++ b/iotdb-core/ainode/pom.xml @@ -0,0 +1,371 @@ + + + + 4.0.0 + + org.apache.iotdb + iotdb-core + 1.3.4-SNAPSHOT + + iotdb-ainode + IoTDB: Core: AINode + + + + org.apache.iotdb + iotdb-thrift-commons + 1.3.4-SNAPSHOT + provided + + + org.apache.iotdb + iotdb-thrift + 1.3.4-SNAPSHOT + provided + + + org.apache.iotdb + iotdb-thrift-confignode + 1.3.4-SNAPSHOT + provided + + + org.apache.iotdb + iotdb-thrift-ainode + 1.3.4-SNAPSHOT + provided + + + + + + + org.apache.maven.plugins + maven-clean-plugin + + + + dist + + + iotdb + + conf/ + thrift/ + + + + target + + + venv + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + org.codehaus.gmaven + groovy-maven-plugin + 2.1.1 + + + + sync-python-version + validate + + execute + + + ${project.basedir}/resources/syncPythonVersion.groovy + + + + + + org.apache.groovy + groovy + 4.0.22 + + + org.apache.groovy + groovy-toml + 4.0.22 + + + + + + org.apache.maven.plugins + maven-resources-plugin + + ${project.build.sourceEncoding} + + + + copy-thrift-python-resources + generate-sources + + copy-resources + + + ${basedir}/iotdb/thrift/ + + + ${basedir}/../../iotdb-protocol/thrift-commons/target/generated-sources-python/iotdb/thrift/ + + + ${basedir}/../../iotdb-protocol/thrift-confignode/target/generated-sources-python/iotdb/thrift/ + + + ${basedir}/../../iotdb-protocol/thrift-ainode/target/generated-sources-python/iotdb/thrift/ + + + ${basedir}/../../iotdb-protocol/thrift-datanode/target/generated-sources-python/iotdb/thrift/ + rpc/** + + + + + + + copy-pom-properties + generate-sources + + copy-resources + + + ${basedir}/iotdb/conf/ + + + ${basedir}/resources/ + pom.properties + true + + + + + + + + + pl.project13.maven + git-commit-id-plugin + + + generate-git-properties + generate-resources + + revision + + + true + ${project.basedir}/iotdb/conf/git.properties + + ^git.commit.id.abbrev$ + ^git.dirty$ + + 
full + false + true + + -dev + + + + + + + org.codehaus.mojo + exec-maven-plugin + + + + python-venv + initialize + + exec + + + ${python.exe.bin} + + -m + venv + ./venv + + + + + + python-upgrade-pip + initialize + + exec + + + ${python.venv.bin}${python.exe.bin} + + -m + pip + install + --upgrade + pip + + + + + + python-install-poetry + initialize + + exec + + + ${python.venv.bin}pip3 + + install + poetry + + + + + + + python-compile + compile + + exec + + + ${python.venv.bin}poetry + + build + + + + + + + + + org.apache.maven.plugins + maven-assembly-plugin + 3.3.0 + + + create-ainode-zip + package + + single + + + apache-iotdb-ainode-${project.version} + false + + ainode.xml + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + check-dependencies + + analyze-only + + verify + + + org.apache.iotdb:iotdb-thrift-commons + org.apache.iotdb:iotdb-thrift + org.apache.iotdb:iotdb-thrift-confignode + org.apache.iotdb:iotdb-thrift-ainode + + + + + + + + diff --git a/iotdb-core/ainode/pyproject.toml b/iotdb-core/ainode/pyproject.toml new file mode 100644 index 0000000000000..beb32827b05b0 --- /dev/null +++ b/iotdb-core/ainode/pyproject.toml @@ -0,0 +1,66 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
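+#
+# Note: the wheel that the env scripts later in this patch install can also be
+# rebuilt by hand with Poetry (a minimal sketch, assuming Poetry is on the PATH):
+#
+#   cd iotdb-core/ainode
+#   poetry build                                # writes sdist + wheel into dist/
+#   pip install dist/apache_iotdb_ainode-*.whl  # what ainode-env.sh/.bat automate
+#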
+# +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" + +[tool.poetry] +name = "apache-iotdb-ainode" +version = "1.3.4.dev" +description = "Apache IoTDB AINode" +readme = "README.md" +authors = ["Apache Software Foundation <dev@iotdb.apache.org>"] +license = "Apache License, Version 2.0" +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Topic :: Software Development :: Libraries", + "Topic :: Software Development :: Libraries :: Python Modules", +] +include = [ + {path = "iotdb/thrift/*", format = "wheel"}, + {path = "iotdb/thrift/common/*", format = "wheel"}, + {path = "iotdb/thrift/confignode/*", format = "wheel"}, + {path = "iotdb/thrift/datanode/*", format = "wheel"}, + {path = "iotdb/thrift/ainode/*", format = "wheel"}, + {path = "iotdb/conf/*", format = "wheel"}, +] +packages = [ + { include = "iotdb" } +] + +[tool.poetry.dependencies] +python = ">=3.8, <3.13" + +numpy = "^1.21.4" +pandas = "^1.3.5" +torch = "2.2.0" +pylru = "^1.2.1" + +thrift = "^0.13.0" +dynaconf = "^3.1.11" +requests = "^2.31.0" +optuna = "^3.2.0" +psutil = "^5.9.5" +sktime = "^0.24.1" +pmdarima = "^2.0.4" +hmmlearn = "^0.3.0" + +[tool.poetry.scripts] +ainode = "iotdb.ainode.script:main" \ No newline at end of file diff --git a/iotdb-core/ainode/resources/conf/ainode-env.bat b/iotdb-core/ainode/resources/conf/ainode-env.bat new file mode 100644 index 0000000000000..fa304528a5c60 --- /dev/null +++ b/iotdb-core/ainode/resources/conf/ainode-env.bat @@ -0,0 +1,128 @@ +@REM +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM + +@echo off + +@REM The default venv environment is used if ain_interpreter_dir is not set. Please use absolute path without quotation mark +@REM set ain_interpreter_dir= + +@REM Set ain_force_reinstall to 1 to force reinstall ainode +set ain_force_reinstall=0 + +@REM don't install dependencies online +set ain_install_offline=0 + +set ENV_SCRIPT_DIR=%~dp0 + +:initial +if "%1"=="" goto done +set aux=%1 +if "%aux:~0,2%"=="-r" ( + set ain_force_reinstall=1 + shift + goto initial +) +if "%aux:~0,2%"=="-n" ( + set ain_no_dependencies=--no-dependencies + shift + goto initial +) +if "%aux:~0,1%"=="-" ( + set nome=%aux:~1,250% +) else ( + set "%nome%=%1" + set nome= +) +shift +goto initial + +:done +@REM check if the parameters are set +if "%i%"=="" ( + echo No interpreter_dir is set, use default value.
+) else ( + set ain_interpreter_dir=%i% +) + +echo Script got inputs: ain_interpreter_dir: %ain_interpreter_dir% , ain_force_reinstall: %ain_force_reinstall% +if "%ain_interpreter_dir%"=="" ( + %ENV_SCRIPT_DIR%//..//venv//Scripts//python.exe -c "import sys; print(sys.executable)" && ( + echo Activate default venv environment + ) || ( + echo Creating default venv environment + python -m venv "%ENV_SCRIPT_DIR%//..//venv" + ) + set ain_interpreter_dir="%ENV_SCRIPT_DIR%//..//venv//Scripts//python.exe" +) + +@REM Switch the working directory to the directory one level above the script +cd %ENV_SCRIPT_DIR%/../ + +echo Confirming ainode +%ain_interpreter_dir% -m pip config set global.disable-pip-version-check true +%ain_interpreter_dir% -m pip list | findstr /C:"apache-iotdb-ainode" >nul +if %errorlevel% == 0 ( + if %ain_force_reinstall% == 0 ( + echo ainode is already installed + exit /b 0 + ) +) + +set ain_only_ainode=1 +@REM if $ain_install_offline is 1 then do not install dependencies +if %ain_install_offline% == 1 ( + @REM if offline and not -n, then install dependencies + if "%ain_no_dependencies%"=="" ( + set ain_only_ainode=0 + ) else ( + set ain_only_ainode=1 + ) + set ain_no_dependencies=--no-dependencies + echo Installing ainode offline----without dependencies... +) + +if %ain_force_reinstall% == 1 ( + set ain_force_reinstall=--force-reinstall +) else ( + set ain_force_reinstall= +) + +echo Installing ainode... +@REM Print current work dir +cd lib +for %%i in (*.whl *.tar.gz) do ( + echo %%i | findstr "ainode" >nul && ( + echo Installing ainode body: %%i + %ain_interpreter_dir% -m pip install %%i %ain_force_reinstall% --no-warn-script-location %ain_no_dependencies% --find-links https://download.pytorch.org/whl/cpu/torch_stable.html + ) || ( + @REM if ain_only_ainode is 0 then install dependencies + if %ain_only_ainode% == 0 ( + echo Installing dependencies: %%i + set ain_force_reinstall=--force-reinstall + %ain_interpreter_dir% -m pip install %%i %ain_force_reinstall% --no-warn-script-location %ain_no_dependencies% --find-links https://download.pytorch.org/whl/cpu/torch_stable.html + ) + ) + if %errorlevel% == 1 ( + echo Failed to install ainode + exit /b 1 + ) +) +echo ainode is installed successfully +cd .. +exit /b 0 diff --git a/iotdb-core/ainode/resources/conf/ainode-env.sh b/iotdb-core/ainode/resources/conf/ainode-env.sh new file mode 100644 index 0000000000000..1ec434ad2d983 --- /dev/null +++ b/iotdb-core/ainode/resources/conf/ainode-env.sh @@ -0,0 +1,138 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# The default venv environment is used if ain_interpreter_dir is not set.
Please use absolute path without quotation mark +# ain_interpreter_dir= + +# Set ain_force_reinstall to 1 to force reinstall AINode +ain_force_reinstall=0 + +# don't install dependencies online +ain_install_offline=0 + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + +# fetch parameters with names +while getopts "i:t:rnm:" opt; do + case $opt in + i) + p_ain_interpreter_dir="$OPTARG" + ;; + r) + p_ain_force_reinstall=1 + ;; + t) ;; + n) + p_ain_no_dependencies="--no-dependencies" + ;; + m) + p_pypi_mirror="$OPTARG" + ;; + \?) + echo "Invalid option -$OPTARG" >&2 + exit 1 + ;; + esac +done + +if [ -z "$p_ain_interpreter_dir" ]; then + echo "No interpreter_dir is set, use default value." +else + ain_interpreter_dir="$p_ain_interpreter_dir" +fi + +if [ -z "$p_ain_force_reinstall" ]; then + echo "No check_version is set, use default value." +else + ain_force_reinstall="$p_ain_force_reinstall" +fi +echo Script got inputs: "ain_interpreter_dir: $ain_interpreter_dir", "ain_force_reinstall: $ain_force_reinstall" + +if [ -z $ain_interpreter_dir ]; then + $(dirname "$0")/../venv/bin/python3 -c "import sys; print(sys.executable)" && + echo "Activate default venv environment" || ( + echo "Creating default venv environment" && python3 -m venv "$(dirname "$0")/../venv" + ) + ain_interpreter_dir="$SCRIPT_DIR/../venv/bin/python3" +fi +echo "Calling venv to check: $ain_interpreter_dir" + +# Change the working directory to the parent directory +cd "$SCRIPT_DIR/.." + +echo "Confirming AINode..." +$ain_interpreter_dir -m pip config set global.disable-pip-version-check true +$ain_interpreter_dir -m pip list | grep "apache-iotdb-ainode" >/dev/null +if [ $? -eq 0 ]; then + if [ $ain_force_reinstall -eq 0 ]; then + echo "AINode is already installed" + exit 0 + fi +fi + +ain_only_ainode=1 + +# if $ain_install_offline is 1 then do not install dependencies +if [ $ain_install_offline -eq 1 ]; then + # if offline and not -n, then install dependencies + if [ -z "$p_ain_no_dependencies" ]; then + ain_only_ainode=0 + else + ain_only_ainode=1 + fi + p_ain_no_dependencies="--no-dependencies" + echo "Installing AINode offline----without dependencies..." +fi + +if [ $ain_force_reinstall -eq 1 ]; then + p_ain_force_reinstall="--force-reinstall" +else + p_ain_force_reinstall="" +fi + +echo "Installing AINode..." +cd "$SCRIPT_DIR/../lib/" +shopt -s nullglob +for i in *.whl; do + if [[ $i =~ "ainode" ]]; then + echo Installing AINode body: $i + if [ -z "$p_pypi_mirror" ]; then + $ain_interpreter_dir -m pip install "$i" $p_ain_force_reinstall --no-warn-script-location $p_ain_no_dependencies --find-links https://download.pytorch.org/whl/cpu/torch_stable.html + else + $ain_interpreter_dir -m pip install "$i" $p_ain_force_reinstall -i $p_pypi_mirror --no-warn-script-location $p_ain_no_dependencies --find-links https://download.pytorch.org/whl/cpu/torch_stable.html + fi + else + # if ain_only_ainode is 0 then install dependencies + if [ $ain_only_ainode -eq 0 ]; then + echo Installing dependencies $i + if [ -z "$p_pypi_mirror" ]; then + $ain_interpreter_dir -m pip install "$i" $p_ain_force_reinstall --no-warn-script-location $p_ain_no_dependencies + else + $ain_interpreter_dir -m pip install "$i" $p_ain_force_reinstall -i $p_pypi_mirror --no-warn-script-location $p_ain_no_dependencies + fi + fi + fi + if [ $? 
-eq 1 ]; then + echo "Failed to install AINode" + exit 1 + fi +done +echo "AINode is installed successfully" +exit 0 diff --git a/iotdb-core/ainode/resources/conf/iotdb-ainode.properties b/iotdb-core/ainode/resources/conf/iotdb-ainode.properties new file mode 100644 index 0000000000000..2b208e7212538 --- /dev/null +++ b/iotdb-core/ainode/resources/conf/iotdb-ainode.properties @@ -0,0 +1,60 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# Used to indicate the cluster name and to distinguish between different clusters. +# Datatype: string +# cluster_name=defaultCluster + +# ConfigNode address registered at AINode startup +# It can be modified only before the service is started for the first time +# Datatype: String +# ain_seed_config_node=127.0.0.1:10710 + +# Used for connection of DataNode/ConfigNode clients +# Can be set to 127.0.0.1 (for local tests) or an IPv4 address +# Datatype: String +# ain_inference_rpc_address=127.0.0.1 + +# Used for connection of DataNode/ConfigNode clients +# Bind with MN_RPC_ADDRESS +# Datatype: String +# ain_inference_rpc_port=10810 + +# The AINode metadata storage path. +# The base directory of a relative path depends on the operating system. +# It is recommended to use an absolute path. +# Datatype: String +# ain_system_dir=data/ainode/system + +# The path where AINode stores model files +# The base directory of a relative path depends on the operating system. +# It is recommended to use an absolute path. +# Datatype: String +# ain_models_dir=data/ainode/models + +# The path where AINode stores logs +# The base directory of a relative path depends on the operating system. +# It is recommended to use an absolute path. +# Datatype: String +# ain_logs_dir=logs/ainode + +# Whether to use compression in Thrift +# Please use 0 or 1 +# Datatype: Boolean +# ain_thrift_compression_enabled=0 \ No newline at end of file
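+
+# For orientation, a filled-in variant of the settings above (a sketch only; the
+# addresses and paths are illustrative assumptions, not recommendations):
+#
+# cluster_name=defaultCluster
+# ain_seed_config_node=192.168.1.10:10710
+# ain_inference_rpc_address=192.168.1.20
+# ain_inference_rpc_port=10810
+# ain_system_dir=/data/ainode/system
+# ain_models_dir=/data/ainode/models
+# ain_logs_dir=/var/log/ainode
+# ain_thrift_compression_enabled=1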
diff --git a/iotdb-core/ainode/resources/pom.properties b/iotdb-core/ainode/resources/pom.properties new file mode 100644 index 0000000000000..ee2e2a650e513 --- /dev/null +++ b/iotdb-core/ainode/resources/pom.properties @@ -0,0 +1,21 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +artifactId=iotdb-ainode +groupId=org.apache.iotdb +version=${project.version} \ No newline at end of file diff --git a/iotdb-core/ainode/resources/sbin/remove-ainode.bat b/iotdb-core/ainode/resources/sbin/remove-ainode.bat new file mode 100644 index 0000000000000..fe163e7feace1 --- /dev/null +++ b/iotdb-core/ainode/resources/sbin/remove-ainode.bat @@ -0,0 +1,105 @@ +@REM +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM + +@echo off + +IF "%~1"=="--help" ( + echo The script will remove an AINode. + echo When it is necessary to move an already connected AINode out of the cluster, the corresponding removal script can be executed. + echo Usage: + echo Remove the AINode with ainode_id + echo ./sbin/remove-ainode.bat -t [ainode_id] + echo. + echo Options: + echo ^ ^ -t = ainode_id + echo ^ ^ -i = When specifying the Python interpreter please enter the address of the executable file of the Python interpreter in the virtual environment. Currently AINode supports virtual environments such as venv, conda, etc. Inputting the system Python interpreter as the installation location is not supported. In order to ensure that scripts are recognized properly, please use absolute paths whenever possible! + EXIT /B 0 +) + +echo ``````````````````````````` +echo Removing IoTDB AINode +echo ``````````````````````````` + +set REMOVE_SCRIPT_DIR=%~dp0 +call %REMOVE_SCRIPT_DIR%\\..\\conf\\ainode-env.bat %* +if %errorlevel% neq 0 ( + echo Environment check failed. Exiting...
+ exit /b 1 +) + +:initial +if "%1"=="" goto interpreter +set aux=%1 +if "%aux:~0,1%"=="-" ( + set nome=%aux:~1,250% +) else ( + set "%nome%=%1" + set nome= +) +shift +goto initial + +for /f "tokens=2 delims==" %%a in ('findstr /i /c:"^ain_interpreter_dir" "%REMOVE_SCRIPT_DIR%\\..\\conf\\ainode-env.bat"') do ( + set _ain_interpreter_dir=%%a + goto :interpreter +) + +:interpreter +if "%i%"=="" ( + if "%_ain_interpreter_dir%"=="" ( + set _ain_interpreter_dir=%REMOVE_SCRIPT_DIR%\\..\\venv\\Scripts\\python.exe + ) +) else ( + set _ain_interpreter_dir=%i% +) + + +for /f "tokens=2 delims==" %%a in ('findstr /i /c:"^ain_system_dir" "%REMOVE_SCRIPT_DIR%\\..\\conf\\iotdb-ainode.properties"') do ( + set _ain_system_dir=%%a + goto :system +) + +:system +if "%_ain_system_dir%"=="" ( + set _ain_system_dir=%REMOVE_SCRIPT_DIR%\\..\\data\\ainode\\system +) + +echo Script got parameters: ain_interpreter_dir: %_ain_interpreter_dir%, ain_system_dir: %_ain_system_dir% + +cd %REMOVE_SCRIPT_DIR%\\.. +for %%i in ("%_ain_interpreter_dir%") do set "parent=%%~dpi" +set ain_ainode_dir=%parent%\\ainode.exe + +if "%t%"=="" ( + echo No target AINode set, use system.properties + %ain_ainode_dir% remove +) else ( + %ain_ainode_dir% remove %t% +) + +if %errorlevel% neq 0 ( + echo Remove AINode failed. Exiting... + exit /b 1 +) + +call %REMOVE_SCRIPT_DIR%\\stop-ainode.bat %* + +rd /s /q %_ain_system_dir% + +pause \ No newline at end of file diff --git a/iotdb-core/ainode/resources/sbin/remove-ainode.sh b/iotdb-core/ainode/resources/sbin/remove-ainode.sh new file mode 100755 index 0000000000000..2a27661460d5f --- /dev/null +++ b/iotdb-core/ainode/resources/sbin/remove-ainode.sh @@ -0,0 +1,110 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +if [ "$#" -eq 1 ] && [ "$1" == "--help" ]; then + echo "The script will remove an AINode." + echo "When it is necessary to move an already connected AINode out of the cluster, the corresponding removal script can be executed." + echo "Usage:" + echo "Remove the AINode with ainode_id" + echo "./sbin/remove-ainode.sh -t [ainode_id]" + echo "" + echo "Options:" + echo " -t = ainode_id" + echo " -i = When specifying the Python interpreter please enter the address of the executable file of the Python interpreter in the virtual environment. Currently AINode supports virtual environments such as venv, conda, etc. Inputting the system Python interpreter as the installation location is not supported. In order to ensure that scripts are recognized properly, please use absolute paths whenever possible!"
+ exit 0 +fi + +echo --------------------------- +echo Removing IoTDB AINode +echo --------------------------- + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +echo "SCRIPT_DIR: $SCRIPT_DIR" +chmod u+x $(dirname "$0")/../conf/ainode-env.sh +ain_interpreter_dir=$(sed -n 's/^ain_interpreter_dir=\(.*\)$/\1/p' $(dirname "$0")/../conf/ainode-env.sh) +ain_system_dir=$(sed -n 's/^ain_system_dir=\(.*\)$/\1/p' $(dirname "$0")/../conf/iotdb-ainode.properties) +bash $(dirname "$0")/../conf/ainode-env.sh $* +if [ $? -eq 1 ]; then + echo "Environment check failed. Exiting..." + exit 1 +fi + +# fetch parameters with names +while getopts "i:t:rn" opt; do + case $opt in + i) p_ain_interpreter_dir="$OPTARG" + ;; + r) p_ain_force_reinstall="$OPTARG" + ;; + t) p_ain_remove_target="$OPTARG" + ;; + n) + ;; + \?) echo "Invalid option -$OPTARG" >&2 + exit 1 + ;; + esac +done + +# If ain_interpreter_dir in parameters is empty: +if [ -z "$p_ain_interpreter_dir" ]; then + # If ain_interpreter_dir in ../conf/ainode-env.sh is empty, set default value to ../venv/bin/python3 + if [ -z "$ain_interpreter_dir" ]; then + ain_interpreter_dir="$SCRIPT_DIR/../venv/bin/python3" + fi +else + # If ain_interpreter_dir in parameters is not empty, set ain_interpreter_dir to the value in parameters + ain_interpreter_dir="$p_ain_interpreter_dir" +fi + +# If ain_system_dir is empty, set default value to ../data/ainode/system +if [ -z "$ain_system_dir" ] +then + ain_system_dir="$SCRIPT_DIR/../data/ainode/system" +fi + +echo "Script got parameters: ain_interpreter_dir: $ain_interpreter_dir, ain_system_dir: $ain_system_dir" + +# check if ain_interpreter_dir is an absolute path +if [[ "$ain_interpreter_dir" != /* ]]; then + ain_interpreter_dir="$SCRIPT_DIR/$ain_interpreter_dir" +fi + +# Change the working directory to the parent directory +cd "$SCRIPT_DIR/.." +ain_ainode_dir=$(dirname "$ain_interpreter_dir")/ainode + + +if [ -z "$p_ain_remove_target" ]; then + echo No target AINode set, use system.properties + $ain_ainode_dir remove +else + $ain_ainode_dir remove $p_ain_remove_target +fi + +if [ $? -eq 1 ]; then + echo "Remove AINode failed. Exiting..." + exit 1 +fi + +bash $SCRIPT_DIR/stop-ainode.sh $* + +# Remove system directory +rm -rf $ain_system_dir \ No newline at end of file diff --git a/iotdb-core/ainode/resources/sbin/start-ainode.bat b/iotdb-core/ainode/resources/sbin/start-ainode.bat new file mode 100644 index 0000000000000..e29109bbc4e43 --- /dev/null +++ b/iotdb-core/ainode/resources/sbin/start-ainode.bat @@ -0,0 +1,77 @@ +@REM +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. 
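+@REM
+@REM Usage sketch (illustrative only; the -i value below is a hypothetical venv
+@REM interpreter path, not a shipped default):
+@REM   sbin\start-ainode.bat
+@REM   sbin\start-ainode.bat -i D:\venvs\ainode\Scripts\python.exe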
+@REM + +@echo off + +echo ``````````````````````````` +echo Starting IoTDB AINode +echo ``````````````````````````` + +set START_SCRIPT_DIR=%~dp0 +call %START_SCRIPT_DIR%\\..\\conf\\ainode-env.bat %* +if %errorlevel% neq 0 ( + echo Environment check failed. Exiting... + exit /b 1 +) + +for /f "tokens=2 delims==" %%a in ('findstr /i /c:"^ain_interpreter_dir" "%START_SCRIPT_DIR%\\..\\conf\\ainode-env.bat"') do ( + set _ain_interpreter_dir=%%a + goto :done +) + +:initial +if "%1"=="" goto done +set aux=%1 +if "%aux:~0,1%"=="-" ( + set nome=%aux:~1,250% +) else ( + set "%nome%=%1" + set nome= +) +shift +goto initial + +:done +if "%i%"=="" ( + if "%_ain_interpreter_dir%"=="" ( + set _ain_interpreter_dir=%START_SCRIPT_DIR%\\..\\venv\\Scripts\\python.exe + ) +) else ( + set _ain_interpreter_dir=%i% +) + +echo Script got parameter: ain_interpreter_dir: %_ain_interpreter_dir% + +cd %START_SCRIPT_DIR%\\.. + +for %%i in ("%_ain_interpreter_dir%") do set "parent=%%~dpi" + +set ain_ainode_dir=%parent%\ainode.exe + +set ain_ainode_dir_new=%parent%\Scripts\\ainode.exe + +echo Starting AINode... + +%ain_ainode_dir% start +if %errorlevel% neq 0 ( + echo ain_ainode_dir_new is %ain_ainode_dir_new% + %ain_ainode_dir_new% start +) + +pause \ No newline at end of file diff --git a/iotdb-core/ainode/resources/sbin/start-ainode.sh b/iotdb-core/ainode/resources/sbin/start-ainode.sh new file mode 100644 index 0000000000000..dd1afbd8bda4f --- /dev/null +++ b/iotdb-core/ainode/resources/sbin/start-ainode.sh @@ -0,0 +1,78 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +echo --------------------------- +echo Starting IoTDB AINode +echo --------------------------- + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +echo "SCRIPT_DIR: $SCRIPT_DIR" +chmod u+x $(dirname "$0")/../conf/ainode-env.sh +ain_interpreter_dir=$(sed -n 's/^ain_interpreter_dir=\(.*\)$/\1/p' $(dirname "$0")/../conf/ainode-env.sh) +bash $(dirname "$0")/../conf/ainode-env.sh $* +if [ $? -eq 1 ]; then + echo "Environment check failed. Exiting..." + exit 1 +fi + + +# fetch parameters with names +while getopts "i:rn" opt; do + case $opt in + i) p_ain_interpreter_dir="$OPTARG" + ;; + r) p_ain_force_reinstall="$OPTARG" + ;; + n) + ;; + \?) 
echo "Invalid option -$OPTARG" >&2 + exit 1 + ;; + esac +done + +# If ain_interpreter_dir in parameters is empty: +if [ -z "$p_ain_interpreter_dir" ]; then + # If ain_interpreter_dir in ../conf/ainode-env.sh is empty, set default value to ../venv/bin/python3 + if [ -z "$ain_interpreter_dir" ]; then + ain_interpreter_dir="$SCRIPT_DIR/../venv/bin/python3" + fi +else + # If ain_interpreter_dir in parameters is not empty, set ain_interpreter_dir to the value in parameters + ain_interpreter_dir="$p_ain_interpreter_dir" +fi + +# check if ain_interpreter_dir is an absolute path +if [[ "$ain_interpreter_dir" != /* ]]; then + ain_interpreter_dir="$SCRIPT_DIR/$ain_interpreter_dir" +fi + +echo Script got parameter: ain_interpreter_dir: $ain_interpreter_dir + +# Change the working directory to the parent directory +cd "$SCRIPT_DIR/.." + +ain_ainode_dir=$(dirname "$ain_interpreter_dir")/ainode + +echo Script got ainode dir: ain_ainode_dir: $ain_ainode_dir + +echo Starting AINode... + +$ain_ainode_dir start diff --git a/iotdb-core/ainode/resources/sbin/stop-ainode.bat b/iotdb-core/ainode/resources/sbin/stop-ainode.bat new file mode 100644 index 0000000000000..a4f302b3f9368 --- /dev/null +++ b/iotdb-core/ainode/resources/sbin/stop-ainode.bat @@ -0,0 +1,61 @@ +@REM +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM + +@echo off + +set current_dir=%~dp0 +set superior_dir=%current_dir%\..\ + +:initial +if "%1"=="" goto done +set aux=%1 +if "%aux:~0,1%"=="-" ( + set nome=%aux:~1,250% +) else ( + set "%nome%=%1" + set nome= +) +shift +goto initial + +:done +for /f "eol=# tokens=2 delims==" %%i in ('findstr /i "^ain_inference_rpc_port" +%superior_dir%\conf\iotdb-ainode.properties') do ( + set ain_inference_rpc_port=%%i +) + +echo Check whether the rpc_port is used..., port is %ain_inference_rpc_port% + +for /f "eol=# tokens=2 delims==" %%i in ('findstr /i "ain_inference_rpc_address" +%superior_dir%\conf\iotdb-ainode.properties') do ( + set ain_inference_rpc_address=%%i +) + +if defined t ( + for /f "tokens=2 delims=/" %%a in ("%t%") do set "ain_inference_rpc=%%a" +) else ( + set ain_inference_rpc=%ain_inference_rpc_address%:%ain_inference_rpc_port% +) + +echo Target AINode to be stopped: %ain_inference_rpc% + +for /f "tokens=5" %%a in ('netstat /ano ^| findstr /r /c:"^ *TCP *%ain_inference_rpc%.*$"') do ( + taskkill /f /pid %%a + echo Close AINode, PID: %%a +) diff --git a/iotdb-core/ainode/resources/sbin/stop-ainode.sh b/iotdb-core/ainode/resources/sbin/stop-ainode.sh new file mode 100644 index 0000000000000..4580ce6fd3e2f --- /dev/null +++ b/iotdb-core/ainode/resources/sbin/stop-ainode.sh @@ -0,0 +1,73 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+AINODE_CONF="`dirname "$0"`/../conf"
+ain_inference_rpc_port=`sed '/^ain_inference_rpc_port=/!d;s/.*=//' ${AINODE_CONF}/iotdb-ainode.properties`
+
+# fetch parameters with names
+while getopts "i:t:r" opt; do
+  case $opt in
+  i)
+    ;;
+  r)
+    ;;
+  t) p_ain_remove_target="$OPTARG"
+    ;;
+  \?) echo "Invalid option -$OPTARG" >&2
+    exit 1
+    ;;
+  esac
+done
+
+# If p_ain_remove_target exists, take the value after the colon of p_ain_remove_target as ain_inference_rpc_port
+if [ -n "$p_ain_remove_target" ]; then
+  ain_inference_rpc_port=${p_ain_remove_target#*:}
+fi
+
+echo "Check whether the rpc_port is used..., port is $ain_inference_rpc_port"
+
+if type lsof > /dev/null 2>&1 ; then
+  PID=$(lsof -t -i:"${ain_inference_rpc_port}" -sTCP:LISTEN)
+elif type netstat > /dev/null 2>&1 ; then
+  PID=$(netstat -anp 2>/dev/null | grep ":${ain_inference_rpc_port} " | grep ' LISTEN ' | awk '{print $NF}' | sed "s|/.*||g" )
+else
+  echo ""
+  echo " Error: No necessary tool."
+  echo " Please install 'lsof' or 'netstat'."
+  exit 1
+fi
+
+PID_VERIFY=$(ps ax | grep -i 'ainode' | grep -v grep | awk '{print $1}')
+
+if [ -z "$PID" ]; then
+  echo "No AINode to stop"
+  if [ "$(id -u)" -ne 0 ]; then
+    echo "Maybe you can try to run in sudo mode to detect the process."
+  fi
+  exit 1
+elif [[ "${PID_VERIFY}" =~ ${PID} ]]; then
+  kill -s TERM "$PID"
+  echo "Stop AINode, PID:" "$PID"
+else
+  echo "No AINode to stop"
+  exit 1
+fi
+
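The stop script above reduces to one reusable pattern: find the PID listening on the configured RPC port via lsof, fall back to netstat, then send SIGTERM. A minimal standalone sketch of that pattern, where 10810 is assumed as the AINode RPC port and the real value should be read from iotdb-ainode.properties:

    port=10810
    if command -v lsof >/dev/null 2>&1; then
      pid=$(lsof -t -i:"$port" -sTCP:LISTEN)
    elif command -v netstat >/dev/null 2>&1; then
      pid=$(netstat -anp 2>/dev/null | awk -v p=":$port$" '$6 == "LISTEN" && $4 ~ p {split($NF, a, "/"); print a[1]}')
    fi
    [ -n "$pid" ] && kill -s TERM $pid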
diff --git a/iotdb-core/ainode/resources/syncPythonVersion.groovy b/iotdb-core/ainode/resources/syncPythonVersion.groovy
new file mode 100644
index 0000000000000..373bfd7fa0a32
--- /dev/null
+++ b/iotdb-core/ainode/resources/syncPythonVersion.groovy
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import groovy.toml.TomlSlurper
+
+import java.util.regex.Matcher
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// The entire Python "check" block is borrowed from Apache PLC4X's build.
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+allConditionsMet = true
+
+/**
+ * Version extraction function/macro. It looks for an occurrence of x.y or x.y.z
+ * in the passed input text (typically the output of a `program --version` command).
+ *
+ * @param input
+ * @return
+ */
+private static Matcher extractVersion(input) {
+    def matcher = input =~ /(\d+\.\d+(\.\d+)?).*/
+    matcher
+}
+
+def checkVersionAtLeast(String current, String minimum) {
+    def currentSegments = current.tokenize('.')
+    def minimumSegments = minimum.tokenize('.')
+    def numSegments = Math.min(currentSegments.size(), minimumSegments.size())
+    for (int i = 0; i < numSegments; ++i) {
+        def currentSegment = currentSegments[i].toInteger()
+        def minimumSegment = minimumSegments[i].toInteger()
+        if (currentSegment < minimumSegment) {
+            println current.padRight(14) + " FAILED (required min " + minimum + " but got " + current + ")"
+            return false
+        } else if (currentSegment > minimumSegment) {
+            println current.padRight(14) + " OK"
+            return true
+        }
+    }
+    def curNotShorter = currentSegments.size() >= minimumSegments.size()
+    if (curNotShorter) {
+        println current.padRight(14) + " OK"
+    } else {
+        println current.padRight(14) + " FAILED (required min " + minimum + " but got " + current + ")"
+    }
+    curNotShorter
+}
+
+def checkVersionAtMost(String current, String maximum) {
+    def currentSegments = current.tokenize('.')
+    def maximumSegments = maximum.tokenize('.')
+    def numSegments = Math.min(currentSegments.size(), maximumSegments.size())
+    for (int i = 0; i < numSegments; ++i) {
+        def currentSegment = currentSegments[i].toInteger()
+        def maximumSegment = maximumSegments[i].toInteger()
+        if (currentSegment > maximumSegment) {
+            println current.padRight(14) + " FAILED (required max " + maximum + " but got " + current + ")"
+            return false
+        } else if (currentSegment < maximumSegment) {
+            println current.padRight(14) + " OK"
+            return true
+        }
+    }
+    def curNotShorter = currentSegments.size() >= maximumSegments.size()
+    if (curNotShorter) {
+        println current.padRight(14) + " OK"
+    } else {
+        println current.padRight(14) + " FAILED (required max " + maximum + " but got " + current + ")"
+    }
+    curNotShorter
+}
+
+def checkPython() {
+    String python = project.properties['python.exe.bin']
+    println "Using python executable: " + python.padRight(14) + " OK"
+    print "Detecting Python version: "
+    try {
+        def process = (python + " --version").execute()
+        def stdOut = new StringBuilder()
+        def stdErr = new StringBuilder()
+        process.waitForProcessOutput(stdOut, stdErr)
+        Matcher matcher = extractVersion(stdOut + stdErr)
+        if (matcher.size() > 0) {
+            String curVersion = matcher[0][1]
+            def result = checkVersionAtLeast(curVersion, "3.8.0")
+            if (!result) {
+                allConditionsMet = false
+            }
+            result = checkVersionAtMost(curVersion, "3.13")
+            if (!result) {
+                allConditionsMet = false
+            }
+        } else {
+            println "missing (Please install at least version 3.8.0 and at most one of the 3.13.x versions)"
+            allConditionsMet = false
+        }
+    } catch (Exception e) {
+        println "missing"
+        println "--- running `${python} --version` failed ---"
+        println e
+        println "----------------------------------------------------"
+        allConditionsMet = false
+    }
+}
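+
+// A shell equivalent of the two version checks above, handy for quick manual
+// verification outside of Maven (illustrative only; not used by the build):
+//
+//   version_at_least() {
+//     [ "$(printf '%s\n' "$2" "$1" | sort -V | head -n 1)" = "$2" ]
+//   }
+//   version_at_least "$(python3 --version 2>&1 | grep -oE '[0-9]+\.[0-9]+(\.[0-9]+)?')" "3.8.0" \
+//     && echo "Python OK" || echo "Python FAILED"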
+
+// On Ubuntu it seems that venv is generally available, but the 'ensurepip' command fails.
+// In this case we need to install the python3-venv package. Unfortunately checking the
+// venv is successful in this case, so we need this slightly odd test.
+def checkPythonVenv() {
+    print "Detecting venv: "
+    try {
+        def python = project.properties['python.exe.bin']
+        def cmdArray = [python, "-Im", "ensurepip"]
+        def process = cmdArray.execute()
+        def stdOut = new StringBuilder()
+        def stdErr = new StringBuilder()
+        process.waitForProcessOutput(stdOut, stdErr)
+        if (stdErr.contains("No module named")) {
+            println "missing"
+            println "--- output of `python -Im ensurepip` command ---"
+            println stdOut.toString() + stdErr.toString()
+            println "------------------------------------------------------------"
+            allConditionsMet = false
+        } else {
+            println " OK"
+        }
+    } catch (Exception e) {
+        println "missing"
+        println "--- failed with exception ---"
+        println e
+        e.printStackTrace()
+        println "----------------------------------------------------"
+        allConditionsMet = false
+    }
+}
+
+// Check the python environment is set up correctly.
+checkPython()
+checkPythonVenv()
+
+if (!allConditionsMet) {
+    throw new RuntimeException("Not all conditions met, see log for details.")
+}
+println ""
+println "All known conditions met successfully."
+println ""
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Calculate the version that we should use in the python build.
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+def currentMavenVersion = project.version as String
+def currentPythonVersion = currentMavenVersion
+if (currentMavenVersion.contains("-SNAPSHOT")) {
+    currentPythonVersion = currentMavenVersion.split("-SNAPSHOT")[0] + ".dev"
+}
+println "Current Project Version in Maven: " + currentMavenVersion
+println "Current Project Version in Python: " + currentPythonVersion
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Synchronize the version in pyproject.toml and the one used in the maven pom.
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+def pyprojectFile = new File(project.basedir, "pyproject.toml")
+def ts = new TomlSlurper()
+def toml = ts.parse(pyprojectFile)
+def pyprojectFileVersion = toml.tool.poetry.version
+if (pyprojectFileVersion != currentPythonVersion) {
+    pyprojectFile.text = pyprojectFile.text.replace("version = \"" + pyprojectFileVersion + "\"", "version = \"" + currentPythonVersion + "\"")
+    println "Version in pyproject.toml updated from " + pyprojectFileVersion + " to " + currentPythonVersion
+    // TODO: When releasing, we might need to manually add this file to the release preparation commit.
+} else { + println "Version in pyproject.toml is up to date" +} \ No newline at end of file diff --git a/iotdb-core/antlr/pom.xml b/iotdb-core/antlr/pom.xml index 5247d212fe3c7..93e6bd2db5f2c 100644 --- a/iotdb-core/antlr/pom.xml +++ b/iotdb-core/antlr/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-core - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT iotdb-antlr IoTDB: Core: Antlr-Parser diff --git a/iotdb-core/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IdentifierParser.g4 b/iotdb-core/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IdentifierParser.g4 index c9cfc400dcee7..632911fb8af2c 100644 --- a/iotdb-core/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IdentifierParser.g4 +++ b/iotdb-core/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IdentifierParser.g4 @@ -53,6 +53,7 @@ keyWords | BOUNDARY | BY | CACHE + | CALL | CASE | CAST | CHILD @@ -61,6 +62,7 @@ keyWords | CLUSTERID | CONCAT | CONDITION + | CONFIGNODE | CONFIGNODES | CONFIGURATION | CONNECTION @@ -77,6 +79,7 @@ keyWords | DATA_REGION_GROUP_NUM | DATABASE | DATABASES + | DATANODE | DATANODEID | DATANODES | DATASET @@ -112,9 +115,11 @@ keyWords | GRANT | GROUP | HAVING + | HEAD | HYPERPARAMETERS | IN | INDEX + | INFERENCE | INFO | INSERT | INTO @@ -135,6 +140,9 @@ keyWords | MERGE | METADATA | MIGRATE + | AINODES + | MODEL + | MODELS | MODIFY | NAN | NODEID @@ -213,10 +221,12 @@ keyWords | STATELESS | STATEMENT | STOP + | SUBSCRIPTION | SUBSCRIPTIONS | SUBSTRING | SYSTEM | TAGS + | TAIL | TASK | TEMPLATE | TEMPLATES diff --git a/iotdb-core/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IoTDBSqlParser.g4 b/iotdb-core/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IoTDBSqlParser.g4 index 8986fce18e145..02b5062cb0266 100644 --- a/iotdb-core/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IoTDBSqlParser.g4 +++ b/iotdb-core/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IoTDBSqlParser.g4 @@ -56,15 +56,17 @@ ddlStatement | createPipe | alterPipe | dropPipe | startPipe | stopPipe | showPipes // Pipe Plugin | createPipePlugin | dropPipePlugin | showPipePlugins - // TOPIC - | createTopic | dropTopic | showTopics // Subscription - | showSubscriptions + | createTopic | dropTopic | showTopics | showSubscriptions | dropSubscription // CQ | createContinuousQuery | dropContinuousQuery | showContinuousQueries // Cluster | showVariables | showCluster | showRegions | showDataNodes | showConfigNodes | showClusterId - | getRegionId | getTimeSlotList | countTimeSlotList | getSeriesSlotList | migrateRegion | verifyConnection + | getRegionId | getTimeSlotList | countTimeSlotList | getSeriesSlotList + | migrateRegion | reconstructRegion | extendRegion | removeRegion | removeDataNode | removeConfigNode + | verifyConnection + // AINode + | showAINodes | createModel | dropModel | showModels | callInference // Quota | setSpaceQuota | showSpaceQuota | setThrottleQuota | showThrottleQuota // View @@ -489,6 +491,11 @@ showConfigNodes : SHOW CONFIGNODES ; +// ---- Show AI Nodes +showAINodes + : SHOW AINODES + ; + // ---- Show Cluster Id showClusterId : SHOW CLUSTERID @@ -529,16 +536,39 @@ migrateRegion : MIGRATE REGION regionId=INTEGER_LITERAL FROM fromId=INTEGER_LITERAL TO toId=INTEGER_LITERAL ; +reconstructRegion + : RECONSTRUCT REGION regionIds+=INTEGER_LITERAL (COMMA regionIds+=INTEGER_LITERAL)* ON targetDataNodeId=INTEGER_LITERAL + ; + +extendRegion + : EXTEND REGION regionId=INTEGER_LITERAL TO targetDataNodeId=INTEGER_LITERAL + ; + +removeRegion + : REMOVE REGION regionId=INTEGER_LITERAL FROM targetDataNodeId=INTEGER_LITERAL + ; + verifyConnection : 
VERIFY CONNECTION (DETAILS)? ; +// ---- Remove DataNode +removeDataNode + : REMOVE DATANODE dataNodeId=INTEGER_LITERAL + ; + +// ---- Remove ConfigNode +removeConfigNode + : REMOVE CONFIGNODE configNodeId=INTEGER_LITERAL + ; + // Pipe Task ========================================================================================= createPipe : CREATE PIPE (IF NOT EXISTS)? pipeName=identifier - extractorAttributesClause? + ((extractorAttributesClause? processorAttributesClause? - connectorAttributesClause + connectorAttributesClause) + |connectorAttributesWithoutWithSinkClause) ; extractorAttributesClause @@ -570,6 +600,10 @@ connectorAttributesClause RR_BRACKET ; +connectorAttributesWithoutWithSinkClause + : LR_BRACKET (connectorAttributeClause COMMA)* connectorAttributeClause? RR_BRACKET + ; + connectorAttributeClause : connectorKey=STRING_LITERAL OPERATOR_SEQ connectorValue=STRING_LITERAL ; @@ -631,7 +665,8 @@ showPipePlugins : SHOW PIPEPLUGINS ; -// Topic ========================================================================================= + +// Subscription ========================================================================================= createTopic : CREATE TOPIC (IF NOT EXISTS)? topicName=identifier topicAttributesClause? ; @@ -652,11 +687,50 @@ showTopics : SHOW ((TOPIC topicName=identifier) | TOPICS ) ; -// Subscriptions ========================================================================================= showSubscriptions : SHOW SUBSCRIPTIONS (ON topicName=identifier)? ; +dropSubscription + : DROP SUBSCRIPTION (IF EXISTS)? subscriptionId=identifier + ; + +// AI Model ========================================================================================= +// ---- Create Model +createModel + : CREATE MODEL modelName=identifier uriClause + ; + +windowFunction + : TAIL LR_BRACKET windowSize=INTEGER_LITERAL RR_BRACKET + | HEAD LR_BRACKET windowSize=INTEGER_LITERAL RR_BRACKET + | COUNT LR_BRACKET interval=INTEGER_LITERAL COMMA step=INTEGER_LITERAL RR_BRACKET + ; + +callInference + : CALL INFERENCE LR_BRACKET modelId=identifier COMMA inputSql=STRING_LITERAL (COMMA hparamPair)* RR_BRACKET + ; + +hparamPair + : hparamKey=attributeKey operator_eq hparamValue + ; + +hparamValue + : attributeValue + | windowFunction + ; + +// ---- Drop Model +dropModel + : DROP MODEL modelId=identifier + ; + +// ---- Show Models +showModels + : SHOW MODELS + | SHOW MODELS modelId=identifier + ; + // Create Logical View createLogicalView : CREATE VIEW viewTargetPaths AS viewSourcePaths @@ -1085,7 +1159,7 @@ loadTimeseries // Load TsFile loadFile - : LOAD fileName=STRING_LITERAL loadFileAttributeClauses? + : LOAD fileName=STRING_LITERAL ((loadFileAttributeClauses?) | (loadFileWithAttributeClauses)) ; loadFileAttributeClauses @@ -1098,6 +1172,17 @@ loadFileAttributeClause | ONSUCCESS operator_eq (DELETE|NONE) ; +loadFileWithAttributeClauses + : WITH + LR_BRACKET + (loadFileWithAttributeClause COMMA)* loadFileWithAttributeClause? 
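+    // Illustrative statements matched by the new rules in this file (model name,
+    // input SQL, window size and attribute key/value below are examples only):
+    //   CALL INFERENCE(example_model, 'select s0 from root.sg.d1', WINDOW=HEAD(100))
+    //   LOAD 'example.tsfile' WITH ('key'='value')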
+ RR_BRACKET + ; + +loadFileWithAttributeClause + : loadFileWithKey=STRING_LITERAL OPERATOR_SEQ loadFileWithValue=STRING_LITERAL + ; + // Remove TsFile removeFile : REMOVE fileName=STRING_LITERAL diff --git a/iotdb-core/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/SqlLexer.g4 b/iotdb-core/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/SqlLexer.g4 index dc9c12c7c8c5e..f90a0d3dbfd20 100644 --- a/iotdb-core/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/SqlLexer.g4 +++ b/iotdb-core/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/SqlLexer.g4 @@ -129,6 +129,10 @@ CACHE : C A C H E ; +CALL + : C A L L + ; + CAST : C A S T ; @@ -161,6 +165,10 @@ CONFIGNODES : C O N F I G N O D E S ; +CONFIGNODE + : C O N F I G N O D E + ; + CONFIGURATION : C O N F I G U R A T I O N ; @@ -213,6 +221,10 @@ DATABASES : D A T A B A S E S ; +DATANODE + : D A T A N O D E + ; + DATANODEID : D A T A N O D E I D ; @@ -294,6 +306,10 @@ EXPLAIN : E X P L A I N ; +EXTEND + : E X T E N D + ; + EXTRACTOR : E X T R A C T O R ; @@ -358,6 +374,10 @@ HAVING : H A V I N G ; +HEAD + : H E A D + ; + HYPERPARAMETERS : H Y P E R P A R A M E T E R S ; @@ -370,6 +390,10 @@ INDEX : I N D E X ; +INFERENCE + : I N F E R E N C E + ; + INFO : I N F O ; @@ -450,6 +474,18 @@ MIGRATE : M I G R A T E ; +AINODES + : A I N O D E S + ; + +MODEL + : M O D E L + ; + +MODELS + : M O D E L S + ; + MODIFY : M O D I F Y ; @@ -614,6 +650,10 @@ READONLY : R E A D O N L Y ; +RECONSTRUCT + : R E C O N S T R U C T + ; + REGEXP : R E G E X P ; @@ -750,6 +790,10 @@ STOP : S T O P ; +SUBSCRIPTION + : S U B S C R I P T I O N + ; + SUBSCRIPTIONS : S U B S C R I P T I O N S ; @@ -766,6 +810,10 @@ TAGS : T A G S ; +TAIL + : T A I L + ; + TASK : T A S K ; diff --git a/iotdb-core/confignode/pom.xml b/iotdb-core/confignode/pom.xml index 168815c89dfff..083c87a8531f2 100644 --- a/iotdb-core/confignode/pom.xml +++ b/iotdb-core/confignode/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-core - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT iotdb-confignode IoTDB: Core: ConfigNode @@ -42,57 +42,62 @@ org.apache.iotdb service-rpc - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-consensus - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-server - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb pipe-api - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb trigger-api - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb metrics-interface - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-thrift - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-thrift-commons - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-thrift-confignode - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT + + + org.apache.iotdb + iotdb-thrift-ainode + 1.3.4-SNAPSHOT org.apache.iotdb node-commons - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb udf-api - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.tsfile @@ -149,6 +154,16 @@ mockito-core test + + com.tngtech.archunit + archunit + 1.3.0 + test + + + org.apache.commons + commons-math3 + diff --git a/iotdb-core/confignode/src/assembly/resources/conf/confignode-env.sh b/iotdb-core/confignode/src/assembly/resources/conf/confignode-env.sh index 2e89a6f0d7fed..065f08dee35c0 100644 --- a/iotdb-core/confignode/src/assembly/resources/conf/confignode-env.sh +++ b/iotdb-core/confignode/src/assembly/resources/conf/confignode-env.sh @@ -148,6 +148,37 @@ calculate_memory_sizes() OFF_HEAP_MEMORY="${off_heap_memory_size_in_mb}M" } +get_cn_system_dir() { + local config_file="$1" + local cn_system_dir="" + + cn_system_dir=`sed '/^cn_system_dir=/!d;s/.*=//' 
${CONFIGNODE_CONF}/${config_file} | tail -n 1` + + if [ -z "$cn_system_dir" ]; then + echo "" + return 0 + fi + + if [[ "$cn_system_dir" == /* ]]; then + echo "$cn_system_dir" + else + echo "$CONFIGNODE_HOME/$cn_system_dir" + fi +} + +if [ -f "${CONFIGNODE_CONF}/iotdb-system.properties" ]; then + heap_dump_dir=$(get_cn_system_dir "iotdb-system.properties") +else + heap_dump_dir=$(get_cn_system_dir "iotdb-confignode.properties") +fi + +if [ -z "$heap_dump_dir" ]; then + heap_dump_dir="$CONFIGNODE_HOME/data/confignode/system" +fi + +if [ ! -d "$heap_dump_dir" ]; then + mkdir -p "$heap_dump_dir" +fi # find java in JAVA_HOME if [ -n "$JAVA_HOME" ]; then @@ -274,8 +305,8 @@ CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Xmx${ON_HEAP_MEMORY}" CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -XX:MaxDirectMemorySize=${OFF_HEAP_MEMORY}" CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Djdk.nio.maxCachedBufferSize=${MAX_CACHED_BUFFER_SIZE}" IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+CrashOnOutOfMemoryError" -# if you want to dump the heap memory while OOM happening, you can use the following command, remember to replace /tmp/heapdump.hprof with your own file path and the folder where this file is located needs to be created in advance -#IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp/confignode_heapdump.hprof" +# if you want to dump the heap memory while OOM happening, you can use the following command, remember to replace ${heap_dump_dir}/confignode_heapdump.hprof with your own file path and the folder where this file is located needs to be created in advance +#IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${heap_dump_dir}/confignode_heapdump.hprof" echo "ConfigNode on heap memory size = ${ON_HEAP_MEMORY}B, off heap memory size = ${OFF_HEAP_MEMORY}B" echo "If you want to change this configuration, please check conf/confignode-env.sh." diff --git a/iotdb-core/confignode/src/assembly/resources/sbin/remove-confignode.bat b/iotdb-core/confignode/src/assembly/resources/sbin/remove-confignode.bat deleted file mode 100644 index d3e2cc143f5b5..0000000000000 --- a/iotdb-core/confignode/src/assembly/resources/sbin/remove-confignode.bat +++ /dev/null @@ -1,137 +0,0 @@ -@REM -@REM Licensed to the Apache Software Foundation (ASF) under one -@REM or more contributor license agreements. See the NOTICE file -@REM distributed with this work for additional information -@REM regarding copyright ownership. The ASF licenses this file -@REM to you under the Apache License, Version 2.0 (the -@REM "License"); you may not use this file except in compliance -@REM with the License. You may obtain a copy of the License at -@REM -@REM http://www.apache.org/licenses/LICENSE-2.0 -@REM -@REM Unless required by applicable law or agreed to in writing, -@REM software distributed under the License is distributed on an -@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -@REM KIND, either express or implied. See the License for the -@REM specific language governing permissions and limitations -@REM under the License. -@REM - -@echo off - -IF "%~1"=="--help" ( - echo The script will remove a ConfigNode. - echo Before removing a ConfigNode, ensure that there is at least one active ConfigNode in the cluster after the removal. 
- echo Usage: - echo Remove the ConfigNode with confignode_id - echo ./sbin/remove-confignode.bat [confignode_id] - echo Remove the ConfigNode with address:port - echo ./sbin/remove-confignode.bat [cn_internal_address:cn_internal_port] - EXIT /B 0 -) - -echo ``````````````````````````` -echo Starting to remove IoTDB ConfigNode -echo ``````````````````````````` - - -set PATH="%JAVA_HOME%\bin\";%PATH% -set "FULL_VERSION=" -set "MAJOR_VERSION=" -set "MINOR_VERSION=" - - -for /f tokens^=2-5^ delims^=.-_+^" %%j in ('java -fullversion 2^>^&1') do ( - set "FULL_VERSION=%%j-%%k-%%l-%%m" - IF "%%j" == "1" ( - set "MAJOR_VERSION=%%k" - set "MINOR_VERSION=%%l" - ) else ( - set "MAJOR_VERSION=%%j" - set "MINOR_VERSION=%%k" - ) -) - -set JAVA_VERSION=%MAJOR_VERSION% - -@REM we do not check jdk that version less than 1.8 because they are too stale... -IF "%JAVA_VERSION%" == "6" ( - echo IoTDB only supports jdk >= 8, please check your java version. - goto finally -) -IF "%JAVA_VERSION%" == "7" ( - echo IoTDB only supports jdk >= 8, please check your java version. - goto finally -) - -if "%OS%" == "Windows_NT" setlocal - -pushd %~dp0.. -if NOT DEFINED CONFIGNODE_HOME set CONFIGNODE_HOME=%cd% -popd - -set CONFIGNODE_CONF=%CONFIGNODE_HOME%\conf -set CONFIGNODE_LOGS=%CONFIGNODE_HOME%\logs - -@setlocal ENABLEDELAYEDEXPANSION ENABLEEXTENSIONS -set CONF_PARAMS=-r -set is_conf_path=false -for %%i in (%*) do ( - IF "%%i" == "-c" ( - set is_conf_path=true - ) ELSE IF "!is_conf_path!" == "true" ( - set is_conf_path=false - set IOTDB_CONF=%%i - ) ELSE ( - set CONF_PARAMS=!CONF_PARAMS! %%i - ) -) - -IF EXIST "%CONFIGNODE_CONF%\confignode-env.bat" ( - CALL "%CONFIGNODE_CONF%\confignode-env.bat" %1 - ) ELSE ( - echo "can't find %CONFIGNODE_CONF%\confignode-env.bat" - ) - -if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.confignode.service.ConfigNode -if NOT DEFINED JAVA_HOME goto :err - -@REM ----------------------------------------------------------------------------- -@REM JVM Opts we'll use in legacy run or installation -set JAVA_OPTS=-ea^ - -Dlogback.configurationFile="%CONFIGNODE_CONF%\logback-confignode.xml"^ - -DCONFIGNODE_HOME="%CONFIGNODE_HOME%"^ - -DCONFIGNODE_CONF="%CONFIGNODE_CONF%"^ - -Dsun.jnu.encoding=UTF-8^ - -Dfile.encoding=UTF-8 - -@REM ***** CLASSPATH library setting ***** -@REM Ensure that any user defined CLASSPATH variables are not used on startup -if EXIST "%CONFIGNODE_HOME%\lib" (set CLASSPATH="%CONFIGNODE_HOME%\lib\*") else set CLASSPATH="%CONFIGNODE_HOME%\..\lib\*" -set CLASSPATH=%CLASSPATH%;iotdb.ConfigNode -goto okClasspath - -:append -set CLASSPATH=%CLASSPATH%;%1 - -goto :eof - -@REM ----------------------------------------------------------------------------- -:okClasspath - -rem echo CLASSPATH: %CLASSPATH% - -"%JAVA_HOME%\bin\java" %ILLEGAL_ACCESS_PARAMS% %JAVA_OPTS% %CONFIGNODE_HEAP_OPTS% -cp %CLASSPATH% %CONFIGNODE_JMX_OPTS% %MAIN_CLASS% %CONF_PARAMS% -goto finally - -:err -echo JAVA_HOME environment variable must be set! 
-pause - - -@REM ----------------------------------------------------------------------------- -:finally - -pause - -ENDLOCAL diff --git a/iotdb-core/confignode/src/assembly/resources/sbin/remove-confignode.sh b/iotdb-core/confignode/src/assembly/resources/sbin/remove-confignode.sh deleted file mode 100755 index defa7a8eba09b..0000000000000 --- a/iotdb-core/confignode/src/assembly/resources/sbin/remove-confignode.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/bash -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -if [ "$#" -eq 1 ] && [ "$1" == "--help" ]; then - echo "The script will remove a ConfigNode." - echo "Before removing a ConfigNode, ensure that there is at least one active ConfigNode in the cluster after the removal." - echo "Usage:" - echo "Remove the ConfigNode with confignode_id" - echo "./sbin/remove-confignode.sh [confignode_id]" - echo "Remove the ConfigNode with address:port" - echo "./sbin/remove-confignode.sh [cn_internal_address:cn_internal_port]" - exit 0 -fi - -echo ---------------------------- -echo Starting to remove IoTDB ConfigNode -echo ---------------------------- - -source "$(dirname "$0")/iotdb-common.sh" - -#get_iotdb_include wil remove -D parameters -VARS=$(get_iotdb_include "$*") -checkAllConfigNodeVariables -eval set -- "$VARS" - -PARAMS="-r "$* - -initConfigNodeEnv - -CLASSPATH="" -for f in ${CONFIGNODE_HOME}/lib/*.jar; do - CLASSPATH=${CLASSPATH}":"$f -done -classname=org.apache.iotdb.confignode.service.ConfigNode - -launch_service() { - class="$1" - iotdb_parms="-Dlogback.configurationFile=${CONFIGNODE_LOG_CONFIG}" - iotdb_parms="$iotdb_parms -DCONFIGNODE_HOME=${CONFIGNODE_HOME}" - iotdb_parms="$iotdb_parms -DCONFIGNODE_DATA_HOME=${CONFIGNODE_DATA_HOME}" - iotdb_parms="$iotdb_parms -DTSFILE_HOME=${CONFIGNODE_HOME}" - iotdb_parms="$iotdb_parms -DCONFIGNODE_CONF=${CONFIGNODE_CONF}" - iotdb_parms="$iotdb_parms -DTSFILE_CONF=${CONFIGNODE_CONF}" - iotdb_parms="$iotdb_parms -Dname=iotdb\.ConfigNode" - iotdb_parms="$iotdb_parms -DCONFIGNODE_LOGS=${CONFIGNODE_LOGS}" - - exec "$JAVA" $illegal_access_params $iotdb_parms $IOTDB_JMX_OPTS -cp "$CLASSPATH" "$class" $PARAMS - return $? -} - -# Start up the service -launch_service "$classname" - -exit $? diff --git a/iotdb-core/confignode/src/assembly/resources/sbin/stop-confignode.bat b/iotdb-core/confignode/src/assembly/resources/sbin/stop-confignode.bat index 8c0e392f71bd0..b728e400fbcde 100644 --- a/iotdb-core/confignode/src/assembly/resources/sbin/stop-confignode.bat +++ b/iotdb-core/confignode/src/assembly/resources/sbin/stop-confignode.bat @@ -39,8 +39,8 @@ for /f "eol=; tokens=2,2 delims==" %%i in ('findstr /i "^cn_internal_port" ) if not defined cn_internal_port ( - echo "cn_internal_port not found in the configuration file. Exiting." 
- exit /b 1 + echo "WARNING: cn_internal_port not found in the configuration file. Using default value cn_internal_port = 10710" + set cn_internal_port=10710 ) echo "check whether the cn_internal_port is used..., port is %cn_internal_port%" @@ -51,8 +51,8 @@ for /f "eol=; tokens=2,2 delims==" %%i in ('findstr /i "cn_internal_address" ) if not defined cn_internal_address ( - echo "cn_internal_address not found in the configuration file. Exiting." - exit /b 1 + echo "WARNING: cn_internal_address not found in the configuration file. Using default value cn_internal_address = 127.0.0.1" + set cn_internal_address=127.0.0.1 ) for /f "tokens=5" %%a in ('netstat /ano ^| findstr %cn_internal_address%:%cn_internal_port% ^| findstr LISTENING ') do ( diff --git a/iotdb-core/confignode/src/assembly/resources/sbin/stop-confignode.sh b/iotdb-core/confignode/src/assembly/resources/sbin/stop-confignode.sh index 5b2ccef01a514..f3207380dbd71 100644 --- a/iotdb-core/confignode/src/assembly/resources/sbin/stop-confignode.sh +++ b/iotdb-core/confignode/src/assembly/resources/sbin/stop-confignode.sh @@ -27,6 +27,11 @@ else cn_internal_port=$(sed '/^cn_internal_port=/!d;s/.*=//' "${CONFIGNODE_CONF}"/iotdb-confignode.properties) fi +if [ -z "$cn_internal_port" ]; then + echo "WARNING: cn_internal_port not found in the configuration file. Using default value cn_internal_port=10710" + cn_internal_port=10710 +fi + check_config_unique "cn_internal_port" "$cn_internal_port" echo Check whether the internal_port is used..., port is "$cn_internal_port" diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/CnToCnNodeRequestType.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/CnToCnNodeRequestType.java index 3864f26cbc8e1..e422e45dff0be 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/CnToCnNodeRequestType.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/CnToCnNodeRequestType.java @@ -27,7 +27,7 @@ public enum CnToCnNodeRequestType { REMOVE_CONFIG_NODE, DELETE_CONFIG_NODE_PEER, REPORT_CONFIG_NODE_SHUTDOWN, - STOP_CONFIG_NODE, + STOP_AND_CLEAR_CONFIG_NODE, SET_CONFIGURATION, SHOW_CONFIGURATION, SUBMIT_TEST_CONNECTION_TASK, diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/AsyncAINodeHeartbeatClientPool.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/AsyncAINodeHeartbeatClientPool.java new file mode 100644 index 0000000000000..e09ccc79becbf --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/AsyncAINodeHeartbeatClientPool.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.client.async; + +import org.apache.iotdb.ainode.rpc.thrift.TAIHeartbeatReq; +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.commons.client.ClientPoolFactory; +import org.apache.iotdb.commons.client.IClientManager; +import org.apache.iotdb.commons.client.ainode.AsyncAINodeServiceClient; +import org.apache.iotdb.confignode.client.async.handlers.heartbeat.AINodeHeartbeatHandler; + +public class AsyncAINodeHeartbeatClientPool { + + private final IClientManager clientManager; + + private AsyncAINodeHeartbeatClientPool() { + clientManager = + new IClientManager.Factory() + .createClientManager( + new ClientPoolFactory.AsyncAINodeHeartbeatServiceClientPoolFactory()); + } + + public void getAINodeHeartBeat( + TEndPoint endPoint, TAIHeartbeatReq req, AINodeHeartbeatHandler handler) { + try { + clientManager.borrowClient(endPoint).getAIHeartbeat(req, handler); + } catch (Exception ignore) { + // Just ignore + } + } + + private static class AsyncAINodeHeartbeatClientPoolHolder { + + private static final AsyncAINodeHeartbeatClientPool INSTANCE = + new AsyncAINodeHeartbeatClientPool(); + + private AsyncAINodeHeartbeatClientPoolHolder() { + // Empty constructor + } + } + + public static AsyncAINodeHeartbeatClientPool getInstance() { + return AsyncAINodeHeartbeatClientPool.AsyncAINodeHeartbeatClientPoolHolder.INSTANCE; + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/CnToDnRequestType.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnAsyncRequestType.java similarity index 87% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/CnToDnRequestType.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnAsyncRequestType.java index bc73072825739..225d434191325 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/CnToDnRequestType.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnAsyncRequestType.java @@ -17,14 +17,12 @@ * under the License. 
*/ -package org.apache.iotdb.confignode.client; - -public enum CnToDnRequestType { +package org.apache.iotdb.confignode.client.async; +public enum CnToDnAsyncRequestType { // Node Maintenance - DISABLE_DATA_NODE, - STOP_DATA_NODE, - + STOP_AND_CLEAR_DATA_NODE, + CLEAN_DATA_NODE_CACHE, FLUSH, MERGE, FULL_MERGE, @@ -33,8 +31,6 @@ public enum CnToDnRequestType { LOAD_CONFIGURATION, SET_SYSTEM_STATUS, SET_CONFIGURATION, - SHOW_CONFIGURATION, - SUBMIT_TEST_CONNECTION_TASK, TEST_CONNECTION, @@ -42,20 +38,14 @@ public enum CnToDnRequestType { CREATE_DATA_REGION, CREATE_SCHEMA_REGION, DELETE_REGION, - - CREATE_NEW_REGION_PEER, - ADD_REGION_PEER, - REMOVE_REGION_PEER, - DELETE_OLD_REGION_PEER, RESET_PEER_LIST, - + NOTIFY_REGION_MIGRATION, UPDATE_REGION_ROUTE_MAP, CHANGE_REGION_LEADER, // PartitionCache - INVALIDATE_PARTITION_CACHE, - INVALIDATE_PERMISSION_CACHE, INVALIDATE_SCHEMA_CACHE, + INVALIDATE_LAST_CACHE, CLEAR_CACHE, // Function @@ -86,15 +76,11 @@ public enum CnToDnRequestType { CONSUMER_GROUP_PUSH_ALL_META, CONSUMER_GROUP_PUSH_SINGLE_META, - // CQ - EXECUTE_CQ, - // TEMPLATE UPDATE_TEMPLATE, // Schema SET_TTL, - UPDATE_TTL_CACHE, CONSTRUCT_SCHEMA_BLACK_LIST, ROLLBACK_SCHEMA_BLACK_LIST, @@ -112,8 +98,8 @@ public enum CnToDnRequestType { CONSTRUCT_VIEW_SCHEMA_BLACK_LIST, ROLLBACK_VIEW_SCHEMA_BLACK_LIST, - DELETE_VIEW, + DELETE_VIEW, ALTER_VIEW, // TODO Need to migrate to Node Maintenance diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnInternalServiceAsyncRequestManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnInternalServiceAsyncRequestManager.java index 7ac3bbcd96168..59f99fe57cb03 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnInternalServiceAsyncRequestManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnInternalServiceAsyncRequestManager.java @@ -32,7 +32,7 @@ import org.apache.iotdb.commons.client.request.AsyncRequestRPCHandler; import org.apache.iotdb.commons.client.request.DataNodeInternalServiceRequestManager; import org.apache.iotdb.commons.client.request.TestConnectionUtils; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.commons.exception.UncheckedStartupException; import org.apache.iotdb.confignode.client.async.handlers.rpc.CheckTimeSeriesExistenceRPCHandler; import org.apache.iotdb.confignode.client.async.handlers.rpc.CountPathsUsingTemplateRPCHandler; import org.apache.iotdb.confignode.client.async.handlers.rpc.DataNodeAsyncRequestRPCHandler; @@ -50,6 +50,7 @@ import org.apache.iotdb.mpp.rpc.thrift.TAlterViewReq; import org.apache.iotdb.mpp.rpc.thrift.TCheckSchemaRegionUsingTemplateReq; import org.apache.iotdb.mpp.rpc.thrift.TCheckTimeSeriesExistenceReq; +import org.apache.iotdb.mpp.rpc.thrift.TCleanDataNodeCacheReq; import org.apache.iotdb.mpp.rpc.thrift.TConstructSchemaBlackListReq; import org.apache.iotdb.mpp.rpc.thrift.TConstructSchemaBlackListWithTemplateReq; import org.apache.iotdb.mpp.rpc.thrift.TConstructViewSchemaBlackListReq; @@ -68,7 +69,9 @@ import org.apache.iotdb.mpp.rpc.thrift.TDropTriggerInstanceReq; import org.apache.iotdb.mpp.rpc.thrift.TFetchSchemaBlackListReq; import org.apache.iotdb.mpp.rpc.thrift.TInactiveTriggerInstanceReq; +import org.apache.iotdb.mpp.rpc.thrift.TInvalidateCacheReq; import org.apache.iotdb.mpp.rpc.thrift.TInvalidateMatchedSchemaCacheReq; +import org.apache.iotdb.mpp.rpc.thrift.TNotifyRegionMigrationReq; 
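+// Each put() below registers one request type together with a lambda that adapts the
+// generic (req, client, handler) triple to a single concrete thrift call, e.g.
+//   (req, client, handler) -> client.setTTL((TSetTTLReq) req, (DataNodeTSStatusRPCHandler) handler)
+// so the base request manager can dispatch purely by CnToDnAsyncRequestType (illustrative
+// summary of the registration pattern, not an additional API).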
import org.apache.iotdb.mpp.rpc.thrift.TPipeHeartbeatReq; import org.apache.iotdb.mpp.rpc.thrift.TPushConsumerGroupMetaReq; import org.apache.iotdb.mpp.rpc.thrift.TPushMultiPipeMetaReq; @@ -90,9 +93,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + /** Asynchronously send RPC requests to DataNodes. See queryengine.thrift for more details. */ public class CnToDnInternalServiceAsyncRequestManager - extends DataNodeInternalServiceRequestManager { + extends DataNodeInternalServiceRequestManager { private static final Logger LOGGER = LoggerFactory.getLogger(CnToDnInternalServiceAsyncRequestManager.class); @@ -100,263 +107,298 @@ public class CnToDnInternalServiceAsyncRequestManager @Override protected void initActionMapBuilder() { actionMapBuilder.put( - CnToDnRequestType.SET_TTL, + CnToDnAsyncRequestType.SET_TTL, (req, client, handler) -> client.setTTL((TSetTTLReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.CREATE_DATA_REGION, + CnToDnAsyncRequestType.CREATE_DATA_REGION, (req, client, handler) -> client.createDataRegion( (TCreateDataRegionReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.DELETE_REGION, + CnToDnAsyncRequestType.DELETE_REGION, (req, client, handler) -> client.deleteRegion((TConsensusGroupId) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.CREATE_SCHEMA_REGION, + CnToDnAsyncRequestType.CREATE_SCHEMA_REGION, (req, client, handler) -> client.createSchemaRegion( (TCreateSchemaRegionReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.CREATE_FUNCTION, + CnToDnAsyncRequestType.CREATE_FUNCTION, (req, client, handler) -> client.createFunction( (TCreateFunctionInstanceReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.DROP_FUNCTION, + CnToDnAsyncRequestType.DROP_FUNCTION, (req, client, handler) -> client.dropFunction( (TDropFunctionInstanceReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.CREATE_TRIGGER_INSTANCE, + CnToDnAsyncRequestType.CREATE_TRIGGER_INSTANCE, (req, client, handler) -> client.createTriggerInstance( (TCreateTriggerInstanceReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.DROP_TRIGGER_INSTANCE, + CnToDnAsyncRequestType.DROP_TRIGGER_INSTANCE, (req, client, handler) -> client.dropTriggerInstance( (TDropTriggerInstanceReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.ACTIVE_TRIGGER_INSTANCE, + CnToDnAsyncRequestType.ACTIVE_TRIGGER_INSTANCE, (req, client, handler) -> client.activeTriggerInstance( (TActiveTriggerInstanceReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.INACTIVE_TRIGGER_INSTANCE, + CnToDnAsyncRequestType.INACTIVE_TRIGGER_INSTANCE, (req, client, handler) -> client.inactiveTriggerInstance( (TInactiveTriggerInstanceReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.UPDATE_TRIGGER_LOCATION, + CnToDnAsyncRequestType.UPDATE_TRIGGER_LOCATION, (req, client, handler) -> client.updateTriggerLocation( (TUpdateTriggerLocationReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.CREATE_PIPE_PLUGIN, + CnToDnAsyncRequestType.CREATE_PIPE_PLUGIN, (req, client, handler) -> client.createPipePlugin( (TCreatePipePluginInstanceReq) req, 
(DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.DROP_PIPE_PLUGIN, + CnToDnAsyncRequestType.DROP_PIPE_PLUGIN, (req, client, handler) -> client.dropPipePlugin( (TDropPipePluginInstanceReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.PIPE_PUSH_ALL_META, + CnToDnAsyncRequestType.PIPE_PUSH_ALL_META, (req, client, handler) -> client.pushPipeMeta((TPushPipeMetaReq) req, (PipePushMetaRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.PIPE_PUSH_SINGLE_META, + CnToDnAsyncRequestType.PIPE_PUSH_SINGLE_META, (req, client, handler) -> client.pushSinglePipeMeta( (TPushSinglePipeMetaReq) req, (PipePushMetaRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.PIPE_PUSH_MULTI_META, + CnToDnAsyncRequestType.PIPE_PUSH_MULTI_META, (req, client, handler) -> client.pushMultiPipeMeta( (TPushMultiPipeMetaReq) req, (PipePushMetaRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.TOPIC_PUSH_ALL_META, + CnToDnAsyncRequestType.TOPIC_PUSH_ALL_META, (req, client, handler) -> client.pushTopicMeta((TPushTopicMetaReq) req, (TopicPushMetaRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.TOPIC_PUSH_SINGLE_META, + CnToDnAsyncRequestType.TOPIC_PUSH_SINGLE_META, (req, client, handler) -> client.pushSingleTopicMeta( (TPushSingleTopicMetaReq) req, (TopicPushMetaRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.TOPIC_PUSH_MULTI_META, + CnToDnAsyncRequestType.TOPIC_PUSH_MULTI_META, (req, client, handler) -> client.pushMultiTopicMeta( (TPushMultiTopicMetaReq) req, (TopicPushMetaRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.CONSUMER_GROUP_PUSH_ALL_META, + CnToDnAsyncRequestType.CONSUMER_GROUP_PUSH_ALL_META, (req, client, handler) -> client.pushConsumerGroupMeta( (TPushConsumerGroupMetaReq) req, (ConsumerGroupPushMetaRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.CONSUMER_GROUP_PUSH_SINGLE_META, + CnToDnAsyncRequestType.CONSUMER_GROUP_PUSH_SINGLE_META, (req, client, handler) -> client.pushSingleConsumerGroupMeta( (TPushSingleConsumerGroupMetaReq) req, (ConsumerGroupPushMetaRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.PIPE_HEARTBEAT, + CnToDnAsyncRequestType.PIPE_HEARTBEAT, (req, client, handler) -> client.pipeHeartbeat((TPipeHeartbeatReq) req, (PipeHeartbeatRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.MERGE, + CnToDnAsyncRequestType.MERGE, (req, client, handler) -> client.merge((DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.FULL_MERGE, + CnToDnAsyncRequestType.FULL_MERGE, (req, client, handler) -> client.merge((DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.FLUSH, + CnToDnAsyncRequestType.FLUSH, (req, client, handler) -> client.flush((TFlushReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.CLEAR_CACHE, + CnToDnAsyncRequestType.CLEAR_CACHE, (req, client, handler) -> client.clearCache((DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.START_REPAIR_DATA, + CnToDnAsyncRequestType.START_REPAIR_DATA, (req, client, handler) -> client.startRepairData((DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.STOP_REPAIR_DATA, + CnToDnAsyncRequestType.STOP_REPAIR_DATA, (req, client, handler) -> client.stopRepairData((DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.LOAD_CONFIGURATION, + CnToDnAsyncRequestType.LOAD_CONFIGURATION, 
(req, client, handler) -> client.loadConfiguration((DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.SET_SYSTEM_STATUS, + CnToDnAsyncRequestType.SET_SYSTEM_STATUS, (req, client, handler) -> client.setSystemStatus((String) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.SET_CONFIGURATION, + CnToDnAsyncRequestType.SET_CONFIGURATION, (req, client, handler) -> client.setConfiguration( (TSetConfigurationReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.UPDATE_REGION_ROUTE_MAP, + CnToDnAsyncRequestType.UPDATE_REGION_ROUTE_MAP, (req, client, handler) -> client.updateRegionCache((TRegionRouteReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.CHANGE_REGION_LEADER, + CnToDnAsyncRequestType.NOTIFY_REGION_MIGRATION, + (req, client, handler) -> + client.notifyRegionMigration( + (TNotifyRegionMigrationReq) req, (DataNodeTSStatusRPCHandler) handler)); + actionMapBuilder.put( + CnToDnAsyncRequestType.CHANGE_REGION_LEADER, (req, client, handler) -> client.changeRegionLeader( (TRegionLeaderChangeReq) req, (TransferLeaderRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.CONSTRUCT_SCHEMA_BLACK_LIST, + CnToDnAsyncRequestType.CONSTRUCT_SCHEMA_BLACK_LIST, (req, client, handler) -> client.constructSchemaBlackList( (TConstructSchemaBlackListReq) req, (SchemaUpdateRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.ROLLBACK_SCHEMA_BLACK_LIST, + CnToDnAsyncRequestType.ROLLBACK_SCHEMA_BLACK_LIST, (req, client, handler) -> client.rollbackSchemaBlackList( (TRollbackSchemaBlackListReq) req, (SchemaUpdateRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.FETCH_SCHEMA_BLACK_LIST, + CnToDnAsyncRequestType.FETCH_SCHEMA_BLACK_LIST, (req, client, handler) -> client.fetchSchemaBlackList( (TFetchSchemaBlackListReq) req, (FetchSchemaBlackListRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.INVALIDATE_MATCHED_SCHEMA_CACHE, + CnToDnAsyncRequestType.INVALIDATE_SCHEMA_CACHE, + (req, client, handler) -> + client.invalidateSchemaCache( + (TInvalidateCacheReq) req, (DataNodeTSStatusRPCHandler) handler)); + actionMapBuilder.put( + CnToDnAsyncRequestType.INVALIDATE_MATCHED_SCHEMA_CACHE, (req, client, handler) -> client.invalidateMatchedSchemaCache( (TInvalidateMatchedSchemaCacheReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.DELETE_DATA_FOR_DELETE_SCHEMA, + CnToDnAsyncRequestType.INVALIDATE_LAST_CACHE, + (req, client, handler) -> + client.invalidateLastCache((String) req, (DataNodeTSStatusRPCHandler) handler)); + actionMapBuilder.put( + CnToDnAsyncRequestType.DELETE_DATA_FOR_DELETE_SCHEMA, (req, client, handler) -> client.deleteDataForDeleteSchema( (TDeleteDataForDeleteSchemaReq) req, (SchemaUpdateRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.DELETE_TIMESERIES, + CnToDnAsyncRequestType.DELETE_TIMESERIES, (req, client, handler) -> client.deleteTimeSeries((TDeleteTimeSeriesReq) req, (SchemaUpdateRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.CONSTRUCT_SCHEMA_BLACK_LIST_WITH_TEMPLATE, + CnToDnAsyncRequestType.CONSTRUCT_SCHEMA_BLACK_LIST_WITH_TEMPLATE, (req, client, handler) -> client.constructSchemaBlackListWithTemplate( (TConstructSchemaBlackListWithTemplateReq) req, (SchemaUpdateRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.ROLLBACK_SCHEMA_BLACK_LIST_WITH_TEMPLATE, + CnToDnAsyncRequestType.ROLLBACK_SCHEMA_BLACK_LIST_WITH_TEMPLATE, 
(req, client, handler) -> client.rollbackSchemaBlackListWithTemplate( (TRollbackSchemaBlackListWithTemplateReq) req, (SchemaUpdateRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.DEACTIVATE_TEMPLATE, + CnToDnAsyncRequestType.DEACTIVATE_TEMPLATE, (req, client, handler) -> client.deactivateTemplate( (TDeactivateTemplateReq) req, (SchemaUpdateRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.UPDATE_TEMPLATE, + CnToDnAsyncRequestType.UPDATE_TEMPLATE, (req, client, handler) -> client.updateTemplate((TUpdateTemplateReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.COUNT_PATHS_USING_TEMPLATE, + CnToDnAsyncRequestType.COUNT_PATHS_USING_TEMPLATE, (req, client, handler) -> client.countPathsUsingTemplate( (TCountPathsUsingTemplateReq) req, (CountPathsUsingTemplateRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.CHECK_SCHEMA_REGION_USING_TEMPLATE, + CnToDnAsyncRequestType.CHECK_SCHEMA_REGION_USING_TEMPLATE, (req, client, handler) -> client.checkSchemaRegionUsingTemplate( (TCheckSchemaRegionUsingTemplateReq) req, (CheckSchemaRegionUsingTemplateRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.CHECK_TIMESERIES_EXISTENCE, + CnToDnAsyncRequestType.CHECK_TIMESERIES_EXISTENCE, (req, client, handler) -> client.checkTimeSeriesExistence( (TCheckTimeSeriesExistenceReq) req, (CheckTimeSeriesExistenceRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.CONSTRUCT_VIEW_SCHEMA_BLACK_LIST, + CnToDnAsyncRequestType.CONSTRUCT_VIEW_SCHEMA_BLACK_LIST, (req, client, handler) -> client.constructViewSchemaBlackList( (TConstructViewSchemaBlackListReq) req, (SchemaUpdateRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.ROLLBACK_VIEW_SCHEMA_BLACK_LIST, + CnToDnAsyncRequestType.ROLLBACK_VIEW_SCHEMA_BLACK_LIST, (req, client, handler) -> client.rollbackViewSchemaBlackList( (TRollbackViewSchemaBlackListReq) req, (SchemaUpdateRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.DELETE_VIEW, + CnToDnAsyncRequestType.DELETE_VIEW, (req, client, handler) -> client.deleteViewSchema((TDeleteViewSchemaReq) req, (SchemaUpdateRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.ALTER_VIEW, + CnToDnAsyncRequestType.ALTER_VIEW, (req, client, handler) -> client.alterView((TAlterViewReq) req, (SchemaUpdateRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.KILL_QUERY_INSTANCE, + CnToDnAsyncRequestType.KILL_QUERY_INSTANCE, (req, client, handler) -> client.killQueryInstance((String) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.SET_SPACE_QUOTA, + CnToDnAsyncRequestType.SET_SPACE_QUOTA, (req, client, handler) -> client.setSpaceQuota((TSetSpaceQuotaReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.SET_THROTTLE_QUOTA, + CnToDnAsyncRequestType.SET_THROTTLE_QUOTA, (req, client, handler) -> client.setThrottleQuota( (TSetThrottleQuotaReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.RESET_PEER_LIST, + CnToDnAsyncRequestType.RESET_PEER_LIST, (req, client, handler) -> client.resetPeerList((TResetPeerListReq) req, (DataNodeTSStatusRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.SUBMIT_TEST_CONNECTION_TASK, + CnToDnAsyncRequestType.SUBMIT_TEST_CONNECTION_TASK, (req, client, handler) -> client.submitTestConnectionTask( (TNodeLocations) req, (SubmitTestConnectionTaskRPCHandler) handler)); actionMapBuilder.put( - CnToDnRequestType.TEST_CONNECTION, + 
CnToDnAsyncRequestType.TEST_CONNECTION, (req, client, handler) -> client.testConnectionEmptyRPC((DataNodeTSStatusRPCHandler) handler)); + actionMapBuilder.put( + CnToDnAsyncRequestType.CLEAN_DATA_NODE_CACHE, + (req, client, handler) -> + client.cleanDataNodeCache( + (TCleanDataNodeCacheReq) req, (DataNodeTSStatusRPCHandler) handler)); + actionMapBuilder.put( + CnToDnAsyncRequestType.STOP_AND_CLEAR_DATA_NODE, + (req, client, handler) -> + client.stopAndClearDataNode((DataNodeTSStatusRPCHandler) handler)); + } + + @Override + protected void checkActionMapCompleteness() { + List lackList = + Arrays.stream(CnToDnAsyncRequestType.values()) + .filter(type -> !actionMap.containsKey(type)) + .collect(Collectors.toList()); + if (!lackList.isEmpty()) { + throw new UncheckedStartupException( + String.format("These request types should be added to actionMap: %s", lackList)); + } } @Override - protected AsyncRequestRPCHandler buildHandler( - AsyncRequestContext requestContext, + protected AsyncRequestRPCHandler buildHandler( + AsyncRequestContext requestContext, int requestId, TDataNodeLocation targetNode) { return DataNodeAsyncRequestRPCHandler.buildHandler(requestContext, requestId, targetNode); @@ -364,8 +406,8 @@ protected AsyncRequestRPCHandler buildH @Override protected void adjustClientTimeoutIfNecessary( - CnToDnRequestType cnToDnRequestType, AsyncDataNodeInternalServiceClient client) { - if (CnToDnRequestType.SUBMIT_TEST_CONNECTION_TASK.equals(cnToDnRequestType)) { + CnToDnAsyncRequestType CnToDnAsyncRequestType, AsyncDataNodeInternalServiceClient client) { + if (CnToDnAsyncRequestType.SUBMIT_TEST_CONNECTION_TASK.equals(CnToDnAsyncRequestType)) { client.setTimeoutTemporarily(TestConnectionUtils.calculateCnLeaderToAllDnMaxTime()); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/DataNodeAsyncRequestContext.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/DataNodeAsyncRequestContext.java index 2b813c081d7e7..50797d603a3b3 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/DataNodeAsyncRequestContext.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/DataNodeAsyncRequestContext.java @@ -21,7 +21,7 @@ import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; import org.apache.iotdb.commons.client.request.AsyncRequestContext; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import java.util.Map; @@ -32,19 +32,21 @@ * @param ClassName of RPC response */ public class DataNodeAsyncRequestContext - extends AsyncRequestContext { + extends AsyncRequestContext { - public DataNodeAsyncRequestContext(CnToDnRequestType requestType) { + public DataNodeAsyncRequestContext(CnToDnAsyncRequestType requestType) { super(requestType); } public DataNodeAsyncRequestContext( - CnToDnRequestType requestType, Map dataNodeLocationMap) { + CnToDnAsyncRequestType requestType, Map dataNodeLocationMap) { super(requestType, dataNodeLocationMap); } public DataNodeAsyncRequestContext( - CnToDnRequestType requestType, Q q, Map dataNodeLocationMap) { + CnToDnAsyncRequestType requestType, + Q q, + Map dataNodeLocationMap) { super(requestType, q, dataNodeLocationMap); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/heartbeat/AINodeHeartbeatHandler.java 
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/heartbeat/AINodeHeartbeatHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/heartbeat/AINodeHeartbeatHandler.java
new file mode 100644
index 0000000000000..9d8e0b6e8474f
--- /dev/null
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/heartbeat/AINodeHeartbeatHandler.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.client.async.handlers.heartbeat;
+
+import org.apache.iotdb.ainode.rpc.thrift.TAIHeartbeatResp;
+import org.apache.iotdb.commons.client.ThriftClient;
+import org.apache.iotdb.commons.cluster.NodeStatus;
+import org.apache.iotdb.commons.cluster.NodeType;
+import org.apache.iotdb.confignode.manager.load.LoadManager;
+import org.apache.iotdb.confignode.manager.load.cache.node.NodeHeartbeatSample;
+
+import org.apache.thrift.async.AsyncMethodCallback;
+
+public class AINodeHeartbeatHandler implements AsyncMethodCallback<TAIHeartbeatResp> {
+
+  private final int nodeId;
+
+  private final LoadManager loadManager;
+
+  public AINodeHeartbeatHandler(int nodeId, LoadManager loadManager) {
+    this.nodeId = nodeId;
+    this.loadManager = loadManager;
+  }
+
+  @Override
+  public void onComplete(TAIHeartbeatResp aiHeartbeatResp) {
+    loadManager
+        .getLoadCache()
+        .cacheAINodeHeartbeatSample(nodeId, new NodeHeartbeatSample(aiHeartbeatResp));
+  }
+
+  @Override
+  public void onError(Exception e) {
+    if (ThriftClient.isConnectionBroken(e)) {
+      loadManager.forceUpdateNodeCache(
+          NodeType.AINode, nodeId, new NodeHeartbeatSample(NodeStatus.Unknown));
+    }
+    loadManager.getLoadCache().resetHeartbeatProcessing(nodeId);
+  }
+}
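AINodeHeartbeatHandler follows the usual Thrift AsyncMethodCallback contract: onComplete caches a fresh heartbeat sample, while onError downgrades the node only when the connection is actually broken. A reduced, runnable sketch of that contract; the callback interface and cache below are stand-ins for the Thrift and LoadManager types, which this diff does not show in full.

import java.net.SocketException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public final class HeartbeatCallbackSketch {
  // Stand-in for Thrift's AsyncMethodCallback<T>: exactly one of the two methods fires.
  interface AsyncCallback<T> {
    void onComplete(T response);
    void onError(Exception e);
  }

  // nodeId -> last heartbeat timestamp; a missing entry models NodeStatus.Unknown.
  private static final Map<Integer, Long> CACHE = new ConcurrentHashMap<>();

  static AsyncCallback<Long> handlerFor(int nodeId) {
    return new AsyncCallback<Long>() {
      @Override
      public void onComplete(Long heartbeatTimestamp) {
        CACHE.put(nodeId, heartbeatTimestamp); // cache the fresh sample
      }

      @Override
      public void onError(Exception e) {
        // Mirror the handler: only a broken connection downgrades the node.
        if (e instanceof SocketException) {
          CACHE.remove(nodeId);
        }
      }
    };
  }

  public static void main(String[] args) {
    AsyncCallback<Long> cb = handlerFor(1);
    cb.onComplete(System.currentTimeMillis());
    cb.onError(new SocketException("broken pipe"));
    System.out.println(CACHE); // {} -> node 1 is Unknown again
  }
}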
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/heartbeat/DataNodeHeartbeatHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/heartbeat/DataNodeHeartbeatHandler.java
index 9d42c319a44e3..3613c337862aa 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/heartbeat/DataNodeHeartbeatHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/heartbeat/DataNodeHeartbeatHandler.java
@@ -87,10 +87,22 @@ public void onComplete(TDataNodeHeartbeatResp heartbeatResp) {
         .getLoadCache()
         .cacheDataNodeHeartbeatSample(nodeId, new NodeHeartbeatSample(heartbeatResp));
 
+    RegionStatus regionStatus = RegionStatus.valueOf(heartbeatResp.getStatus());
+
     heartbeatResp
         .getJudgedLeaders()
         .forEach(
             (regionGroupId, isLeader) -> {
+
+              // Do not allow regions to inherit the Removing state from datanode
+              RegionStatus nextRegionStatus = regionStatus;
+              if (nextRegionStatus == RegionStatus.Removing) {
+                nextRegionStatus =
+                    loadManager
+                        .getLoadCache()
+                        .getRegionCacheLastSampleStatus(regionGroupId, nodeId);
+              }
+
               // Update RegionGroupCache
               loadManager
                   .getLoadCache()
@@ -100,7 +112,7 @@ public void onComplete(TDataNodeHeartbeatResp heartbeatResp) {
                       new RegionHeartbeatSample(
                           heartbeatResp.getHeartbeatTimestamp(),
                           // Region will inherit DataNode's status
-                          RegionStatus.valueOf(heartbeatResp.getStatus())),
+                          nextRegionStatus),
                       false);
 
               if (((TConsensusGroupType.SchemaRegion.equals(regionGroupId.getType())
@@ -143,6 +155,9 @@ public void onComplete(TDataNodeHeartbeatResp heartbeatResp) {
           .updateConfirmedConfigNodeEndPoints(
               nodeId, heartbeatResp.getConfirmedConfigNodeEndPoints());
     }
+    if (heartbeatResp.isSetRegionDisk()) {
+      loadManager.getLoadCache().updateRegionSizeMap(nodeId, heartbeatResp.getRegionDisk());
+    }
   }
 
   @Override
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/CheckTimeSeriesExistenceRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/CheckTimeSeriesExistenceRPCHandler.java
index b12ba289f1c2a..3a735691a0efa 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/CheckTimeSeriesExistenceRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/CheckTimeSeriesExistenceRPCHandler.java
@@ -21,7 +21,7 @@
 
 import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
 import org.apache.iotdb.common.rpc.thrift.TSStatus;
-import org.apache.iotdb.confignode.client.CnToDnRequestType;
+import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
 import org.apache.iotdb.mpp.rpc.thrift.TCheckTimeSeriesExistenceResp;
 import org.apache.iotdb.rpc.RpcUtils;
 import org.apache.iotdb.rpc.TSStatusCode;
@@ -39,7 +39,7 @@ public class CheckTimeSeriesExistenceRPCHandler
       LoggerFactory.getLogger(CheckTimeSeriesExistenceRPCHandler.class);
 
   public CheckTimeSeriesExistenceRPCHandler(
-      CnToDnRequestType requestType,
+      CnToDnAsyncRequestType requestType,
       int requestId,
       TDataNodeLocation targetDataNode,
       Map<Integer, TDataNodeLocation> dataNodeLocationMap,
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/CountPathsUsingTemplateRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/CountPathsUsingTemplateRPCHandler.java
index f2ed0cc425c57..b27c74bb41d8c 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/CountPathsUsingTemplateRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/CountPathsUsingTemplateRPCHandler.java
@@ -21,7 +21,7 @@
 
 import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
 import org.apache.iotdb.common.rpc.thrift.TSStatus;
-import org.apache.iotdb.confignode.client.CnToDnRequestType;
+import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
 import org.apache.iotdb.mpp.rpc.thrift.TCountPathsUsingTemplateResp;
 import org.apache.iotdb.rpc.RpcUtils;
 import org.apache.iotdb.rpc.TSStatusCode;
@@ -39,7 +39,7 @@ public class CountPathsUsingTemplateRPCHandler
       LoggerFactory.getLogger(CountPathsUsingTemplateRPCHandler.class);
 
   public CountPathsUsingTemplateRPCHandler(
-      CnToDnRequestType requestType,
+      CnToDnAsyncRequestType requestType,
       int requestId,
       TDataNodeLocation targetDataNode,
       Map<Integer, TDataNodeLocation> dataNodeLocationMap,
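The DataNodeHeartbeatHandler change above stops a region from blindly inheriting its DataNode's Removing status; the region keeps the status of its last cached sample instead. A compact restatement of that decision rule, with an illustrative enum and cache standing in for IoTDB's RegionStatus and load cache:

import java.util.HashMap;
import java.util.Map;

public final class RegionStatusGuard {
  enum RegionStatus { Running, Unknown, Removing }

  // (regionId, nodeId) -> last sampled status; stands in for the load cache.
  private final Map<String, RegionStatus> lastSample = new HashMap<>();

  // Regions inherit the node's status, except Removing, which is node-scoped:
  // in that case fall back to the region's own last sample.
  RegionStatus nextStatus(String regionId, int nodeId, RegionStatus nodeStatus) {
    if (nodeStatus == RegionStatus.Removing) {
      return lastSample.getOrDefault(regionId + "#" + nodeId, RegionStatus.Unknown);
    }
    return nodeStatus;
  }

  public static void main(String[] args) {
    RegionStatusGuard guard = new RegionStatusGuard();
    guard.lastSample.put("r1#1", RegionStatus.Running);
    // Node 1 is being removed, but region r1 stays Running rather than inheriting it.
    System.out.println(guard.nextStatus("r1", 1, RegionStatus.Removing)); // Running
  }
}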
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeAsyncRequestRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeAsyncRequestRPCHandler.java
index f14a2365a9b35..69021eaec1aeb 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeAsyncRequestRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeAsyncRequestRPCHandler.java
@@ -20,11 +20,12 @@
 package org.apache.iotdb.confignode.client.async.handlers.rpc;
 
 import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
+import org.apache.iotdb.common.rpc.thrift.TPipeHeartbeatResp;
 import org.apache.iotdb.common.rpc.thrift.TSStatus;
 import org.apache.iotdb.common.rpc.thrift.TTestConnectionResp;
 import org.apache.iotdb.commons.client.request.AsyncRequestContext;
 import org.apache.iotdb.commons.client.request.AsyncRequestRPCHandler;
-import org.apache.iotdb.confignode.client.CnToDnRequestType;
+import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
 import org.apache.iotdb.confignode.client.async.handlers.rpc.subscription.CheckSchemaRegionUsingTemplateRPCHandler;
 import org.apache.iotdb.confignode.client.async.handlers.rpc.subscription.ConsumerGroupPushMetaRPCHandler;
 import org.apache.iotdb.confignode.client.async.handlers.rpc.subscription.TopicPushMetaRPCHandler;
@@ -32,7 +33,6 @@
 import org.apache.iotdb.mpp.rpc.thrift.TCheckTimeSeriesExistenceResp;
 import org.apache.iotdb.mpp.rpc.thrift.TCountPathsUsingTemplateResp;
 import org.apache.iotdb.mpp.rpc.thrift.TFetchSchemaBlackListResp;
-import org.apache.iotdb.mpp.rpc.thrift.TPipeHeartbeatResp;
 import org.apache.iotdb.mpp.rpc.thrift.TPushConsumerGroupMetaResp;
 import org.apache.iotdb.mpp.rpc.thrift.TPushPipeMetaResp;
 import org.apache.iotdb.mpp.rpc.thrift.TPushTopicMetaResp;
@@ -42,10 +42,10 @@
 import java.util.concurrent.CountDownLatch;
 
 public abstract class DataNodeAsyncRequestRPCHandler<Response>
-    extends AsyncRequestRPCHandler<Response, CnToDnRequestType, TDataNodeLocation> {
+    extends AsyncRequestRPCHandler<Response, CnToDnAsyncRequestType, TDataNodeLocation> {
 
   protected DataNodeAsyncRequestRPCHandler(
-      CnToDnRequestType requestType,
+      CnToDnAsyncRequestType requestType,
       int requestId,
       TDataNodeLocation targetNode,
       Map<Integer, TDataNodeLocation> dataNodeLocationMap,
@@ -70,10 +70,10 @@ protected String generateFormattedTargetLocation(TDataNodeLocation dataNodeLocat
   }
 
   public static DataNodeAsyncRequestRPCHandler<?> buildHandler(
-      AsyncRequestContext<CnToDnRequestType, ?, ?, TDataNodeLocation> context,
+      AsyncRequestContext<CnToDnAsyncRequestType, ?, ?, TDataNodeLocation> context,
       int requestId,
       TDataNodeLocation targetDataNode) {
-    CnToDnRequestType requestType = context.getRequestType();
+    CnToDnAsyncRequestType requestType = context.getRequestType();
     Map<Integer, TDataNodeLocation> dataNodeLocationMap = context.getNodeLocationMap();
     Map<Integer, ?> responseMap = context.getResponseMap();
     CountDownLatch countDownLatch = context.getCountDownLatch();
@@ -195,11 +195,16 @@ public static DataNodeAsyncRequestRPCHandler buildHandler(
       case FULL_MERGE:
       case FLUSH:
       case CLEAR_CACHE:
+      case INVALIDATE_LAST_CACHE:
+      case CLEAN_DATA_NODE_CACHE:
+      case STOP_AND_CLEAR_DATA_NODE:
       case START_REPAIR_DATA:
       case STOP_REPAIR_DATA:
       case LOAD_CONFIGURATION:
       case SET_SYSTEM_STATUS:
+      case NOTIFY_REGION_MIGRATION:
       case UPDATE_REGION_ROUTE_MAP:
+      case INVALIDATE_SCHEMA_CACHE:
       case INVALIDATE_MATCHED_SCHEMA_CACHE:
       case UPDATE_TEMPLATE:
       case KILL_QUERY_INSTANCE:
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeTSStatusRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeTSStatusRPCHandler.java
index 19d451eb671fb..7c93f363dd4b8 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeTSStatusRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeTSStatusRPCHandler.java
@@ -21,7 +21,7 @@
 
 import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
 import org.apache.iotdb.common.rpc.thrift.TSStatus;
-import org.apache.iotdb.confignode.client.CnToDnRequestType;
+import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
 import org.apache.iotdb.rpc.RpcUtils;
 import org.apache.iotdb.rpc.TSStatusCode;
 
@@ -37,7 +37,7 @@ public class DataNodeTSStatusRPCHandler extends DataNodeAsyncRequestRPCHandler<TSStatus> {
 
   public DataNodeTSStatusRPCHandler(
-      CnToDnRequestType requestType,
+      CnToDnAsyncRequestType requestType,
       int requestId,
       TDataNodeLocation targetDataNode,
       Map<Integer, TDataNodeLocation> dataNodeLocationMap,
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/FetchSchemaBlackListRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/FetchSchemaBlackListRPCHandler.java
index 45c659298ca30..693017ec02d6a 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/FetchSchemaBlackListRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/FetchSchemaBlackListRPCHandler.java
@@ -21,7 +21,7 @@
 
 import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
 import org.apache.iotdb.common.rpc.thrift.TSStatus;
-import org.apache.iotdb.confignode.client.CnToDnRequestType;
+import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
 import org.apache.iotdb.mpp.rpc.thrift.TFetchSchemaBlackListResp;
 import org.apache.iotdb.rpc.RpcUtils;
 import org.apache.iotdb.rpc.TSStatusCode;
@@ -39,7 +39,7 @@ public class FetchSchemaBlackListRPCHandler
       LoggerFactory.getLogger(FetchSchemaBlackListRPCHandler.class);
 
   public FetchSchemaBlackListRPCHandler(
-      CnToDnRequestType requestType,
+      CnToDnAsyncRequestType requestType,
       int requestId,
       TDataNodeLocation targetDataNode,
       Map<Integer, TDataNodeLocation> dataNodeLocationMap,
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/PipeHeartbeatRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/PipeHeartbeatRPCHandler.java
index ec4968cfa06dd..e5fa157961d7e 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/PipeHeartbeatRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/PipeHeartbeatRPCHandler.java
@@ -20,8 +20,8 @@
 package org.apache.iotdb.confignode.client.async.handlers.rpc;
 
 import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
-import org.apache.iotdb.confignode.client.CnToDnRequestType;
-import org.apache.iotdb.mpp.rpc.thrift.TPipeHeartbeatResp;
+import org.apache.iotdb.common.rpc.thrift.TPipeHeartbeatResp;
+import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -34,7 +34,7 @@ public class PipeHeartbeatRPCHandler extends DataNodeAsyncRequestRPCHandler<TPipeHeartbeatResp> {
 
   public PipeHeartbeatRPCHandler(
-      CnToDnRequestType requestType,
+      CnToDnAsyncRequestType requestType,
       int requestId,
       TDataNodeLocation targetDataNode,
       Map<Integer, TDataNodeLocation> dataNodeLocationMap,
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/PipePushMetaRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/PipePushMetaRPCHandler.java
index 3517bbeb93ea3..9ef80e9f8474a 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/PipePushMetaRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/PipePushMetaRPCHandler.java
@@ -20,7 +20,7 @@
 package org.apache.iotdb.confignode.client.async.handlers.rpc;
 
 import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
-import org.apache.iotdb.confignode.client.CnToDnRequestType;
+import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
 import org.apache.iotdb.mpp.rpc.thrift.TPushPipeMetaResp;
 import org.apache.iotdb.rpc.RpcUtils;
 import org.apache.iotdb.rpc.TSStatusCode;
@@ -35,7 +35,7 @@ public class PipePushMetaRPCHandler extends DataNodeAsyncRequestRPCHandler<TPushPipeMetaResp> {
 
   public PipePushMetaRPCHandler(
-      CnToDnRequestType requestType,
+      CnToDnAsyncRequestType requestType,
       int requestId,
       TDataNodeLocation targetDataNode,
       Map<Integer, TDataNodeLocation> dataNodeLocationMap,
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/SchemaUpdateRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/SchemaUpdateRPCHandler.java
index db8458a948ce3..dc2796a232e28 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/SchemaUpdateRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/SchemaUpdateRPCHandler.java
@@ -21,7 +21,7 @@
 
 import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
 import org.apache.iotdb.common.rpc.thrift.TSStatus;
-import org.apache.iotdb.confignode.client.CnToDnRequestType;
+import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
 import org.apache.iotdb.rpc.RpcUtils;
 import org.apache.iotdb.rpc.TSStatusCode;
 
@@ -36,7 +36,7 @@ public class SchemaUpdateRPCHandler extends DataNodeTSStatusRPCHandler {
   private static final Logger LOGGER = LoggerFactory.getLogger(SchemaUpdateRPCHandler.class);
 
   public SchemaUpdateRPCHandler(
-      CnToDnRequestType requestType,
+      CnToDnAsyncRequestType requestType,
       int requestId,
       TDataNodeLocation targetDataNode,
       Map<Integer, TDataNodeLocation> dataNodeLocationMap,
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/SubmitTestConnectionTaskRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/SubmitTestConnectionTaskRPCHandler.java
index 4abf0a0eca114..f3c58892cbe4b 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/SubmitTestConnectionTaskRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/SubmitTestConnectionTaskRPCHandler.java
@@ -22,7 +22,7 @@
 import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
 import org.apache.iotdb.common.rpc.thrift.TSStatus;
 import org.apache.iotdb.common.rpc.thrift.TTestConnectionResp;
-import org.apache.iotdb.confignode.client.CnToDnRequestType;
+import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
 import org.apache.iotdb.rpc.TSStatusCode;
 
 import org.slf4j.Logger;
@@ -38,7 +38,7 @@ public class SubmitTestConnectionTaskRPCHandler
       LoggerFactory.getLogger(SubmitTestConnectionTaskRPCHandler.class);
 
   public SubmitTestConnectionTaskRPCHandler(
-      CnToDnRequestType requestType,
+      CnToDnAsyncRequestType requestType,
       int requestId,
       TDataNodeLocation targetDataNode,
       Map<Integer, TDataNodeLocation> dataNodeLocationMap,
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/TransferLeaderRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/TransferLeaderRPCHandler.java
index 352cc0694e2bf..8bfe0eb4755d3 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/TransferLeaderRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/TransferLeaderRPCHandler.java
@@ -20,7 +20,7 @@
 package org.apache.iotdb.confignode.client.async.handlers.rpc;
 
 import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
-import org.apache.iotdb.confignode.client.CnToDnRequestType;
+import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
 import org.apache.iotdb.mpp.rpc.thrift.TRegionLeaderChangeResp;
 import org.apache.iotdb.rpc.RpcUtils;
 import org.apache.iotdb.rpc.TSStatusCode;
@@ -37,7 +37,7 @@ public class TransferLeaderRPCHandler
   private static final Logger LOGGER = LoggerFactory.getLogger(TransferLeaderRPCHandler.class);
 
   public TransferLeaderRPCHandler(
-      CnToDnRequestType requestType,
+      CnToDnAsyncRequestType requestType,
       int requestId,
       TDataNodeLocation targetDataNode,
       Map<Integer, TDataNodeLocation> dataNodeLocationMap,
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/CheckSchemaRegionUsingTemplateRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/CheckSchemaRegionUsingTemplateRPCHandler.java
index 14898dcdc6c9c..249e8b5176799 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/CheckSchemaRegionUsingTemplateRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/CheckSchemaRegionUsingTemplateRPCHandler.java
@@ -21,7 +21,7 @@
 
 import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
 import org.apache.iotdb.common.rpc.thrift.TSStatus;
-import org.apache.iotdb.confignode.client.CnToDnRequestType;
+import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
 import org.apache.iotdb.confignode.client.async.handlers.rpc.DataNodeAsyncRequestRPCHandler;
 import org.apache.iotdb.mpp.rpc.thrift.TCheckSchemaRegionUsingTemplateResp;
 import org.apache.iotdb.rpc.RpcUtils;
@@ -40,7 +40,7 @@ public class CheckSchemaRegionUsingTemplateRPCHandler
       LoggerFactory.getLogger(CheckSchemaRegionUsingTemplateRPCHandler.class);
 
   public CheckSchemaRegionUsingTemplateRPCHandler(
-      CnToDnRequestType requestType,
+      CnToDnAsyncRequestType requestType,
       int requestId,
       TDataNodeLocation targetDataNode,
       Map<Integer, TDataNodeLocation> dataNodeLocationMap,
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/ConsumerGroupPushMetaRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/ConsumerGroupPushMetaRPCHandler.java
index ee3c11eeb427b..2938d4f85b7cd 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/ConsumerGroupPushMetaRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/ConsumerGroupPushMetaRPCHandler.java
@@ -20,7 +20,7 @@
 package org.apache.iotdb.confignode.client.async.handlers.rpc.subscription;
 
 import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
-import org.apache.iotdb.confignode.client.CnToDnRequestType;
+import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
 import org.apache.iotdb.confignode.client.async.handlers.rpc.DataNodeAsyncRequestRPCHandler;
 import org.apache.iotdb.mpp.rpc.thrift.TPushConsumerGroupMetaResp;
 import org.apache.iotdb.rpc.RpcUtils;
@@ -38,7 +38,7 @@ public class ConsumerGroupPushMetaRPCHandler
       LoggerFactory.getLogger(ConsumerGroupPushMetaRPCHandler.class);
 
   public ConsumerGroupPushMetaRPCHandler(
-      CnToDnRequestType requestType,
+      CnToDnAsyncRequestType requestType,
       int requestId,
       TDataNodeLocation targetDataNode,
       Map<Integer, TDataNodeLocation> dataNodeLocationMap,
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/TopicPushMetaRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/TopicPushMetaRPCHandler.java
index cf8451feaacad..91ffdd7232b3f 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/TopicPushMetaRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/TopicPushMetaRPCHandler.java
@@ -20,7 +20,7 @@
 package org.apache.iotdb.confignode.client.async.handlers.rpc.subscription;
 
 import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
-import org.apache.iotdb.confignode.client.CnToDnRequestType;
+import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
 import org.apache.iotdb.confignode.client.async.handlers.rpc.DataNodeAsyncRequestRPCHandler;
 import org.apache.iotdb.mpp.rpc.thrift.TPushTopicMetaResp;
 import org.apache.iotdb.rpc.RpcUtils;
@@ -37,7 +37,7 @@ public class TopicPushMetaRPCHandler extends DataNodeAsyncRequestRPCHandler<TPushTopicMetaResp> {
 
   public TopicPushMetaRPCHandler(
-      CnToDnRequestType requestType,
+      CnToDnAsyncRequestType requestType,
       int requestId,
       TDataNodeLocation targetDataNode,
       Map<Integer, TDataNodeLocation> dataNodeLocationMap,
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/CnToDnSyncRequestType.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/CnToDnSyncRequestType.java
new file mode 100644
index 0000000000000..14d0d60fc8dc1
--- /dev/null
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/CnToDnSyncRequestType.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.client.sync;
+
+public enum CnToDnSyncRequestType {
+  // Node Maintenance
+  CLEAN_DATA_NODE_CACHE,
+  STOP_AND_CLEAR_DATA_NODE,
+  SET_SYSTEM_STATUS,
+  SHOW_CONFIGURATION,
+
+  // Region Maintenance
+  CREATE_DATA_REGION,
+  CREATE_SCHEMA_REGION,
+  DELETE_REGION,
+  CREATE_NEW_REGION_PEER,
+  ADD_REGION_PEER,
+  REMOVE_REGION_PEER,
+  DELETE_OLD_REGION_PEER,
+  RESET_PEER_LIST,
+
+  // PartitionCache
+  INVALIDATE_PARTITION_CACHE,
+  INVALIDATE_PERMISSION_CACHE,
+  INVALIDATE_SCHEMA_CACHE,
+
+  // Template
+  UPDATE_TEMPLATE,
+
+  // Schema
+  KILL_QUERY_INSTANCE,
+}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncConfigNodeClientPool.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncConfigNodeClientPool.java
index cc26be000a8eb..6282924d0a0f1 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncConfigNodeClientPool.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncConfigNodeClientPool.java
@@ -87,9 +87,9 @@ public Object sendSyncRequestToConfigNodeWithRetry(
           return client.deleteConfigNodePeer((TConfigNodeLocation) req);
         case REPORT_CONFIG_NODE_SHUTDOWN:
           return client.reportConfigNodeShutdown((TConfigNodeLocation) req);
-        case STOP_CONFIG_NODE:
-          // Only use stopConfigNode when the ConfigNode is removed.
-          return client.stopConfigNode((TConfigNodeLocation) req);
+        case STOP_AND_CLEAR_CONFIG_NODE:
+          // Only use stopAndClearConfigNode when the ConfigNode is removed.
+          return client.stopAndClearConfigNode((TConfigNodeLocation) req);
         case SET_CONFIGURATION:
           return client.setConfiguration((TSetConfigurationReq) req);
         case SHOW_CONFIGURATION:
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncDataNodeClientPool.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncDataNodeClientPool.java
index 84e609ffb38c3..c1dc83c1dfd92 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncDataNodeClientPool.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncDataNodeClientPool.java
@@ -27,11 +27,11 @@
 import org.apache.iotdb.commons.client.IClientManager;
 import org.apache.iotdb.commons.client.exception.ClientManagerException;
 import org.apache.iotdb.commons.client.sync.SyncDataNodeInternalServiceClient;
-import org.apache.iotdb.confignode.client.CnToDnRequestType;
+import org.apache.iotdb.commons.exception.UncheckedStartupException;
+import org.apache.iotdb.mpp.rpc.thrift.TCleanDataNodeCacheReq;
 import org.apache.iotdb.mpp.rpc.thrift.TCreateDataRegionReq;
 import org.apache.iotdb.mpp.rpc.thrift.TCreatePeerReq;
 import org.apache.iotdb.mpp.rpc.thrift.TCreateSchemaRegionReq;
-import org.apache.iotdb.mpp.rpc.thrift.TDisableDataNodeReq;
 import org.apache.iotdb.mpp.rpc.thrift.TInvalidateCacheReq;
 import org.apache.iotdb.mpp.rpc.thrift.TInvalidatePermissionCacheReq;
 import org.apache.iotdb.mpp.rpc.thrift.TMaintainPeerReq;
@@ -39,14 +39,19 @@
 import org.apache.iotdb.mpp.rpc.thrift.TRegionLeaderChangeResp;
 import org.apache.iotdb.mpp.rpc.thrift.TResetPeerListReq;
 import org.apache.iotdb.mpp.rpc.thrift.TUpdateTemplateReq;
-import org.apache.iotdb.rpc.RpcUtils;
 import org.apache.iotdb.rpc.TSStatusCode;
 
+import com.google.common.collect.ImmutableMap;
+import org.apache.ratis.util.function.CheckedBiFunction;
 import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
 import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
 
 /** Synchronously send RPC requests to DataNodes. See queryengine.thrift for more details. */
 public class SyncDataNodeClientPool {
@@ -57,20 +62,96 @@ public class SyncDataNodeClientPool {
 
   private final IClientManager<TEndPoint, SyncDataNodeInternalServiceClient> clientManager;
 
+  protected ImmutableMap<
+          CnToDnSyncRequestType,
+          CheckedBiFunction<Object, SyncDataNodeInternalServiceClient, Object, Exception>>
+      actionMap;
+
   private SyncDataNodeClientPool() {
     clientManager =
         new IClientManager.Factory<TEndPoint, SyncDataNodeInternalServiceClient>()
             .createClientManager(
                 new ClientPoolFactory.SyncDataNodeInternalServiceClientPoolFactory());
+    buildActionMap();
+    checkActionMapCompleteness();
+  }
+
+  private void buildActionMap() {
+    ImmutableMap.Builder<
+            CnToDnSyncRequestType,
+            CheckedBiFunction<Object, SyncDataNodeInternalServiceClient, Object, Exception>>
+        actionMapBuilder = ImmutableMap.builder();
+    actionMapBuilder.put(
+        CnToDnSyncRequestType.INVALIDATE_PARTITION_CACHE,
+        (req, client) -> client.invalidatePartitionCache((TInvalidateCacheReq) req));
+    actionMapBuilder.put(
+        CnToDnSyncRequestType.INVALIDATE_SCHEMA_CACHE,
+        (req, client) -> client.invalidateSchemaCache((TInvalidateCacheReq) req));
+    actionMapBuilder.put(
+        CnToDnSyncRequestType.CREATE_SCHEMA_REGION,
+        (req, client) -> client.createSchemaRegion((TCreateSchemaRegionReq) req));
+    actionMapBuilder.put(
+        CnToDnSyncRequestType.CREATE_DATA_REGION,
+        (req, client) -> client.createDataRegion((TCreateDataRegionReq) req));
+    actionMapBuilder.put(
+        CnToDnSyncRequestType.DELETE_REGION,
+        (req, client) -> client.deleteRegion((TConsensusGroupId) req));
+    actionMapBuilder.put(
+        CnToDnSyncRequestType.INVALIDATE_PERMISSION_CACHE,
+        (req, client) -> client.invalidatePermissionCache((TInvalidatePermissionCacheReq) req));
+    actionMapBuilder.put(
+        CnToDnSyncRequestType.CLEAN_DATA_NODE_CACHE,
+        (req, client) -> client.cleanDataNodeCache((TCleanDataNodeCacheReq) req));
+    actionMapBuilder.put(
+        CnToDnSyncRequestType.STOP_AND_CLEAR_DATA_NODE,
+        (req, client) -> client.stopAndClearDataNode());
+    actionMapBuilder.put(
+        CnToDnSyncRequestType.SET_SYSTEM_STATUS,
+        (req, client) -> client.setSystemStatus((String) req));
+    actionMapBuilder.put(
+        CnToDnSyncRequestType.KILL_QUERY_INSTANCE,
+        (req, client) -> client.killQueryInstance((String) req));
+    actionMapBuilder.put(
+        CnToDnSyncRequestType.UPDATE_TEMPLATE,
+        (req, client) -> client.updateTemplate((TUpdateTemplateReq) req));
+    actionMapBuilder.put(
+        CnToDnSyncRequestType.CREATE_NEW_REGION_PEER,
+        (req, client) -> client.createNewRegionPeer((TCreatePeerReq) req));
+    actionMapBuilder.put(
+        CnToDnSyncRequestType.ADD_REGION_PEER,
+        (req, client) -> client.addRegionPeer((TMaintainPeerReq) req));
+    actionMapBuilder.put(
+        CnToDnSyncRequestType.REMOVE_REGION_PEER,
+        (req, client) -> client.removeRegionPeer((TMaintainPeerReq) req));
+    actionMapBuilder.put(
+        CnToDnSyncRequestType.DELETE_OLD_REGION_PEER,
+        (req, client) -> client.deleteOldRegionPeer((TMaintainPeerReq) req));
+    actionMapBuilder.put(
+        CnToDnSyncRequestType.RESET_PEER_LIST,
+        (req, client) -> client.resetPeerList((TResetPeerListReq) req));
+    actionMapBuilder.put(
+        CnToDnSyncRequestType.SHOW_CONFIGURATION, (req, client) -> client.showConfiguration());
+    actionMap = actionMapBuilder.build();
+  }
+
+  private void checkActionMapCompleteness() {
+    List<CnToDnSyncRequestType> lackList =
+        Arrays.stream(CnToDnSyncRequestType.values())
+            .filter(type -> !actionMap.containsKey(type))
+            .collect(Collectors.toList());
+    if (!lackList.isEmpty()) {
+      throw new UncheckedStartupException(
+          String.format("These request types should be added to actionMap: %s", lackList));
+    }
   }
 
   public Object sendSyncRequestToDataNodeWithRetry(
-      TEndPoint endPoint, Object req, CnToDnRequestType requestType) {
+      TEndPoint endPoint, Object req, CnToDnSyncRequestType requestType) {
     Throwable lastException = new TException();
     for (int retry = 0; retry < DEFAULT_RETRY_NUM; retry++) {
       try (SyncDataNodeInternalServiceClient client = clientManager.borrowClient(endPoint)) {
         return executeSyncRequest(requestType, client, req);
-      } catch (ClientManagerException | TException e) {
+      } catch (Exception e) {
         lastException = e;
         if (retry != DEFAULT_RETRY_NUM - 1) {
           LOGGER.warn("{} failed on DataNode {}, retrying {}...", requestType, endPoint, retry + 1);
@@ -84,12 +165,12 @@ public Object sendSyncRequestToDataNodeWithRetry(
   }
 
   public Object sendSyncRequestToDataNodeWithGivenRetry(
-      TEndPoint endPoint, Object req, CnToDnRequestType requestType, int retryNum) {
+      TEndPoint endPoint, Object req, CnToDnSyncRequestType requestType, int retryNum) {
     Throwable lastException = new TException();
     for (int retry = 0; retry < retryNum; retry++) {
       try (SyncDataNodeInternalServiceClient client = clientManager.borrowClient(endPoint)) {
         return executeSyncRequest(requestType, client, req);
-      } catch (ClientManagerException | TException e) {
+      } catch (Exception e) {
         lastException = e;
         if (retry != retryNum - 1) {
           LOGGER.warn("{} failed on DataNode {}, retrying {}...", requestType, endPoint, retry + 1);
@@ -103,47 +184,9 @@ public Object sendSyncRequestToDataNodeWithGivenRetry(
   }
 
   private Object executeSyncRequest(
-      CnToDnRequestType requestType, SyncDataNodeInternalServiceClient client, Object req)
-      throws TException {
-    switch (requestType) {
-      case INVALIDATE_PARTITION_CACHE:
-        return client.invalidatePartitionCache((TInvalidateCacheReq) req);
-      case INVALIDATE_SCHEMA_CACHE:
-        return client.invalidateSchemaCache((TInvalidateCacheReq) req);
-      case CREATE_SCHEMA_REGION:
-        return client.createSchemaRegion((TCreateSchemaRegionReq) req);
-      case CREATE_DATA_REGION:
-        return client.createDataRegion((TCreateDataRegionReq) req);
-      case DELETE_REGION:
-        return client.deleteRegion((TConsensusGroupId) req);
-      case INVALIDATE_PERMISSION_CACHE:
-        return client.invalidatePermissionCache((TInvalidatePermissionCacheReq) req);
-      case DISABLE_DATA_NODE:
-        return client.disableDataNode((TDisableDataNodeReq) req);
-      case STOP_DATA_NODE:
-        return client.stopDataNode();
-      case SET_SYSTEM_STATUS:
-        return client.setSystemStatus((String) req);
-      case KILL_QUERY_INSTANCE:
-        return client.killQueryInstance((String) req);
-      case UPDATE_TEMPLATE:
-        return client.updateTemplate((TUpdateTemplateReq) req);
-      case CREATE_NEW_REGION_PEER:
-        return client.createNewRegionPeer((TCreatePeerReq) req);
-      case ADD_REGION_PEER:
-        return client.addRegionPeer((TMaintainPeerReq) req);
-      case REMOVE_REGION_PEER:
-        return client.removeRegionPeer((TMaintainPeerReq) req);
-      case DELETE_OLD_REGION_PEER:
-        return client.deleteOldRegionPeer((TMaintainPeerReq) req);
-      case RESET_PEER_LIST:
-        return client.resetPeerList((TResetPeerListReq) req);
-      case SHOW_CONFIGURATION:
-        return client.showConfiguration();
-      default:
-        return RpcUtils.getStatus(
-            TSStatusCode.EXECUTE_STATEMENT_ERROR, "Unknown request type: " + requestType);
-    }
+      CnToDnSyncRequestType requestType, SyncDataNodeInternalServiceClient client, Object req)
+      throws Exception {
+    return Objects.requireNonNull(actionMap.get(requestType)).apply(req, client);
   }
 
   private void doRetryWait(int retryNum) {
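SyncDataNodeClientPool now funnels every request through the action map and retries on any Exception, borrowing a pooled client per attempt via try-with-resources. The retry skeleton reduces to the following shape; Callable stands in for the borrow-plus-dispatch step, and the parameters mirror DEFAULT_RETRY_NUM and doRetryWait.

import java.util.concurrent.Callable;

public final class RetrySketch {
  // Retry an action a fixed number of times, keeping the last failure for rethrow.
  static <T> T withRetry(Callable<T> action, int maxRetries, long backoffMs) throws Exception {
    Exception last = null;
    for (int attempt = 0; attempt < maxRetries; attempt++) {
      try {
        return action.call(); // in the pool this wraps borrowClient + actionMap dispatch
      } catch (Exception e) {
        last = e;
        if (attempt != maxRetries - 1) {
          Thread.sleep(backoffMs); // stands in for doRetryWait
        }
      }
    }
    throw last;
  }

  public static void main(String[] args) throws Exception {
    System.out.println(withRetry(() -> 42, 3, 10L)); // succeeds on first attempt
  }
}

Catching Exception (rather than only ClientManagerException | TException) is what lets the map-based executeSyncRequest declare throws Exception without widening every caller's signature.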
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java
index 1257e9246e948..50c8f5bc9a0d8 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java
@@ -20,14 +20,18 @@
 package org.apache.iotdb.confignode.conf;
 
 import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation;
+import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId;
+import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType;
 import org.apache.iotdb.common.rpc.thrift.TEndPoint;
 import org.apache.iotdb.commons.client.property.ClientPoolProperty.DefaultProperty;
 import org.apache.iotdb.commons.conf.IoTDBConstant;
 import org.apache.iotdb.confignode.manager.load.balancer.RegionBalancer;
 import org.apache.iotdb.confignode.manager.load.balancer.router.leader.AbstractLeaderBalancer;
 import org.apache.iotdb.confignode.manager.load.balancer.router.priority.IPriorityBalancer;
+import org.apache.iotdb.confignode.manager.load.cache.IFailureDetector;
 import org.apache.iotdb.confignode.manager.partition.RegionGroupExtensionPolicy;
 import org.apache.iotdb.consensus.ConsensusFactory;
+import org.apache.iotdb.metrics.config.MetricConfigDescriptor;
 
 import java.io.File;
 import java.lang.reflect.Field;
@@ -35,7 +39,7 @@
 public class ConfigNodeConfig {
 
-  /** ClusterId, the default value "defaultCluster" will be changed after join cluster. */
+  /** ClusterName, the default value "defaultCluster" will be changed after join cluster. */
   private volatile String clusterName = "defaultCluster";
 
   /** ConfigNodeId, the default value -1 will be changed after join cluster. */
@@ -90,7 +94,7 @@ public class ConfigNodeConfig {
   private int defaultSchemaRegionGroupNumPerDatabase = 1;
 
   /** The maximum number of SchemaRegions expected to be managed by each DataNode. */
-  private double schemaRegionPerDataNode = schemaReplicationFactor;
+  private int schemaRegionPerDataNode = 1;
 
   /** The policy of extension DataRegionGroup for each Database. */
   private RegionGroupExtensionPolicy dataRegionGroupExtensionPolicy =
@@ -103,15 +107,21 @@
    */
   private int defaultDataRegionGroupNumPerDatabase = 2;
 
-  /** The maximum number of DataRegions expected to be managed by each DataNode. */
-  private double dataRegionPerDataNode = 5.0;
+  /**
+   * The maximum number of DataRegions expected to be managed by each DataNode. Set to 0 means that
+   * each DataNode automatically hosts CPU cores / 2 regions.
+   */
+  private int dataRegionPerDataNode = 0;
+
+  /** Each DataNode automatically hosts CPU cores * this proportion (i.e. cores / 2) regions. */
+  private final double dataRegionPerDataNodeProportion = 0.5;
 
   /** RegionGroup allocate policy. */
   private RegionBalancer.RegionGroupAllocatePolicy regionGroupAllocatePolicy =
       RegionBalancer.RegionGroupAllocatePolicy.GCR;
 
   /** Max concurrent client number. */
-  private int rpcMaxConcurrentClientNum = 65535;
+  private int rpcMaxConcurrentClientNum = 3000;
 
   /** just for test wait for 60 second by default. */
   private int thriftServerAwaitTimeForStopService = 60;
@@ -159,7 +169,7 @@
       systemDir + File.separator + "pipe" + File.separator + "receiver";
 
   /** Procedure Evict ttl. */
-  private int procedureCompletedEvictTTL = 800;
+  private int procedureCompletedEvictTTL = 60;
 
   /** Procedure completed clean interval. */
   private int procedureCompletedCleanInterval = 30;
 
@@ -171,8 +181,17 @@
   /** The heartbeat interval in milliseconds. */
   private long heartbeatIntervalInMs = 1000;
 
-  /** The unknown DataNode detect interval in milliseconds. */
-  private long unknownDataNodeDetectInterval = heartbeatIntervalInMs;
+  /** Failure detector implementation */
+  private String failureDetector = IFailureDetector.PHI_ACCRUAL_DETECTOR;
+
+  /** Max heartbeat elapsed time threshold for Fixed failure detector */
+  private long failureDetectorFixedThresholdInMs = 20000;
+
+  /** Max threshold for Phi accrual failure detector */
+  private long failureDetectorPhiThreshold = 30;
+
+  /** Acceptable pause duration for Phi accrual failure detector */
+  private long failureDetectorPhiAcceptablePauseInMs = 10000;
 
   /** The policy of cluster RegionGroups' leader distribution. */
   private String leaderDistributionPolicy = AbstractLeaderBalancer.CFD_POLICY;
@@ -308,14 +327,10 @@ private void formulateFolders() {
     pipeReceiverFileDir = addHomeDir(pipeReceiverFileDir);
   }
 
-  private String addHomeDir(String dir) {
-    String homeDir = System.getProperty(ConfigNodeConstant.CONFIGNODE_HOME, null);
-    if (!new File(dir).isAbsolute() && homeDir != null && homeDir.length() > 0) {
-      if (!homeDir.endsWith(File.separator)) {
-        dir = homeDir + File.separatorChar + dir;
-      } else {
-        dir = homeDir + dir;
-      }
+  public static String addHomeDir(String dir) {
+    final String homeDir = System.getProperty(ConfigNodeConstant.CONFIGNODE_HOME, null);
+    if (!new File(dir).isAbsolute() && homeDir != null && !homeDir.isEmpty()) {
+      dir = !homeDir.endsWith(File.separator) ? homeDir + File.separatorChar + dir : homeDir + dir;
     }
     return dir;
   }
@@ -339,6 +354,7 @@ public String getClusterName() {
 
   public void setClusterName(String clusterName) {
     this.clusterName = clusterName;
+    MetricConfigDescriptor.getInstance().getMetricConfig().updateClusterName(clusterName);
   }
 
   public int getConfigNodeId() {
@@ -481,11 +497,11 @@ public void setDefaultDataRegionGroupNumPerDatabase(int defaultDataRegionGroupNu
     this.defaultDataRegionGroupNumPerDatabase = defaultDataRegionGroupNumPerDatabase;
   }
 
-  public double getSchemaRegionPerDataNode() {
+  public int getSchemaRegionPerDataNode() {
     return schemaRegionPerDataNode;
   }
 
-  public void setSchemaRegionPerDataNode(double schemaRegionPerDataNode) {
+  public void setSchemaRegionPerDataNode(int schemaRegionPerDataNode) {
     this.schemaRegionPerDataNode = schemaRegionPerDataNode;
   }
 
@@ -497,14 +513,18 @@ public void setDataRegionConsensusProtocolClass(String dataRegionConsensusProtoc
     this.dataRegionConsensusProtocolClass = dataRegionConsensusProtocolClass;
   }
 
-  public double getDataRegionPerDataNode() {
+  public int getDataRegionPerDataNode() {
     return dataRegionPerDataNode;
   }
 
-  public void setDataRegionPerDataNode(double dataRegionPerDataNode) {
+  public void setDataRegionPerDataNode(int dataRegionPerDataNode) {
     this.dataRegionPerDataNode = dataRegionPerDataNode;
   }
 
+  public double getDataRegionPerDataNodeProportion() {
+    return dataRegionPerDataNodeProportion;
+  }
+
   public RegionBalancer.RegionGroupAllocatePolicy getRegionGroupAllocatePolicy() {
     return regionGroupAllocatePolicy;
   }
@@ -637,14 +657,6 @@ public void setHeartbeatIntervalInMs(long heartbeatIntervalInMs) {
     this.heartbeatIntervalInMs = heartbeatIntervalInMs;
   }
 
-  public long getUnknownDataNodeDetectInterval() {
-    return unknownDataNodeDetectInterval;
-  }
-
-  public void setUnknownDataNodeDetectInterval(long unknownDataNodeDetectInterval) {
-    this.unknownDataNodeDetectInterval = unknownDataNodeDetectInterval;
-  }
-
   public String getLeaderDistributionPolicy() {
     return leaderDistributionPolicy;
   }
@@ -1199,4 +1211,43 @@ public TConfigNodeLocation generateLocalConfigNodeLocation() {
         new TEndPoint(getInternalAddress(), getInternalPort()),
         new TEndPoint(getInternalAddress(), getConsensusPort()));
   }
+
+  public boolean isConsensusGroupStrongConsistency(TConsensusGroupId regionGroupId) {
+    return (TConsensusGroupType.SchemaRegion.equals(regionGroupId.getType())
+            && getSchemaRegionConsensusProtocolClass().equals(ConsensusFactory.RATIS_CONSENSUS))
+        || (TConsensusGroupType.DataRegion.equals(regionGroupId.getType())
+            && getDataRegionConsensusProtocolClass().equals(ConsensusFactory.RATIS_CONSENSUS));
+  }
+
+  public String getFailureDetector() {
+    return failureDetector;
+  }
+
+  public void setFailureDetector(String failureDetector) {
+    this.failureDetector = failureDetector;
+  }
+
+  public long getFailureDetectorFixedThresholdInMs() {
+    return failureDetectorFixedThresholdInMs;
+  }
+
+  public void setFailureDetectorFixedThresholdInMs(long failureDetectorFixedThresholdInMs) {
+    this.failureDetectorFixedThresholdInMs = failureDetectorFixedThresholdInMs;
+  }
+
+  public long getFailureDetectorPhiThreshold() {
+    return failureDetectorPhiThreshold;
+  }
+
+  public void setFailureDetectorPhiThreshold(long failureDetectorPhiThreshold) {
+    this.failureDetectorPhiThreshold = failureDetectorPhiThreshold;
+  }
+
+  public long getFailureDetectorPhiAcceptablePauseInMs() {
+    return failureDetectorPhiAcceptablePauseInMs;
+  }
+
+  public void setFailureDetectorPhiAcceptablePauseInMs(long failureDetectorPhiAcceptablePauseInMs) {
+    this.failureDetectorPhiAcceptablePauseInMs = failureDetectorPhiAcceptablePauseInMs;
+  }
 }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConstant.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConstant.java
index 5724eb1862fe9..17d23193b9d24 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConstant.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConstant.java
@@ -34,6 +34,7 @@ public class ConfigNodeConstant {
       "Executed failed, check usage: <Node-id>/<internal_address>:<internal_port>";
 
   public static final String REMOVE_DATANODE_PROCESS = "[REMOVE_DATANODE_PROCESS]";
+  public static final String REMOVE_AINODE_PROCESS = "[REMOVE_AINODE_PROCESS]";
   public static final String REGION_MIGRATE_PROCESS = "[REGION_MIGRATE_PROCESS]";
 
   private ConfigNodeConstant() {
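Per the new ConfigNodeConfig comments, dataRegionPerDataNode = 0 now means "derive the limit from hardware" using dataRegionPerDataNodeProportion (0.5, i.e. CPU cores / 2). The diff does not show the consuming code, so the following is only a sketch of how such a default plausibly resolves:

public final class RegionCountDefaults {
  // Mirrors the documented rule: a configured value of 0 means "derive from
  // hardware", i.e. availableProcessors * proportion (0.5 here), floored at 1.
  // The exact call site inside the ConfigNode is an assumption; only the rule
  // itself comes from the diff's comments.
  static int effectiveDataRegionPerDataNode(int configured, double proportion) {
    if (configured > 0) {
      return configured; // an explicit setting always wins
    }
    int cores = Runtime.getRuntime().availableProcessors();
    return Math.max(1, (int) (cores * proportion));
  }

  public static void main(String[] args) {
    System.out.println(effectiveDataRegionPerDataNode(0, 0.5)); // e.g. 4 on an 8-core box
    System.out.println(effectiveDataRegionPerDataNode(5, 0.5)); // explicit value wins: 5
  }
}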
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java
index 7719053587605..b9efee34e28f4 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java
@@ -23,12 +23,14 @@
 import org.apache.iotdb.commons.conf.CommonDescriptor;
 import org.apache.iotdb.commons.conf.ConfigurationFileUtils;
 import org.apache.iotdb.commons.conf.IoTDBConstant;
+import org.apache.iotdb.commons.conf.TrimProperties;
 import org.apache.iotdb.commons.exception.BadNodeUrlException;
 import org.apache.iotdb.commons.schema.SchemaConstant;
 import org.apache.iotdb.commons.utils.NodeUrlUtils;
 import org.apache.iotdb.confignode.manager.load.balancer.RegionBalancer;
 import org.apache.iotdb.confignode.manager.load.balancer.router.leader.AbstractLeaderBalancer;
 import org.apache.iotdb.confignode.manager.load.balancer.router.priority.IPriorityBalancer;
+import org.apache.iotdb.confignode.manager.load.cache.IFailureDetector;
 import org.apache.iotdb.confignode.manager.partition.RegionGroupExtensionPolicy;
 import org.apache.iotdb.metrics.config.MetricConfigDescriptor;
 import org.apache.iotdb.metrics.utils.NodeType;
@@ -46,7 +48,6 @@
 import java.nio.charset.StandardCharsets;
 import java.util.Collections;
 import java.util.Optional;
-import java.util.Properties;
 
 public class ConfigNodeDescriptor {
   private static final Logger LOGGER = LoggerFactory.getLogger(ConfigNodeDescriptor.class);
@@ -118,13 +119,13 @@ else if (!urlString.endsWith(".properties")) {
   }
 
   private void loadProps() {
-    Properties commonProperties = new Properties();
+    TrimProperties trimProperties = new TrimProperties();
     URL url = getPropsUrl(CommonConfig.SYSTEM_CONFIG_NAME);
     if (url != null) {
       try (InputStream inputStream = url.openStream()) {
         LOGGER.info("start reading ConfigNode conf file: {}", url);
-        commonProperties.load(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
-        loadProperties(commonProperties);
+        trimProperties.load(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
+        loadProperties(trimProperties);
       } catch (IOException | BadNodeUrlException e) {
         LOGGER.error("Couldn't load ConfigNode conf file, reject ConfigNode startup.", e);
         System.exit(-1);
@@ -133,7 +134,7 @@ private void loadProps() {
       commonDescriptor
           .getConfig()
           .updatePath(System.getProperty(ConfigNodeConstant.CONFIGNODE_HOME, null));
-      MetricConfigDescriptor.getInstance().loadProps(commonProperties, true);
+      MetricConfigDescriptor.getInstance().loadProps(trimProperties, true);
       MetricConfigDescriptor.getInstance()
           .getMetricConfig()
           .updateRpcInstance(NodeType.CONFIGNODE, SchemaConstant.SYSTEM_DATABASE);
@@ -145,7 +146,7 @@ private void loadProps() {
     }
   }
 
-  private void loadProperties(Properties properties) throws BadNodeUrlException, IOException {
+  private void loadProperties(TrimProperties properties) throws BadNodeUrlException, IOException {
     conf.setClusterName(
         properties.getProperty(IoTDBConstant.CLUSTER_NAME, conf.getClusterName()).trim());
 
@@ -238,12 +239,13 @@ private void loadProperties(Properties properties) throws BadNodeUrlException, I
                 .trim()));
 
     conf.setSchemaRegionPerDataNode(
-        Double.parseDouble(
-            properties
-                .getProperty(
-                    "schema_region_per_data_node",
-                    String.valueOf(conf.getSchemaRegionPerDataNode()))
-                .trim()));
+        (int)
+            Double.parseDouble(
+                properties
+                    .getProperty(
+                        "schema_region_per_data_node",
+                        String.valueOf(conf.getSchemaRegionPerDataNode()))
+                    .trim()));
 
     conf.setDataRegionGroupExtensionPolicy(
         RegionGroupExtensionPolicy.parse(
@@ -258,11 +260,13 @@ private void loadProperties(Properties properties) throws BadNodeUrlException, I
             String.valueOf(conf.getDefaultDataRegionGroupNumPerDatabase()).trim())));
 
     conf.setDataRegionPerDataNode(
-        Double.parseDouble(
-            properties
-                .getProperty(
-                    "data_region_per_data_node", String.valueOf(conf.getDataRegionPerDataNode()))
-                .trim()));
+        (int)
+            Double.parseDouble(
+                properties
+                    .getProperty(
+                        "data_region_per_data_node",
+                        String.valueOf(conf.getDataRegionPerDataNode()))
+                    .trim()));
 
     try {
       conf.setRegionAllocateStrategy(
@@ -316,6 +320,35 @@ private void loadProperties(Properties properties) throws BadNodeUrlException, I
                 "heartbeat_interval_in_ms", String.valueOf(conf.getHeartbeatIntervalInMs()))
             .trim()));
 
+    String failureDetector = properties.getProperty("failure_detector", conf.getFailureDetector());
+    if (IFailureDetector.FIXED_DETECTOR.equals(failureDetector)
+        || IFailureDetector.PHI_ACCRUAL_DETECTOR.equals(failureDetector)) {
+      conf.setFailureDetector(failureDetector);
+    } else {
+      throw new IOException(
+          String.format(
+              "Unknown failure_detector: %s, " + "please set to \"fixed\" or \"phi_accrual\"",
+              failureDetector));
+    }
+
+    conf.setFailureDetectorFixedThresholdInMs(
+        Long.parseLong(
+            properties.getProperty(
+                "failure_detector_fixed_threshold_in_ms",
+                String.valueOf(conf.getFailureDetectorFixedThresholdInMs()))));
+
+    conf.setFailureDetectorPhiThreshold(
+        Long.parseLong(
+            properties.getProperty(
+                "failure_detector_phi_threshold",
+                String.valueOf(conf.getFailureDetectorPhiThreshold()))));
+
+    conf.setFailureDetectorPhiAcceptablePauseInMs(
+        Long.parseLong(
+            properties.getProperty(
+                "failure_detector_phi_acceptable_pause_in_ms",
+                String.valueOf(conf.getFailureDetectorPhiAcceptablePauseInMs()))));
+
     String leaderDistributionPolicy =
         properties
             .getProperty("leader_distribution_policy", conf.getLeaderDistributionPolicy())
@@ -401,7 +434,7 @@ private void loadProperties(Properties properties) throws BadNodeUrlException, I
     loadCQConfig(properties);
   }
 
-  private void loadRatisConsensusConfig(Properties properties) {
+  private void loadRatisConsensusConfig(TrimProperties properties) {
     conf.setDataRegionRatisConsensusLogAppenderBufferSize(
         Long.parseLong(
             properties
@@ -813,7 +846,7 @@ private void loadRatisConsensusConfig(Properties properties) {
                 .trim()));
   }
 
-  private void loadCQConfig(Properties properties) {
+  private void loadCQConfig(TrimProperties properties) {
     int cqSubmitThread =
         Integer.parseInt(
             properties
@@ -871,7 +904,7 @@ public boolean isSeedConfigNode() {
     }
   }
 
-  public void loadHotModifiedProps(Properties properties) {
+  public void loadHotModifiedProps(TrimProperties properties) {
     Optional.ofNullable(properties.getProperty(IoTDBConstant.CLUSTER_NAME))
         .ifPresent(conf::setClusterName);
   }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeRemoveCheck.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeRemoveCheck.java
index 9c4c480b02cea..b8475643ba25d 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeRemoveCheck.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeRemoveCheck.java
@@ -83,10 +83,7 @@ public TConfigNodeLocation removeCheck(String args) {
             .findFirst()
             .orElse(null);
       } catch (BadNodeUrlException e) {
-        LOGGER.info(
-            "Usage: remove-confignode.sh <confignode-id> "
-                + "or remove-confignode.sh <internal_address>:<internal_port>",
-            e);
+        LOGGER.info("Use SQL: remove confignode <confignode-id>; ", e);
         return nodeLocation;
       }
     }
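The failure_detector property selects between a fixed heartbeat timeout and a phi-accrual detector (Hayashibara et al.); with phi accrual, suspicion rises continuously as the silence since the last heartbeat grows, tempered by failure_detector_phi_acceptable_pause_in_ms. IoTDB's IFailureDetector implementation is not shown in this diff, so the sketch below models heartbeat intervals with a simple exponential distribution purely for illustration:

import java.util.ArrayDeque;
import java.util.Deque;

public final class PhiAccrualSketch {
  private final Deque<Long> intervals = new ArrayDeque<>();
  private final double phiThreshold;
  private final long acceptablePauseMs;
  private long lastHeartbeatMs = -1;

  PhiAccrualSketch(double phiThreshold, long acceptablePauseMs) {
    this.phiThreshold = phiThreshold;
    this.acceptablePauseMs = acceptablePauseMs;
  }

  void heartbeat(long nowMs) {
    if (lastHeartbeatMs >= 0) {
      intervals.addLast(nowMs - lastHeartbeatMs);
      if (intervals.size() > 1000) {
        intervals.removeFirst(); // sliding window of recent inter-arrival times
      }
    }
    lastHeartbeatMs = nowMs;
  }

  // phi = -log10(P(no heartbeat for this long)), with intervals modeled as exponential:
  // P(elapsed) = exp(-elapsed / mean), hence phi = elapsed / (mean * ln 10).
  double phi(long nowMs) {
    double mean = intervals.stream().mapToLong(Long::longValue).average().orElse(1000.0);
    long elapsed = Math.max(0, nowMs - lastHeartbeatMs - acceptablePauseMs);
    return elapsed / (mean * Math.log(10));
  }

  boolean isAvailable(long nowMs) {
    return phi(nowMs) < phiThreshold;
  }

  public static void main(String[] args) {
    PhiAccrualSketch d = new PhiAccrualSketch(30, 10_000); // mirrors the new defaults
    for (long t = 0; t <= 10_000; t += 1_000) d.heartbeat(t);
    System.out.println(d.isAvailable(12_000));  // true: within the acceptable pause
    System.out.println(d.isAvailable(120_000)); // false: phi has crossed the threshold
  }
}

Unlike the fixed 20-second threshold, the phi threshold adapts to each node's observed heartbeat cadence, which is why it replaces the old one-size-fits-all unknownDataNodeDetectInterval.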
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeStartupCheck.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeStartupCheck.java
index ced2964ec5cb5..3400b5316fc5e 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeStartupCheck.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeStartupCheck.java
@@ -25,6 +25,8 @@
 import org.apache.iotdb.commons.exception.ConfigurationException;
 import org.apache.iotdb.commons.exception.StartupException;
 import org.apache.iotdb.commons.service.StartupChecks;
+import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager;
+import org.apache.iotdb.confignode.client.sync.SyncDataNodeClientPool;
 import org.apache.iotdb.confignode.manager.load.balancer.router.leader.AbstractLeaderBalancer;
 import org.apache.iotdb.confignode.manager.load.balancer.router.priority.IPriorityBalancer;
 import org.apache.iotdb.consensus.ConsensusFactory;
@@ -73,6 +75,7 @@ public void startUpCheck() throws StartupException, IOException, ConfigurationEx
     verify();
     checkGlobalConfig();
     createDirsIfNecessary();
+    checkRequestManager();
     if (SystemPropertiesUtils.isRestarted()) {
       /* Always restore ConfigNodeId first */
       CONF.setConfigNodeId(SystemPropertiesUtils.loadConfigNodeIdWhenRestarted());
@@ -223,4 +226,10 @@ private void createDirIfEmpty(File dir) throws IOException {
       }
     }
   }
+
+  // The checks are in the initialization process of the RequestManager object.
+  private void checkRequestManager() {
+    SyncDataNodeClientPool.getInstance();
+    CnToDnInternalServiceAsyncRequestManager.getInstance();
+  }
 }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlan.java
index 8ad8c9b2fd25a..b7a0936ca450f 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlan.java
@@ -20,37 +20,14 @@
 package org.apache.iotdb.confignode.consensus.request;
 
 import org.apache.iotdb.commons.exception.runtime.SerializationRunTimeException;
-import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan;
-import org.apache.iotdb.confignode.consensus.request.read.database.CountDatabasePlan;
-import org.apache.iotdb.confignode.consensus.request.read.database.GetDatabasePlan;
-import org.apache.iotdb.confignode.consensus.request.read.datanode.GetDataNodeConfigurationPlan;
-import org.apache.iotdb.confignode.consensus.request.read.function.GetFunctionTablePlan;
-import org.apache.iotdb.confignode.consensus.request.read.function.GetUDFJarPlan;
-import org.apache.iotdb.confignode.consensus.request.read.partition.CountTimeSlotListPlan;
-import org.apache.iotdb.confignode.consensus.request.read.partition.GetDataPartitionPlan;
-import org.apache.iotdb.confignode.consensus.request.read.partition.GetNodePathsPartitionPlan;
-import org.apache.iotdb.confignode.consensus.request.read.partition.GetOrCreateDataPartitionPlan;
-import org.apache.iotdb.confignode.consensus.request.read.partition.GetOrCreateSchemaPartitionPlan;
-import org.apache.iotdb.confignode.consensus.request.read.partition.GetSchemaPartitionPlan;
-import org.apache.iotdb.confignode.consensus.request.read.partition.GetSeriesSlotListPlan;
-import org.apache.iotdb.confignode.consensus.request.read.partition.GetTimeSlotListPlan;
-import org.apache.iotdb.confignode.consensus.request.read.pipe.plugin.GetPipePluginJarPlan;
-import org.apache.iotdb.confignode.consensus.request.read.pipe.plugin.GetPipePluginTablePlan;
-import org.apache.iotdb.confignode.consensus.request.read.pipe.task.ShowPipePlanV2;
-import org.apache.iotdb.confignode.consensus.request.read.region.GetRegionIdPlan;
-import org.apache.iotdb.confignode.consensus.request.read.region.GetRegionInfoListPlan;
-import org.apache.iotdb.confignode.consensus.request.read.subscription.ShowSubscriptionPlan;
+import org.apache.iotdb.confignode.consensus.request.read.ainode.GetAINodeConfigurationPlan;
+import org.apache.iotdb.confignode.consensus.request.read.model.GetModelInfoPlan;
+import org.apache.iotdb.confignode.consensus.request.read.model.ShowModelPlan;
 import org.apache.iotdb.confignode.consensus.request.read.subscription.ShowTopicPlan;
-import org.apache.iotdb.confignode.consensus.request.read.template.CheckTemplateSettablePlan;
-import org.apache.iotdb.confignode.consensus.request.read.template.GetAllSchemaTemplatePlan;
-import org.apache.iotdb.confignode.consensus.request.read.template.GetAllTemplateSetInfoPlan;
-import org.apache.iotdb.confignode.consensus.request.read.template.GetPathsSetTemplatePlan;
-import org.apache.iotdb.confignode.consensus.request.read.template.GetSchemaTemplatePlan;
-import org.apache.iotdb.confignode.consensus.request.read.template.GetTemplateSetInfoPlan;
-import org.apache.iotdb.confignode.consensus.request.read.trigger.GetTransferringTriggersPlan;
-import org.apache.iotdb.confignode.consensus.request.read.trigger.GetTriggerJarPlan;
-import org.apache.iotdb.confignode.consensus.request.read.trigger.GetTriggerLocationPlan;
-import org.apache.iotdb.confignode.consensus.request.read.trigger.GetTriggerTablePlan;
+import org.apache.iotdb.confignode.consensus.request.write.ainode.RegisterAINodePlan;
+import org.apache.iotdb.confignode.consensus.request.write.ainode.RemoveAINodePlan;
+import org.apache.iotdb.confignode.consensus.request.write.ainode.UpdateAINodePlan;
+import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan;
 import org.apache.iotdb.confignode.consensus.request.write.confignode.ApplyConfigNodePlan;
 import org.apache.iotdb.confignode.consensus.request.write.confignode.RemoveConfigNodePlan;
 import org.apache.iotdb.confignode.consensus.request.write.confignode.UpdateClusterIdPlan;
@@ -58,7 +35,6 @@
 import org.apache.iotdb.confignode.consensus.request.write.cq.ActiveCQPlan;
 import org.apache.iotdb.confignode.consensus.request.write.cq.AddCQPlan;
 import org.apache.iotdb.confignode.consensus.request.write.cq.DropCQPlan;
-import org.apache.iotdb.confignode.consensus.request.write.cq.ShowCQPlan;
 import org.apache.iotdb.confignode.consensus.request.write.cq.UpdateCQLastExecTimePlan;
 import org.apache.iotdb.confignode.consensus.request.write.database.AdjustMaxRegionGroupNumPlan;
 import org.apache.iotdb.confignode.consensus.request.write.database.DatabaseSchemaPlan;
@@ -73,7 +49,12 @@
 import org.apache.iotdb.confignode.consensus.request.write.datanode.UpdateDataNodePlan;
 import org.apache.iotdb.confignode.consensus.request.write.function.CreateFunctionPlan;
 import org.apache.iotdb.confignode.consensus.request.write.function.DropFunctionPlan;
+import org.apache.iotdb.confignode.consensus.request.write.model.CreateModelPlan;
+import org.apache.iotdb.confignode.consensus.request.write.model.DropModelInNodePlan;
+import org.apache.iotdb.confignode.consensus.request.write.model.DropModelPlan;
+import org.apache.iotdb.confignode.consensus.request.write.model.UpdateModelInfoPlan;
 import org.apache.iotdb.confignode.consensus.request.write.partition.AddRegionLocationPlan;
+import org.apache.iotdb.confignode.consensus.request.write.partition.AutoCleanPartitionTablePlan;
 import org.apache.iotdb.confignode.consensus.request.write.partition.CreateDataPartitionPlan;
 import org.apache.iotdb.confignode.consensus.request.write.partition.CreateSchemaPartitionPlan;
 import org.apache.iotdb.confignode.consensus.request.write.partition.RemoveRegionLocationPlan;
@@ -114,7 +95,6 @@
org.apache.iotdb.confignode.consensus.request.write.sync.PreCreatePipePlanV1; import org.apache.iotdb.confignode.consensus.request.write.sync.RecordPipeMessagePlan; import org.apache.iotdb.confignode.consensus.request.write.sync.SetPipeStatusPlanV1; -import org.apache.iotdb.confignode.consensus.request.write.sync.ShowPipePlanV1; import org.apache.iotdb.confignode.consensus.request.write.template.CommitSetSchemaTemplatePlan; import org.apache.iotdb.confignode.consensus.request.write.template.CreateSchemaTemplatePlan; import org.apache.iotdb.confignode.consensus.request.write.template.DropSchemaTemplatePlan; @@ -132,8 +112,6 @@ import org.apache.iotdb.consensus.common.request.IConsensusRequest; import org.apache.tsfile.utils.PublicBAOS; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.DataOutputStream; import java.io.IOException; @@ -142,11 +120,9 @@ public abstract class ConfigPhysicalPlan implements IConsensusRequest { - private static final Logger LOGGER = LoggerFactory.getLogger(ConfigPhysicalPlan.class); - private final ConfigPhysicalPlanType type; - protected ConfigPhysicalPlan(ConfigPhysicalPlanType type) { + protected ConfigPhysicalPlan(final ConfigPhysicalPlanType type) { this.type = type; } @@ -156,37 +132,37 @@ public ConfigPhysicalPlanType getType() { @Override public ByteBuffer serializeToByteBuffer() { - try (PublicBAOS byteArrayOutputStream = new PublicBAOS(); - DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) { + try (final PublicBAOS byteArrayOutputStream = new PublicBAOS(); + final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) { serializeImpl(outputStream); return ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size()); - } catch (IOException e) { + } catch (final IOException e) { throw new SerializationRunTimeException(e); } } - protected abstract void serializeImpl(DataOutputStream stream) throws IOException; + protected abstract void serializeImpl(final DataOutputStream stream) throws IOException; - protected abstract void deserializeImpl(ByteBuffer buffer) throws IOException; + protected abstract void deserializeImpl(final ByteBuffer buffer) throws IOException; public int getSerializedSize() throws IOException { - PublicBAOS byteArrayOutputStream = new PublicBAOS(); - DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream); + final PublicBAOS byteArrayOutputStream = new PublicBAOS(); + final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream); serializeImpl(outputStream); return byteArrayOutputStream.size(); } public static class Factory { - public static ConfigPhysicalPlan create(ByteBuffer buffer) throws IOException { - short planType = buffer.getShort(); - ConfigPhysicalPlanType configPhysicalPlanType = + public static ConfigPhysicalPlan create(final ByteBuffer buffer) throws IOException { + final short planType = buffer.getShort(); + final ConfigPhysicalPlanType configPhysicalPlanType = ConfigPhysicalPlanType.convertToConfigPhysicalPlanType(planType); if (configPhysicalPlanType == null) { throw new IOException("Unrecognized log configPhysicalPlanType: " + planType); } - ConfigPhysicalPlan plan; + final ConfigPhysicalPlan plan; switch (configPhysicalPlanType) { case RegisterDataNode: plan = new RegisterDataNodePlan(); @@ -197,8 +173,17 @@ public static ConfigPhysicalPlan create(ByteBuffer buffer) throws IOException { case RemoveDataNode: plan = new RemoveDataNodePlan(); break; - case GetDataNodeConfiguration: - plan 
= new GetDataNodeConfigurationPlan(); + case RegisterAINode: + plan = new RegisterAINodePlan(); + break; + case RemoveAINode: + plan = new RemoveAINodePlan(); + break; + case GetAINodeConfiguration: + plan = new GetAINodeConfigurationPlan(); + break; + case UpdateAINodeConfiguration: + plan = new UpdateAINodePlan(); break; case CreateDatabase: plan = new DatabaseSchemaPlan(ConfigPhysicalPlanType.CreateDatabase); @@ -221,12 +206,6 @@ public static ConfigPhysicalPlan create(ByteBuffer buffer) throws IOException { case AdjustMaxRegionGroupNum: plan = new AdjustMaxRegionGroupNumPlan(); break; - case CountDatabase: - plan = new CountDatabasePlan(); - break; - case GetDatabase: - plan = new GetDatabasePlan(); - break; case CreateRegionGroups: plan = new CreateRegionGroupsPlan(); break; @@ -248,23 +227,14 @@ public static ConfigPhysicalPlan create(ByteBuffer buffer) throws IOException { case PollSpecificRegionMaintainTask: plan = new PollSpecificRegionMaintainTaskPlan(); break; - case GetSchemaPartition: - plan = new GetSchemaPartitionPlan(); - break; case CreateSchemaPartition: plan = new CreateSchemaPartitionPlan(); break; - case GetOrCreateSchemaPartition: - plan = new GetOrCreateSchemaPartitionPlan(); - break; - case GetDataPartition: - plan = new GetDataPartitionPlan(); - break; case CreateDataPartition: plan = new CreateDataPartitionPlan(); break; - case GetOrCreateDataPartition: - plan = new GetOrCreateDataPartitionPlan(); + case AutoCleanPartitionTable: + plan = new AutoCleanPartitionTablePlan(); break; case DeleteProcedure: plan = new DeleteProcedurePlan(); @@ -278,12 +248,6 @@ public static ConfigPhysicalPlan create(ByteBuffer buffer) throws IOException { case DeleteDatabase: plan = new DeleteDatabasePlan(); break; - case ListUserDep: - case ListRoleDep: - case ListUserPrivilegeDep: - case ListRolePrivilegeDep: - case ListUserRolesDep: - case ListRoleUsersDep: case CreateUserDep: case CreateRoleDep: case DropUserDep: @@ -295,12 +259,6 @@ public static ConfigPhysicalPlan create(ByteBuffer buffer) throws IOException { case RevokeRoleDep: case RevokeRoleFromUserDep: case UpdateUserDep: - case ListUser: - case ListRole: - case ListUserPrivilege: - case ListRolePrivilege: - case ListUserRoles: - case ListRoleUsers: case CreateUser: case CreateRole: case DropUser: @@ -342,33 +300,9 @@ public static ConfigPhysicalPlan create(ByteBuffer buffer) throws IOException { case UpdateTriggerStateInTable: plan = new UpdateTriggerStateInTablePlan(); break; - case GetTriggerTable: - plan = new GetTriggerTablePlan(); - break; - case GetTriggerLocation: - plan = new GetTriggerLocationPlan(); - break; - case GetTriggerJar: - plan = new GetTriggerJarPlan(); - break; case CreateSchemaTemplate: plan = new CreateSchemaTemplatePlan(); break; - case GetAllSchemaTemplate: - plan = new GetAllSchemaTemplatePlan(); - break; - case GetSchemaTemplate: - plan = new GetSchemaTemplatePlan(); - break; - case CheckTemplateSettable: - plan = new CheckTemplateSettablePlan(); - break; - case GetPathsSetTemplate: - plan = new GetPathsSetTemplatePlan(); - break; - case GetAllTemplateSetInfo: - plan = new GetAllTemplateSetInfoPlan(); - break; case SetSchemaTemplate: plan = new SetSchemaTemplatePlan(); break; @@ -378,9 +312,6 @@ public static ConfigPhysicalPlan create(ByteBuffer buffer) throws IOException { case CommitSetSchemaTemplate: plan = new CommitSetSchemaTemplatePlan(); break; - case GetTemplateSetInfo: - plan = new GetTemplateSetInfoPlan(); - break; case DropSchemaTemplate: plan = new DropSchemaTemplatePlan(); break; @@ 
-396,12 +327,6 @@ public static ConfigPhysicalPlan create(ByteBuffer buffer) throws IOException { case ExtendSchemaTemplate: plan = new ExtendSchemaTemplatePlan(); break; - case GetNodePathsPartition: - plan = new GetNodePathsPartitionPlan(); - break; - case GetRegionInfoList: - plan = new GetRegionInfoListPlan(); - break; case CreatePipeSinkV1: plan = new CreatePipeSinkPlanV1(); break; @@ -420,9 +345,6 @@ public static ConfigPhysicalPlan create(ByteBuffer buffer) throws IOException { case DropPipeV1: plan = new DropPipePlanV1(); break; - case ShowPipeV1: - plan = new ShowPipePlanV1(); - break; case RecordPipeMessageV1: plan = new RecordPipeMessagePlan(); break; @@ -438,9 +360,6 @@ public static ConfigPhysicalPlan create(ByteBuffer buffer) throws IOException { case AlterPipeV2: plan = new AlterPipePlanV2(); break; - case ShowPipeV2: - plan = new ShowPipePlanV2(); - break; case OperateMultiplePipesV2: plan = new OperateMultiplePipesPlanV2(); break; @@ -477,9 +396,6 @@ public static ConfigPhysicalPlan create(ByteBuffer buffer) throws IOException { case ConsumerGroupHandleMetaChange: plan = new ConsumerGroupHandleMetaChangePlan(); break; - case ShowSubscription: - plan = new ShowSubscriptionPlan(); - break; case PipeUnsetTemplate: plan = new PipeUnsetSchemaTemplatePlan(); break; @@ -492,27 +408,12 @@ public static ConfigPhysicalPlan create(ByteBuffer buffer) throws IOException { case PipeDeactivateTemplate: plan = new PipeDeactivateTemplatePlan(); break; - case GetRegionId: - plan = new GetRegionIdPlan(); - break; - case GetTimeSlotList: - plan = new GetTimeSlotListPlan(); - break; - case CountTimeSlotList: - plan = new CountTimeSlotListPlan(); - break; - case GetSeriesSlotList: - plan = new GetSeriesSlotListPlan(); - break; case UpdateTriggersOnTransferNodes: plan = new UpdateTriggersOnTransferNodesPlan(); break; case UpdateTriggerLocation: plan = new UpdateTriggerLocationPlan(); break; - case GetTransferringTriggers: - plan = new GetTransferringTriggersPlan(); - break; case ACTIVE_CQ: plan = new ActiveCQPlan(); break; @@ -525,14 +426,23 @@ public static ConfigPhysicalPlan create(ByteBuffer buffer) throws IOException { case UPDATE_CQ_LAST_EXEC_TIME: plan = new UpdateCQLastExecTimePlan(); break; - case SHOW_CQ: - plan = new ShowCQPlan(); + case CreateModel: + plan = new CreateModelPlan(); + break; + case UpdateModelInfo: + plan = new UpdateModelInfoPlan(); break; - case GetFunctionTable: - plan = new GetFunctionTablePlan(); + case DropModel: + plan = new DropModelPlan(); break; - case GetFunctionJar: - plan = new GetUDFJarPlan(); + case ShowModel: + plan = new ShowModelPlan(); + break; + case DropModelInNode: + plan = new DropModelInNodePlan(); + break; + case GetModelInfo: + plan = new GetModelInfoPlan(); break; case CreatePipePlugin: plan = new CreatePipePluginPlan(); @@ -540,12 +450,6 @@ public static ConfigPhysicalPlan create(ByteBuffer buffer) throws IOException { case DropPipePlugin: plan = new DropPipePluginPlan(); break; - case GetPipePluginTable: - plan = new GetPipePluginTablePlan(); - break; - case GetPipePluginJar: - plan = new GetPipePluginJarPlan(); - break; case setSpaceQuota: plan = new SetSpaceQuotaPlan(); break; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlanType.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlanType.java index 84728b3ca9b70..24e5b9ba9c631 100644 --- 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlanType.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlanType.java @@ -36,6 +36,12 @@ public enum ConfigPhysicalPlanType { RemoveDataNode((short) 102), UpdateDataNodeConfiguration((short) 103), + /** AINode. */ + RegisterAINode((short) 104), + UpdateAINodeConfiguration((short) 105), + RemoveAINode((short) 106), + GetAINodeConfiguration((short) 107), + /** Database. */ CreateDatabase((short) 200), SetTTL((short) 201), @@ -74,6 +80,7 @@ public enum ConfigPhysicalPlanType { CreateDataPartition((short) 404), GetOrCreateDataPartition((short) 405), GetNodePathsPartition((short) 406), + AutoCleanPartitionTable((short) 407), /** Procedure. */ UpdateProcedure((short) 500), @@ -184,7 +191,14 @@ public enum ConfigPhysicalPlanType { UPDATE_CQ_LAST_EXEC_TIME((short) 1103), SHOW_CQ((short) 1104), - // 1200-1299 planId is used by IoTDB-ML. + /** AI model. */ + CreateModel((short) 1200), + UpdateModelInfo((short) 1201), + UpdateModelState((short) 1202), + DropModel((short) 1203), + ShowModel((short) 1204), + GetModelInfo((short) 1206), + DropModelInNode((short) 1207), /** Pipe Plugin. */ CreatePipePlugin((short) 1300), diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlanVisitor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlanVisitor.java index bfdf71c7e54be..1e418e3381941 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlanVisitor.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlanVisitor.java @@ -19,7 +19,7 @@ package org.apache.iotdb.confignode.consensus.request; -import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan; +import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.consensus.request.write.database.DatabaseSchemaPlan; import org.apache.iotdb.confignode.consensus.request.write.database.DeleteDatabasePlan; import org.apache.iotdb.confignode.consensus.request.write.database.SetTTLPlan; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/cq/ShowCQPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/ConfigPhysicalReadPlan.java similarity index 61% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/cq/ShowCQPlan.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/ConfigPhysicalReadPlan.java index 7c146238a5ed9..032c21a731c0d 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/cq/ShowCQPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/ConfigPhysicalReadPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an @@ -17,29 +17,28 @@ * under the License. 
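The Factory switch above is one half of a type-tagged serialization scheme: each plan's serializeImpl first writes the short code from ConfigPhysicalPlanType, and Factory.create reads that code back to pick the concrete subclass before asking it to restore its own fields. A minimal sketch of the pattern, using hypothetical class names rather than the real plan classes:

import java.nio.ByteBuffer;

abstract class TaggedPlan {
  // Each concrete plan owns a unique short code, mirroring ConfigPhysicalPlanType.
  abstract void deserializeBody(ByteBuffer buffer);

  static TaggedPlan create(ByteBuffer buffer) {
    final short code = buffer.getShort(); // the tag written during serialization
    final TaggedPlan plan;
    switch (code) {
      case 104: plan = new RegisterNodePlan(); break;   // cf. RegisterAINode
      case 107: plan = new GetNodeConfigPlan(); break;  // cf. GetAINodeConfiguration
      default: throw new IllegalArgumentException("Unrecognized plan type: " + code);
    }
    plan.deserializeBody(buffer); // the subclass restores only its own fields
    return plan;
  }
}

class RegisterNodePlan extends TaggedPlan {
  @Override
  void deserializeBody(ByteBuffer buffer) {
    // node location fields would be read here
  }
}

class GetNodeConfigPlan extends TaggedPlan {
  int nodeId;

  @Override
  void deserializeBody(ByteBuffer buffer) {
    nodeId = buffer.getInt();
  }
}

One consequence of this scheme, visible in the ConfigPhysicalPlanType diff above, is that the short codes are a persisted wire format: removed plans leave holes in the numbering (note the absent 1205), and new plans must claim unused codes.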
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/cq/ShowCQPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/ConfigPhysicalReadPlan.java
similarity index 61%
rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/cq/ShowCQPlan.java
rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/ConfigPhysicalReadPlan.java
index 7c146238a5ed9..032c21a731c0d 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/cq/ShowCQPlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/ConfigPhysicalReadPlan.java
@@ -7,7 +7,7 @@
  * "License"); you may not use this file except in compliance
  * with the License. You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing,
  * software distributed under the License is distributed on an
@@ -17,29 +17,28 @@
  * under the License.
  */
 
-package org.apache.iotdb.confignode.consensus.request.write.cq;
+package org.apache.iotdb.confignode.consensus.request.read;
 
 import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
+import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
-import static org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType.SHOW_CQ;
+public abstract class ConfigPhysicalReadPlan extends ConfigPhysicalPlan {
 
-public class ShowCQPlan extends ConfigPhysicalPlan {
-
-  public ShowCQPlan() {
-    super(SHOW_CQ);
+  protected ConfigPhysicalReadPlan(final ConfigPhysicalPlanType type) {
+    super(type);
   }
 
   @Override
-  protected void serializeImpl(DataOutputStream stream) throws IOException {
-    stream.writeShort(getType().getPlanType());
+  protected void serializeImpl(final DataOutputStream stream) throws IOException {
+    // Read request does not need to be serialized
  }
 
   @Override
-  protected void deserializeImpl(ByteBuffer buffer) throws IOException {
-    // no customized field to deserialize from
+  protected void deserializeImpl(final ByteBuffer buffer) throws IOException {
+    // Read request does not need to be deserialized
   }
 }
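ConfigPhysicalReadPlan gives every read-only plan a shared no-op serde: per the comments above, a read request is never written out or restored, so subclasses can drop their no-arg constructors and hand-written serializeImpl/deserializeImpl bodies. A sketch of what a migrated subclass reduces to (the class name below is hypothetical):

// Hypothetical subclass: with the inherited no-ops, a parameterless read
// request shrinks to a constructor that records its plan type.
class ShowEverythingPlan extends ConfigPhysicalReadPlan {
  ShowEverythingPlan() {
    super(ConfigPhysicalPlanType.SHOW_CQ); // any read-side plan type
  }
  // no fields, no serialization code: the plan type alone identifies it
}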
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/ainode/GetAINodeConfigurationPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/ainode/GetAINodeConfigurationPlan.java
new file mode 100644
index 0000000000000..7222a8f53f8cb
--- /dev/null
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/ainode/GetAINodeConfigurationPlan.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.consensus.request.read.ainode;
+
+import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
+import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+public class GetAINodeConfigurationPlan extends ConfigPhysicalReadPlan {
+
+  // if aiNodeId is set to -1, return all AINode configurations.
+  private int aiNodeId;
+
+  public GetAINodeConfigurationPlan() {
+    super(ConfigPhysicalPlanType.GetAINodeConfiguration);
+  }
+
+  public GetAINodeConfigurationPlan(final int aiNodeId) {
+    super(ConfigPhysicalPlanType.GetAINodeConfiguration);
+    this.aiNodeId = aiNodeId;
+  }
+
+  public int getAiNodeId() {
+    return aiNodeId;
+  }
+
+  @Override
+  protected void serializeImpl(DataOutputStream stream) throws IOException {
+    stream.writeShort(getType().getPlanType());
+    stream.writeInt(aiNodeId);
+  }
+
+  @Override
+  protected void deserializeImpl(ByteBuffer buffer) throws IOException {
+    this.aiNodeId = buffer.getInt();
+  }
+
+  @Override
+  public boolean equals(final Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (!(o instanceof GetAINodeConfigurationPlan)) {
+      return false;
+    }
+    final GetAINodeConfigurationPlan that = (GetAINodeConfigurationPlan) o;
+    return aiNodeId == that.aiNodeId;
+  }
+
+  @Override
+  public int hashCode() {
+    return Integer.hashCode(aiNodeId);
+  }
+}
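GetAINodeConfigurationPlan follows the same sentinel convention as GetDataNodeConfigurationPlan further down: a negative id broadens the query to every node. An illustrative construction (the node id 3 is made up):

// Fetch every AINode's configuration vs. a single node's.
final GetAINodeConfigurationPlan all = new GetAINodeConfigurationPlan(-1);
final GetAINodeConfigurationPlan one = new GetAINodeConfigurationPlan(3);
assert all.getAiNodeId() == -1 && one.getAiNodeId() == 3;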
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/auth/AuthorReadPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/auth/AuthorReadPlan.java
new file mode 100644
index 0000000000000..d418aa4d08351
--- /dev/null
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/auth/AuthorReadPlan.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.consensus.request.read.auth;
+
+import org.apache.iotdb.commons.auth.entity.PrivilegeType;
+import org.apache.iotdb.commons.path.PartialPath;
+import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
+import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan;
+
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+
+public class AuthorReadPlan extends ConfigPhysicalReadPlan {
+
+  private final ConfigPhysicalPlanType authorType;
+  private final String roleName;
+  private String password;
+  private final String newPassword;
+  private Set<Integer> permissions;
+  private final List<PartialPath> nodeNameList;
+  private String userName;
+  private final boolean grantOpt;
+
+  /**
+   * {@link AuthorReadPlan} Constructor.
+   *
+   * @param authorType author type
+   * @param userName user name
+   * @param roleName role name
+   * @param password password
+   * @param newPassword new password
+   * @param permissions permissions
+   * @param grantOpt with grant option, only grant statement can set grantOpt = true
+   * @param nodeNameList node name in Path structure
+   */
+  public AuthorReadPlan(
+      final ConfigPhysicalPlanType authorType,
+      final String userName,
+      final String roleName,
+      final String password,
+      final String newPassword,
+      final Set<Integer> permissions,
+      final boolean grantOpt,
+      final List<PartialPath> nodeNameList) {
+    super(authorType);
+    this.authorType = authorType;
+    this.userName = userName;
+    this.roleName = roleName;
+    this.password = password;
+    this.newPassword = newPassword;
+    this.permissions = permissions;
+    this.grantOpt = grantOpt;
+    this.nodeNameList = nodeNameList;
+  }
+
+  public String getRoleName() {
+    return roleName;
+  }
+
+  public String getPassword() {
+    return password;
+  }
+
+  public void setPassword(final String password) {
+    this.password = password;
+  }
+
+  public Set<Integer> getPermissions() {
+    return permissions;
+  }
+
+  public void setPermissions(final Set<Integer> permissions) {
+    this.permissions = permissions;
+  }
+
+  public String getUserName() {
+    return userName;
+  }
+
+  public void setUserName(final String userName) {
+    this.userName = userName;
+  }
+
+  @Override
+  public boolean equals(final Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    final AuthorReadPlan that = (AuthorReadPlan) o;
+    return Objects.equals(authorType, that.authorType)
+        && Objects.equals(userName, that.userName)
+        && Objects.equals(roleName, that.roleName)
+        && Objects.equals(password, that.password)
+        && Objects.equals(newPassword, that.newPassword)
+        && Objects.equals(permissions, that.permissions)
+        && grantOpt == that.grantOpt
+        && Objects.equals(nodeNameList, that.nodeNameList);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(
+        authorType, userName, roleName, password, newPassword, permissions, nodeNameList, grantOpt);
+  }
+
+  @Override
+  public String toString() {
+    return "[type:"
+        + authorType
+        + ", username:"
+        + userName
+        + ", rolename:"
+        + roleName
+        + ", permissions:"
+        + PrivilegeType.toPriType(permissions)
+        + ", grant option:"
+        + grantOpt
+        + ", paths:"
+        + nodeNameList
+        + "]";
+  }
+}
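AuthorReadPlan funnels every read-side auth query through one constructor, so call sites fill the slots a given query does not use with empty values. A hypothetical "list users" construction (the empty-string and empty-collection defaults here are illustrative, not prescribed by this file; ConfigPhysicalPlanType.ListUser is one of the read-side author types removed from the Factory switch above):

// requires java.util.Collections
final AuthorReadPlan listUsers =
    new AuthorReadPlan(
        ConfigPhysicalPlanType.ListUser, // a read-side author type
        "",                              // userName
        "",                              // roleName
        "",                              // password
        "",                              // newPassword
        Collections.emptySet(),          // permissions
        false,                           // grantOpt
        Collections.emptyList());        // nodeNameList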
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/common/PipeTransferHandshakeConstant.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/cq/ShowCQPlan.java
similarity index 70%
rename from iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/common/PipeTransferHandshakeConstant.java
rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/cq/ShowCQPlan.java
index 07c7bb463905e..5217849deb488 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/common/PipeTransferHandshakeConstant.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/cq/ShowCQPlan.java
@@ -17,14 +17,15 @@
  * under the License.
  */
 
-package org.apache.iotdb.commons.pipe.connector.payload.thrift.common;
+package org.apache.iotdb.confignode.consensus.request.read.cq;
 
-public class PipeTransferHandshakeConstant {
+import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan;
 
-  public static final String HANDSHAKE_KEY_TIME_PRECISION = "timestampPrecision";
-  public static final String HANDSHAKE_KEY_CLUSTER_ID = "clusterID";
+import static org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType.SHOW_CQ;
 
-  private PipeTransferHandshakeConstant() {
-    // Utility class
+public class ShowCQPlan extends ConfigPhysicalReadPlan {
+
+  public ShowCQPlan() {
+    super(SHOW_CQ);
   }
 }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/database/CountDatabasePlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/database/CountDatabasePlan.java
index 3e838c08a0698..4772688c70154 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/database/CountDatabasePlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/database/CountDatabasePlan.java
@@ -20,37 +20,27 @@
 package org.apache.iotdb.confignode.consensus.request.read.database;
 
 import org.apache.iotdb.commons.path.PathPatternTree;
-import org.apache.iotdb.commons.utils.BasicStructureSerDeUtil;
-import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
 import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
+import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan;
 
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.List;
 
-public class CountDatabasePlan extends ConfigPhysicalPlan {
+public class CountDatabasePlan extends ConfigPhysicalReadPlan {
 
-  private String[] storageGroupPattern;
-  private PathPatternTree scope;
+  private final String[] storageGroupPattern;
+  private final PathPatternTree scope;
 
-  public CountDatabasePlan() {
+  public CountDatabasePlan(final List<String> storageGroupPattern, final PathPatternTree scope) {
     super(ConfigPhysicalPlanType.CountDatabase);
-  }
-
-  public CountDatabasePlan(ConfigPhysicalPlanType type) {
-    super(type);
-  }
-
-  public CountDatabasePlan(List<String> storageGroupPattern, PathPatternTree scope) {
-    this();
     this.storageGroupPattern = storageGroupPattern.toArray(new String[0]);
     this.scope = scope;
   }
 
   public CountDatabasePlan(
-      ConfigPhysicalPlanType type, List<String> storageGroupPattern, PathPatternTree scope) {
+      final ConfigPhysicalPlanType type,
+      final List<String> storageGroupPattern,
+      final PathPatternTree scope) {
     super(type);
     this.storageGroupPattern = storageGroupPattern.toArray(new String[0]);
     this.scope = scope;
@@ -65,35 +55,14 @@ public PathPatternTree getScope() {
   }
 
   @Override
-  protected void serializeImpl(DataOutputStream stream) throws IOException {
-    stream.writeShort(getType().getPlanType());
-
-    stream.writeInt(storageGroupPattern.length);
-    for (String node : storageGroupPattern) {
-      BasicStructureSerDeUtil.write(node, stream);
-    }
-    scope.serialize(stream);
-  }
-
-  @Override
-  protected void deserializeImpl(ByteBuffer buffer) {
-    int length = buffer.getInt();
-    storageGroupPattern = new String[length];
-    for (int i = 0; i < length; i++) {
-      storageGroupPattern[i] = BasicStructureSerDeUtil.readString(buffer);
-    }
-    scope = PathPatternTree.deserialize(buffer);
-  }
-
-  @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    CountDatabasePlan that = (CountDatabasePlan) o;
+    final CountDatabasePlan that = (CountDatabasePlan) o;
     return Arrays.equals(storageGroupPattern, that.storageGroupPattern);
   }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/database/GetDatabasePlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/database/GetDatabasePlan.java
index 161e58776f5b3..2ba4c41949840 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/database/GetDatabasePlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/database/GetDatabasePlan.java
@@ -26,11 +26,7 @@
 
 public class GetDatabasePlan extends CountDatabasePlan {
 
-  public GetDatabasePlan() {
-    super(ConfigPhysicalPlanType.GetDatabase);
-  }
-
-  public GetDatabasePlan(List<String> storageGroupPathPattern, PathPatternTree scope) {
+  public GetDatabasePlan(final List<String> storageGroupPathPattern, final PathPatternTree scope) {
     super(ConfigPhysicalPlanType.GetDatabase, storageGroupPathPattern, scope);
   }
 }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/datanode/GetDataNodeConfigurationPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/datanode/GetDataNodeConfigurationPlan.java
index 22f7202841d50..7223415c3de09 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/datanode/GetDataNodeConfigurationPlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/datanode/GetDataNodeConfigurationPlan.java
@@ -19,25 +19,18 @@
 
 package org.apache.iotdb.confignode.consensus.request.read.datanode;
 
-import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
 import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
+import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan;
 
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.util.Objects;
 
 /** Get DataNodeInfo by the specific DataNode's id. And return all when dataNodeID is set to -1. */
-public class GetDataNodeConfigurationPlan extends ConfigPhysicalPlan {
+public class GetDataNodeConfigurationPlan extends ConfigPhysicalReadPlan {
 
-  private int dataNodeId;
+  private final int dataNodeId;
 
-  public GetDataNodeConfigurationPlan() {
+  public GetDataNodeConfigurationPlan(final int dataNodeId) {
     super(ConfigPhysicalPlanType.GetDataNodeConfiguration);
-  }
-
-  public GetDataNodeConfigurationPlan(int dataNodeId) {
-    this();
     this.dataNodeId = dataNodeId;
   }
 
@@ -46,25 +39,14 @@ public Integer getDataNodeId() {
   }
 
   @Override
-  protected void serializeImpl(DataOutputStream stream) throws IOException {
-    stream.writeShort(getType().getPlanType());
-    stream.writeInt(dataNodeId);
-  }
-
-  @Override
-  protected void deserializeImpl(ByteBuffer buffer) {
-    this.dataNodeId = buffer.getInt();
-  }
-
-  @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
      return false;
     }
-    GetDataNodeConfigurationPlan that = (GetDataNodeConfigurationPlan) o;
+    final GetDataNodeConfigurationPlan that = (GetDataNodeConfigurationPlan) o;
     return dataNodeId == that.dataNodeId;
   }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/function/GetFunctionTablePlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/function/GetFunctionTablePlan.java
index 0dc530bb7d46b..bcc9fed7b9b85 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/function/GetFunctionTablePlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/function/GetFunctionTablePlan.java
@@ -19,26 +19,12 @@
 
 package org.apache.iotdb.confignode.consensus.request.read.function;
 
-import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
 import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
+import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan;
 
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-
-public class GetFunctionTablePlan extends ConfigPhysicalPlan {
+public class GetFunctionTablePlan extends ConfigPhysicalReadPlan {
 
   public GetFunctionTablePlan() {
     super(ConfigPhysicalPlanType.GetFunctionTable);
   }
-
-  @Override
-  protected void serializeImpl(DataOutputStream stream) throws IOException {
-    stream.writeShort(getType().getPlanType());
-  }
-
-  @Override
-  protected void deserializeImpl(ByteBuffer buffer) throws IOException {
-    // do nothing
-  }
 }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/function/GetUDFJarPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/function/GetUDFJarPlan.java
index 09baafdcc4f41..ba81d964360a2 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/function/GetUDFJarPlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/function/GetUDFJarPlan.java
@@ -19,27 +19,17 @@
 
 package org.apache.iotdb.confignode.consensus.request.read.function;
 
-import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
 import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
+import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan;
 
-import org.apache.tsfile.utils.ReadWriteIOUtils;
-
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
 import java.util.List;
 import java.util.Objects;
 
-public class GetUDFJarPlan extends ConfigPhysicalPlan {
-
-  private List<String> jarNames;
+public class GetUDFJarPlan extends ConfigPhysicalReadPlan {
 
-  public GetUDFJarPlan() {
-    super(ConfigPhysicalPlanType.GetFunctionJar);
-  }
+  private final List<String> jarNames;
 
-  public GetUDFJarPlan(List<String> triggerNames) {
+  public GetUDFJarPlan(final List<String> triggerNames) {
     super(ConfigPhysicalPlanType.GetFunctionJar);
     jarNames = triggerNames;
   }
@@ -49,27 +39,7 @@ public List<String> getJarNames() {
   }
 
   @Override
-  protected void serializeImpl(DataOutputStream stream) throws IOException {
-    stream.writeShort(getType().getPlanType());
-
-    ReadWriteIOUtils.write(jarNames.size(), stream);
-    for (String jarName : jarNames) {
-      ReadWriteIOUtils.write(jarName, stream);
-    }
-  }
-
-  @Override
-  protected void deserializeImpl(ByteBuffer buffer) throws IOException {
-    int size = ReadWriteIOUtils.readInt(buffer);
-    jarNames = new ArrayList<>(size);
-    while (size > 0) {
-      jarNames.add(ReadWriteIOUtils.readString(buffer));
-      size--;
-    }
-  }
-
-  @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
@@ -79,7 +49,7 @@ public boolean equals(Object o) {
     if (!super.equals(o)) {
       return false;
     }
-    GetUDFJarPlan that = (GetUDFJarPlan) o;
+    final GetUDFJarPlan that = (GetUDFJarPlan) o;
     return Objects.equals(jarNames, that.jarNames);
   }
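The files above all shrink along the same lines, and the partition plans below repeat the pattern: once serialization moves out, the remaining constructor can take every field, which in turn lets the fields become final. Reduced to a toy before/after (hypothetical class, requires java.util.List):

// Before: mutable field, no-arg constructor kept only for deserialization.
class JarNamesPlanBefore {
  private List<String> jarNames; // reassigned by deserializeImpl
  JarNamesPlanBefore() {}
}

// After: deserialization is gone, so the field can be final and non-null.
class JarNamesPlanAfter {
  private final List<String> jarNames;
  JarNamesPlanAfter(final List<String> jarNames) {
    this.jarNames = jarNames;
  }
}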
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/model/GetModelInfoPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/model/GetModelInfoPlan.java
new file mode 100644
index 0000000000000..9c33c2678829a
--- /dev/null
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/model/GetModelInfoPlan.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.consensus.request.read.model;
+
+import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
+import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan;
+import org.apache.iotdb.confignode.rpc.thrift.TGetModelInfoReq;
+
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+
+public class GetModelInfoPlan extends ConfigPhysicalReadPlan {
+
+  private String modelId;
+
+  public GetModelInfoPlan() {
+    super(ConfigPhysicalPlanType.GetModelInfo);
+  }
+
+  public GetModelInfoPlan(final TGetModelInfoReq getModelInfoReq) {
+    super(ConfigPhysicalPlanType.GetModelInfo);
+    this.modelId = getModelInfoReq.getModelId();
+  }
+
+  public String getModelId() {
+    return modelId;
+  }
+
+  @Override
+  protected void serializeImpl(DataOutputStream stream) throws IOException {
+    stream.writeShort(getType().getPlanType());
+    ReadWriteIOUtils.write(modelId, stream);
+  }
+
+  @Override
+  protected void deserializeImpl(ByteBuffer buffer) throws IOException {
+    this.modelId = ReadWriteIOUtils.readString(buffer);
+  }
+
+  @Override
+  public boolean equals(final Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    if (!super.equals(o)) {
+      return false;
+    }
+    final GetModelInfoPlan that = (GetModelInfoPlan) o;
+    return Objects.equals(modelId, that.modelId);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(super.hashCode(), modelId);
+  }
+}
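GetModelInfoPlan is an exception among the read plans: it overrides the inherited no-ops and still writes its plan type and model id, so it survives a round trip through the Factory. A sketch of that round trip (the model id is made up; the setter is the standard thrift-generated one for the modelId field):

// requires java.nio.ByteBuffer
final TGetModelInfoReq req = new TGetModelInfoReq();
req.setModelId("weather_forecast_v1"); // illustrative id
final GetModelInfoPlan original = new GetModelInfoPlan(req);

final ByteBuffer buffer = original.serializeToByteBuffer();
final GetModelInfoPlan restored =
    (GetModelInfoPlan) ConfigPhysicalPlan.Factory.create(buffer);
assert "weather_forecast_v1".equals(restored.getModelId());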
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/model/ShowModelPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/model/ShowModelPlan.java
new file mode 100644
index 0000000000000..df924c97f5b7d
--- /dev/null
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/model/ShowModelPlan.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.consensus.request.read.model;
+
+import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
+import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan;
+import org.apache.iotdb.confignode.rpc.thrift.TShowModelReq;
+
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+
+public class ShowModelPlan extends ConfigPhysicalReadPlan {
+
+  private String modelName;
+
+  public ShowModelPlan() {
+    super(ConfigPhysicalPlanType.ShowModel);
+  }
+
+  public ShowModelPlan(final TShowModelReq showModelReq) {
+    super(ConfigPhysicalPlanType.ShowModel);
+    if (showModelReq.isSetModelId()) {
+      this.modelName = showModelReq.getModelId();
+    }
+  }
+
+  public boolean isSetModelName() {
+    return modelName != null;
+  }
+
+  public String getModelName() {
+    return modelName;
+  }
+
+  @Override
+  protected void serializeImpl(DataOutputStream stream) throws IOException {
+    stream.writeShort(getType().getPlanType());
+    ReadWriteIOUtils.write(modelName != null, stream);
+    ReadWriteIOUtils.write(modelName, stream);
+  }
+
+  @Override
+  protected void deserializeImpl(ByteBuffer buffer) throws IOException {
+    boolean isSetModelId = ReadWriteIOUtils.readBool(buffer);
+    if (isSetModelId) {
+      this.modelName = ReadWriteIOUtils.readString(buffer);
+    }
+  }
+
+  @Override
+  public boolean equals(final Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    if (!super.equals(o)) {
+      return false;
+    }
+    final ShowModelPlan that = (ShowModelPlan) o;
+    return Objects.equals(modelName, that.modelName);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(super.hashCode(), modelName);
+  }
+}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/CountTimeSlotListPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/CountTimeSlotListPlan.java
index acf7c974224ed..0050f712fb038 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/CountTimeSlotListPlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/CountTimeSlotListPlan.java
@@ -22,18 +22,12 @@
 import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId;
 import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType;
 import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot;
-import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils;
-import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
 import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
+import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan;
 
-import org.apache.tsfile.utils.ReadWriteIOUtils;
-
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.util.Objects;
 
-public class CountTimeSlotListPlan extends ConfigPhysicalPlan {
+public class CountTimeSlotListPlan extends ConfigPhysicalReadPlan {
 
   private String database;
 
@@ -41,16 +35,12 @@ public class CountTimeSlotListPlan extends ConfigPhysicalPlan {
 
   private TConsensusGroupId regionId;
 
-  private long startTime;
+  private final long startTime;
 
-  private long endTime;
+  private final long endTime;
 
-  public CountTimeSlotListPlan() {
+  public CountTimeSlotListPlan(final long startTime, final long endTime) {
     super(ConfigPhysicalPlanType.CountTimeSlotList);
-  }
-
-  public CountTimeSlotListPlan(long startTime, long endTime) {
-    this();
     this.startTime = startTime;
     this.endTime = endTime;
     this.database = "";
@@ -58,7 +48,7 @@ public CountTimeSlotListPlan(long startTime, long endTime) {
     this.regionId = new TConsensusGroupId(TConsensusGroupType.DataRegion, -1);
   }
 
-  public void setDatabase(String database) {
+  public void setDatabase(final String database) {
     this.database = database;
   }
 
@@ -66,7 +56,7 @@ public String getDatabase() {
     return database;
   }
 
-  public void setRegionId(TConsensusGroupId regionId) {
+  public void setRegionId(final TConsensusGroupId regionId) {
     this.regionId = regionId;
   }
 
@@ -74,7 +64,7 @@ public TConsensusGroupId getRegionId() {
     return regionId;
   }
 
-  public void setSeriesSlotId(TSeriesPartitionSlot seriesSlotId) {
+  public void setSeriesSlotId(final TSeriesPartitionSlot seriesSlotId) {
     this.seriesSlotId = seriesSlotId;
   }
 
@@ -91,33 +81,14 @@ public long getEndTime() {
   }
 
   @Override
-  protected void serializeImpl(DataOutputStream stream) throws IOException {
-    stream.writeShort(getType().getPlanType());
-    ReadWriteIOUtils.write(database, stream);
-    ThriftCommonsSerDeUtils.serializeTSeriesPartitionSlot(seriesSlotId, stream);
-    ThriftCommonsSerDeUtils.serializeTConsensusGroupId(regionId, stream);
-    stream.writeLong(startTime);
-    stream.writeLong(endTime);
-  }
-
-  @Override
-  protected void deserializeImpl(ByteBuffer buffer) throws IOException {
-    this.database = ReadWriteIOUtils.readString(buffer);
-    this.seriesSlotId = ThriftCommonsSerDeUtils.deserializeTSeriesPartitionSlot(buffer);
-    this.regionId = ThriftCommonsSerDeUtils.deserializeTConsensusGroupId(buffer);
-    this.startTime = buffer.getLong();
-    this.endTime = buffer.getLong();
-  }
-
-  @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    CountTimeSlotListPlan that = (CountTimeSlotListPlan) o;
+    final CountTimeSlotListPlan that = (CountTimeSlotListPlan) o;
     return database.equals(that.database)
         && seriesSlotId.equals(that.seriesSlotId)
         && regionId.equals(that.regionId)
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetDataPartitionPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetDataPartitionPlan.java
index 32e494b55344d..cce8e14611490 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetDataPartitionPlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetDataPartitionPlan.java
@@ -20,39 +20,28 @@
 package org.apache.iotdb.confignode.consensus.request.read.partition;
 
 import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot;
-import org.apache.iotdb.commons.utils.BasicStructureSerDeUtil;
-import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils;
-import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
 import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
+import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan;
 import org.apache.iotdb.confignode.rpc.thrift.TDataPartitionReq;
 import org.apache.iotdb.confignode.rpc.thrift.TTimeSlotList;
 
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.HashMap;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Objects;
 import java.util.concurrent.ConcurrentHashMap;
 
 /** Get or create DataPartition by the specific partitionSlotsMap. */
-public class GetDataPartitionPlan extends ConfigPhysicalPlan {
+public class GetDataPartitionPlan extends ConfigPhysicalReadPlan {
 
   // Map<StorageGroupName, Map<TSeriesPartitionSlot, TTimeSlotList>>
   protected Map<String, Map<TSeriesPartitionSlot, TTimeSlotList>> partitionSlotsMap;
 
-  public GetDataPartitionPlan() {
-    super(ConfigPhysicalPlanType.GetDataPartition);
-  }
-
-  public GetDataPartitionPlan(ConfigPhysicalPlanType configPhysicalPlanType) {
+  public GetDataPartitionPlan(final ConfigPhysicalPlanType configPhysicalPlanType) {
     super(configPhysicalPlanType);
   }
 
   public GetDataPartitionPlan(
-      Map<String, Map<TSeriesPartitionSlot, TTimeSlotList>> partitionSlotsMap) {
-    this();
+      final Map<String, Map<TSeriesPartitionSlot, TTimeSlotList>> partitionSlotsMap) {
+    super(ConfigPhysicalPlanType.GetDataPartition);
     this.partitionSlotsMap = partitionSlotsMap;
   }
 
@@ -66,58 +55,19 @@ public Map<String, Map<TSeriesPartitionSlot, TTimeSlotList>> getPartitionSlotsMa
    * @param req TDataPartitionReq
    * @return GetDataPartitionPlan
    */
-  public static GetDataPartitionPlan convertFromRpcTDataPartitionReq(TDataPartitionReq req) {
+  public static GetDataPartitionPlan convertFromRpcTDataPartitionReq(final TDataPartitionReq req) {
     return new GetDataPartitionPlan(new ConcurrentHashMap<>(req.getPartitionSlotsMap()));
   }
 
   @Override
-  protected void serializeImpl(DataOutputStream stream) throws IOException {
-    stream.writeShort(getType().getPlanType());
-
-    stream.writeInt(partitionSlotsMap.size());
-    for (Entry<String, Map<TSeriesPartitionSlot, TTimeSlotList>> entry :
-        partitionSlotsMap.entrySet()) {
-      String storageGroup = entry.getKey();
-      Map<TSeriesPartitionSlot, TTimeSlotList> seriesPartitionTimePartitionSlots = entry.getValue();
-      BasicStructureSerDeUtil.write(storageGroup, stream);
-      stream.writeInt(seriesPartitionTimePartitionSlots.size());
-      for (Entry<TSeriesPartitionSlot, TTimeSlotList> e :
-          seriesPartitionTimePartitionSlots.entrySet()) {
-        TSeriesPartitionSlot seriesPartitionSlot = e.getKey();
-        TTimeSlotList timePartitionSlotList = e.getValue();
-        ThriftCommonsSerDeUtils.serializeTSeriesPartitionSlot(seriesPartitionSlot, stream);
-        ThriftCommonsSerDeUtils.serializeTTimePartitionSlotList(timePartitionSlotList, stream);
-      }
-    }
-  }
-
-  @Override
-  protected void deserializeImpl(ByteBuffer buffer) {
-    partitionSlotsMap = new HashMap<>();
-    int storageGroupNum = buffer.getInt();
-    for (int i = 0; i < storageGroupNum; i++) {
-      String storageGroup = BasicStructureSerDeUtil.readString(buffer);
-      partitionSlotsMap.put(storageGroup, new HashMap<>());
-      int seriesPartitionSlotNum = buffer.getInt();
-      for (int j = 0; j < seriesPartitionSlotNum; j++) {
-        TSeriesPartitionSlot seriesPartitionSlot =
-            ThriftCommonsSerDeUtils.deserializeTSeriesPartitionSlot(buffer);
-        TTimeSlotList timePartitionSlotList =
-            ThriftCommonsSerDeUtils.deserializeTTimePartitionSlotList(buffer);
-        partitionSlotsMap.get(storageGroup).put(seriesPartitionSlot, timePartitionSlotList);
-      }
-    }
-  }
-
-  @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    GetDataPartitionPlan that = (GetDataPartitionPlan) o;
+    final GetDataPartitionPlan that = (GetDataPartitionPlan) o;
     return partitionSlotsMap.equals(that.partitionSlotsMap);
   }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetNodePathsPartitionPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetNodePathsPartitionPlan.java
index 978a70853b26e..3b8d0a7c8b19a 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetNodePathsPartitionPlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetNodePathsPartitionPlan.java
@@ -20,17 +20,13 @@
 package org.apache.iotdb.confignode.consensus.request.read.partition;
 
 import org.apache.iotdb.commons.path.PartialPath;
-import org.apache.iotdb.commons.path.PathDeserializeUtil;
 import org.apache.iotdb.commons.path.PathPatternTree;
-import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
 import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
+import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan;
 
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.util.Objects;
 
-public class GetNodePathsPartitionPlan extends ConfigPhysicalPlan {
+public class GetNodePathsPartitionPlan extends ConfigPhysicalReadPlan {
   private PartialPath partialPath;
   private PathPatternTree scope;
   private int level = -1;
@@ -43,7 +39,7 @@ public PathPatternTree getScope() {
     return scope;
   }
 
-  public void setScope(PathPatternTree scope) {
+  public void setScope(final PathPatternTree scope) {
     this.scope = scope;
   }
 
@@ -51,7 +47,7 @@ public PartialPath getPartialPath() {
     return partialPath;
   }
 
-  public void setPartialPath(PartialPath partialPath) {
+  public void setPartialPath(final PartialPath partialPath) {
     this.partialPath = partialPath;
   }
 
@@ -59,34 +55,19 @@ public int getLevel() {
     return level;
   }
 
-  public void setLevel(int level) {
+  public void setLevel(final int level) {
     this.level = level;
   }
 
   @Override
-  protected void serializeImpl(DataOutputStream stream) throws IOException {
-    stream.writeShort(getType().getPlanType());
-    partialPath.serialize(stream);
-    scope.serialize(stream);
-    stream.writeInt(level);
-  }
-
-  @Override
-  protected void deserializeImpl(ByteBuffer buffer) throws IOException {
-    partialPath = (PartialPath) PathDeserializeUtil.deserialize(buffer);
-    scope = PathPatternTree.deserialize(buffer);
-    level = buffer.getInt();
-  }
-
-  @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    GetNodePathsPartitionPlan that = (GetNodePathsPartitionPlan) o;
+    final GetNodePathsPartitionPlan that = (GetNodePathsPartitionPlan) o;
     return level == that.level && Objects.equals(partialPath, that.partialPath);
   }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetOrCreateDataPartitionPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetOrCreateDataPartitionPlan.java
index 347ae31214700..800b7e0e19a49 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetOrCreateDataPartitionPlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetOrCreateDataPartitionPlan.java
@@ -29,13 +29,9 @@
 
 public class GetOrCreateDataPartitionPlan extends GetDataPartitionPlan {
 
-  public GetOrCreateDataPartitionPlan() {
-    super(ConfigPhysicalPlanType.GetOrCreateDataPartition);
-  }
-
   public GetOrCreateDataPartitionPlan(
-      Map<String, Map<TSeriesPartitionSlot, TTimeSlotList>> partitionSlotsMap) {
-    this();
+      final Map<String, Map<TSeriesPartitionSlot, TTimeSlotList>> partitionSlotsMap) {
+    super(ConfigPhysicalPlanType.GetOrCreateDataPartition);
     this.partitionSlotsMap = partitionSlotsMap;
   }
 
@@ -46,7 +42,7 @@ public GetOrCreateDataPartitionPlan(
    * @return GetOrCreateDataPartitionPlan
    */
   public static GetOrCreateDataPartitionPlan convertFromRpcTDataPartitionReq(
-      TDataPartitionReq req) {
+      final TDataPartitionReq req) {
     return new GetOrCreateDataPartitionPlan(new ConcurrentHashMap<>(req.getPartitionSlotsMap()));
   }
 }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetOrCreateSchemaPartitionPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetOrCreateSchemaPartitionPlan.java
index e245bc7155f7e..c7b7272c569ec 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetOrCreateSchemaPartitionPlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetOrCreateSchemaPartitionPlan.java
@@ -31,7 +31,8 @@ public GetOrCreateSchemaPartitionPlan() {
     super(ConfigPhysicalPlanType.GetOrCreateSchemaPartition);
   }
 
-  public GetOrCreateSchemaPartitionPlan(Map<String, List<TSeriesPartitionSlot>> partitionSlotsMap) {
+  public GetOrCreateSchemaPartitionPlan(
+      final Map<String, List<TSeriesPartitionSlot>> partitionSlotsMap) {
     this();
     this.partitionSlotsMap = partitionSlotsMap;
   }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetSchemaPartitionPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetSchemaPartitionPlan.java
index 2751c5efaed88..8bfccd18a1c62 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetSchemaPartitionPlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetSchemaPartitionPlan.java
@@ -20,39 +20,27 @@
 package org.apache.iotdb.confignode.consensus.request.read.partition;
 
 import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot;
-import org.apache.iotdb.commons.utils.BasicStructureSerDeUtil;
-import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils;
-import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
 import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
+import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan;
 
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Objects;
 
 /** Get or create SchemaPartition by the specific partitionSlotsMap. */
-public class GetSchemaPartitionPlan extends ConfigPhysicalPlan {
+public class GetSchemaPartitionPlan extends ConfigPhysicalReadPlan {
 
   // Map<StorageGroupName, List<TSeriesPartitionSlot>>
   // Get all SchemaPartitions when the partitionSlotsMap is empty
   // Get all exists SchemaPartitions in one StorageGroup when the SeriesPartitionSlot is empty
   protected Map<String, List<TSeriesPartitionSlot>> partitionSlotsMap;
 
-  public GetSchemaPartitionPlan() {
-    super(ConfigPhysicalPlanType.GetSchemaPartition);
-  }
-
-  public GetSchemaPartitionPlan(ConfigPhysicalPlanType configPhysicalPlanType) {
+  public GetSchemaPartitionPlan(final ConfigPhysicalPlanType configPhysicalPlanType) {
     super(configPhysicalPlanType);
   }
 
-  public GetSchemaPartitionPlan(Map<String, List<TSeriesPartitionSlot>> partitionSlotsMap) {
-    this();
+  public GetSchemaPartitionPlan(final Map<String, List<TSeriesPartitionSlot>> partitionSlotsMap) {
+    super(ConfigPhysicalPlanType.GetSchemaPartition);
     this.partitionSlotsMap = partitionSlotsMap;
   }
 
@@ -61,46 +49,14 @@ public Map<String, List<TSeriesPartitionSlot>> getPartitionSlotsMap() {
   }
 
   @Override
-  protected void serializeImpl(DataOutputStream stream) throws IOException {
-    stream.writeShort(getType().getPlanType());
-
-    stream.writeInt(partitionSlotsMap.size());
-    for (Entry<String, List<TSeriesPartitionSlot>> entry : partitionSlotsMap.entrySet()) {
-      String storageGroup = entry.getKey();
-      List<TSeriesPartitionSlot> seriesPartitionSlots = entry.getValue();
-      BasicStructureSerDeUtil.write(storageGroup, stream);
-      stream.writeInt(seriesPartitionSlots.size());
-      seriesPartitionSlots.forEach(
-          seriesPartitionSlot ->
-              ThriftCommonsSerDeUtils.serializeTSeriesPartitionSlot(seriesPartitionSlot, stream));
-    }
-  }
-
-  @Override
-  protected void deserializeImpl(ByteBuffer buffer) throws IOException {
-    partitionSlotsMap = new HashMap<>();
-    int storageGroupNum = buffer.getInt();
-    for (int i = 0; i < storageGroupNum; i++) {
-      String storageGroup = BasicStructureSerDeUtil.readString(buffer);
-      partitionSlotsMap.put(storageGroup, new ArrayList<>());
-      int seriesPartitionSlotNum = buffer.getInt();
-      for (int j = 0; j < seriesPartitionSlotNum; j++) {
-        TSeriesPartitionSlot seriesPartitionSlot =
-            ThriftCommonsSerDeUtils.deserializeTSeriesPartitionSlot(buffer);
-        partitionSlotsMap.get(storageGroup).add(seriesPartitionSlot);
-      }
-    }
-  }
-
-  @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    GetSchemaPartitionPlan that = (GetSchemaPartitionPlan) o;
+    final GetSchemaPartitionPlan that = (GetSchemaPartitionPlan) o;
     return partitionSlotsMap.equals(that.partitionSlotsMap);
   }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetSeriesSlotListPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetSeriesSlotListPlan.java
index 832a9b8fa39ba..f0d130fa5636d 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetSeriesSlotListPlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetSeriesSlotListPlan.java
@@ -20,28 +20,19 @@
 package org.apache.iotdb.confignode.consensus.request.read.partition;
 
 import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType;
-import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
 import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
+import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan;
 
-import org.apache.tsfile.utils.ReadWriteIOUtils;
-
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.util.Objects;
 
-public class GetSeriesSlotListPlan extends ConfigPhysicalPlan {
+public class GetSeriesSlotListPlan extends ConfigPhysicalReadPlan {
 
-  private String database;
+  private final String database;
 
-  private TConsensusGroupType partitionType;
+  private final TConsensusGroupType partitionType;
 
-  public GetSeriesSlotListPlan() {
+  public GetSeriesSlotListPlan(final String database, final TConsensusGroupType partitionType) {
     super(ConfigPhysicalPlanType.GetSeriesSlotList);
-  }
-
-  public GetSeriesSlotListPlan(String database, TConsensusGroupType partitionType) {
-    this();
     this.database = database;
     this.partitionType = partitionType;
   }
@@ -55,27 +46,14 @@ public TConsensusGroupType getPartitionType() {
   }
 
   @Override
-  protected void serializeImpl(DataOutputStream stream) throws IOException {
-    stream.writeShort(getType().getPlanType());
-    ReadWriteIOUtils.write(database, stream);
-    stream.writeInt(partitionType.ordinal());
-  }
-
-  @Override
-  protected void deserializeImpl(ByteBuffer buffer) throws IOException {
-    this.database = ReadWriteIOUtils.readString(buffer);
-    this.partitionType = TConsensusGroupType.findByValue(buffer.getInt());
-  }
-
-  @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    GetSeriesSlotListPlan that = (GetSeriesSlotListPlan) o;
+    final GetSeriesSlotListPlan that = (GetSeriesSlotListPlan) o;
     return database.equals(that.database) && partitionType.equals(that.partitionType);
   }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetTimeSlotListPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetTimeSlotListPlan.java
index 1b6f881763e95..00a130fc307e2 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetTimeSlotListPlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/partition/GetTimeSlotListPlan.java
@@ -22,18 +22,12 @@
 import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId;
 import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType;
 import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot;
-import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils;
-import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
 import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
+import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan;
 
-import org.apache.tsfile.utils.ReadWriteIOUtils;
-
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.util.Objects;
 
-public class GetTimeSlotListPlan extends ConfigPhysicalPlan {
+public class GetTimeSlotListPlan extends ConfigPhysicalReadPlan {
 
   private String database;
 
@@ -41,16 +35,12 @@ public class GetTimeSlotListPlan extends ConfigPhysicalPlan {
 
   private TConsensusGroupId regionId;
 
-  private long startTime;
+  private final long startTime;
 
-  private long endTime;
+  private final long endTime;
 
-  public GetTimeSlotListPlan() {
+  public GetTimeSlotListPlan(final long startTime, final long endTime) {
     super(ConfigPhysicalPlanType.GetTimeSlotList);
-  }
-
-  public GetTimeSlotListPlan(long startTime, long endTime) {
-    this();
     this.startTime = startTime;
     this.endTime = endTime;
     this.database = "";
@@ -58,7 +48,7
@@ public GetTimeSlotListPlan(long startTime, long endTime) { this.regionId = new TConsensusGroupId(TConsensusGroupType.DataRegion, -1); } - public void setDatabase(String database) { + public void setDatabase(final String database) { this.database = database; } @@ -66,7 +56,7 @@ public String getDatabase() { return database; } - public void setRegionId(TConsensusGroupId regionId) { + public void setRegionId(final TConsensusGroupId regionId) { this.regionId = regionId; } @@ -74,7 +64,7 @@ public TConsensusGroupId getRegionId() { return regionId; } - public void setSeriesSlotId(TSeriesPartitionSlot seriesSlotId) { + public void setSeriesSlotId(final TSeriesPartitionSlot seriesSlotId) { this.seriesSlotId = seriesSlotId; } @@ -91,33 +81,14 @@ public long getEndTime() { } @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - stream.writeShort(getType().getPlanType()); - ReadWriteIOUtils.write(database, stream); - ThriftCommonsSerDeUtils.serializeTSeriesPartitionSlot(seriesSlotId, stream); - ThriftCommonsSerDeUtils.serializeTConsensusGroupId(regionId, stream); - stream.writeLong(startTime); - stream.writeLong(endTime); - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { - this.database = ReadWriteIOUtils.readString(buffer); - this.seriesSlotId = ThriftCommonsSerDeUtils.deserializeTSeriesPartitionSlot(buffer); - this.regionId = ThriftCommonsSerDeUtils.deserializeTConsensusGroupId(buffer); - this.startTime = buffer.getLong(); - this.endTime = buffer.getLong(); - } - - @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - GetTimeSlotListPlan that = (GetTimeSlotListPlan) o; + final GetTimeSlotListPlan that = (GetTimeSlotListPlan) o; return database.equals(that.database) && seriesSlotId.equals(that.seriesSlotId) && regionId.equals(that.regionId) diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/pipe/plugin/GetPipePluginJarPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/pipe/plugin/GetPipePluginJarPlan.java index 4b5b2152db925..8a7e92a4310c3 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/pipe/plugin/GetPipePluginJarPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/pipe/plugin/GetPipePluginJarPlan.java @@ -19,23 +19,13 @@ package org.apache.iotdb.confignode.consensus.request.read.pipe.plugin; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; -import org.apache.tsfile.utils.ReadWriteIOUtils; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; import java.util.List; -public class GetPipePluginJarPlan extends ConfigPhysicalPlan { - private List<String> jarNames; - - public GetPipePluginJarPlan() { - super(ConfigPhysicalPlanType.GetPipePluginJar); - } +public class GetPipePluginJarPlan extends ConfigPhysicalReadPlan { + private final List<String> jarNames; public GetPipePluginJarPlan(List<String> jarNames) { super(ConfigPhysicalPlanType.GetPipePluginJar); @@ -45,23 +35,4 @@ public GetPipePluginJarPlan(List<String> jarNames) { public List<String> getJarNames() { return jarNames; } - -
@Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - stream.writeShort(getType().getPlanType()); - - ReadWriteIOUtils.write(jarNames.size(), stream); - for (String jarName : jarNames) { - ReadWriteIOUtils.write(jarName, stream); - } - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { - int size = ReadWriteIOUtils.readInt(buffer); - jarNames = new ArrayList<>(); - for (int i = 0; i < size; i++) { - jarNames.add(ReadWriteIOUtils.readString(buffer)); - } - } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/pipe/plugin/GetPipePluginTablePlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/pipe/plugin/GetPipePluginTablePlan.java index cabe5cc1139b9..39cf23ed34436 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/pipe/plugin/GetPipePluginTablePlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/pipe/plugin/GetPipePluginTablePlan.java @@ -19,26 +19,12 @@ package org.apache.iotdb.confignode.consensus.request.read.pipe.plugin; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; - -public class GetPipePluginTablePlan extends ConfigPhysicalPlan { +public class GetPipePluginTablePlan extends ConfigPhysicalReadPlan { public GetPipePluginTablePlan() { super(ConfigPhysicalPlanType.GetPipePluginTable); } - - @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - stream.writeShort(getType().getPlanType()); - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { - // Empty method, since it is not needed now - } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/pipe/task/ShowPipePlanV2.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/pipe/task/ShowPipePlanV2.java index 1d1acf293f3e0..6bb67d2414620 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/pipe/task/ShowPipePlanV2.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/pipe/task/ShowPipePlanV2.java @@ -19,26 +19,12 @@ package org.apache.iotdb.confignode.consensus.request.read.pipe.task; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; - -public class ShowPipePlanV2 extends ConfigPhysicalPlan { +public class ShowPipePlanV2 extends ConfigPhysicalReadPlan { public ShowPipePlanV2() { super(ConfigPhysicalPlanType.ShowPipeV2); } - - @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - stream.writeShort(getType().getPlanType()); - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { - // Empty method, since it is not needed now - } } diff --git 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/region/GetRegionIdPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/region/GetRegionIdPlan.java index 00797a1fb149a..878d18c3bab21 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/region/GetRegionIdPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/region/GetRegionIdPlan.java @@ -22,22 +22,16 @@ import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; -import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; -import org.apache.tsfile.utils.ReadWriteIOUtils; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; import java.util.Objects; -public class GetRegionIdPlan extends ConfigPhysicalPlan { +public class GetRegionIdPlan extends ConfigPhysicalReadPlan { private String database; - private TConsensusGroupType partitionType; + private final TConsensusGroupType partitionType; private TTimePartitionSlot startTimeSlotId; @@ -45,12 +39,8 @@ public class GetRegionIdPlan extends ConfigPhysicalPlan { private TSeriesPartitionSlot seriesSlotId; - public GetRegionIdPlan() { + public GetRegionIdPlan(final TConsensusGroupType partitionType) { super(ConfigPhysicalPlanType.GetRegionId); - } - - public GetRegionIdPlan(TConsensusGroupType partitionType) { - this(); this.partitionType = partitionType; this.database = ""; this.seriesSlotId = new TSeriesPartitionSlot(-1); @@ -62,11 +52,11 @@ public String getDatabase() { return database; } - public void setDatabase(String database) { + public void setDatabase(final String database) { this.database = database; } - public void setSeriesSlotId(TSeriesPartitionSlot seriesSlotId) { + public void setSeriesSlotId(final TSeriesPartitionSlot seriesSlotId) { this.seriesSlotId = seriesSlotId; } @@ -78,7 +68,7 @@ public TTimePartitionSlot getStartTimeSlotId() { return startTimeSlotId; } - public void setStartTimeSlotId(TTimePartitionSlot startTimeSlotId) { + public void setStartTimeSlotId(final TTimePartitionSlot startTimeSlotId) { this.startTimeSlotId = startTimeSlotId; } @@ -86,7 +76,7 @@ public TTimePartitionSlot getEndTimeSlotId() { return endTimeSlotId; } - public void setEndTimeSlotId(TTimePartitionSlot endTimeSlotId) { + public void setEndTimeSlotId(final TTimePartitionSlot endTimeSlotId) { this.endTimeSlotId = endTimeSlotId; } @@ -95,33 +85,14 @@ public TConsensusGroupType getPartitionType() { } @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - stream.writeShort(getType().getPlanType()); - stream.writeInt(partitionType.ordinal()); - ReadWriteIOUtils.write(database, stream); - ThriftCommonsSerDeUtils.serializeTSeriesPartitionSlot(seriesSlotId, stream); - ThriftCommonsSerDeUtils.serializeTTimePartitionSlot(startTimeSlotId, stream); - ThriftCommonsSerDeUtils.serializeTTimePartitionSlot(endTimeSlotId, stream); - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { - this.partitionType = TConsensusGroupType.findByValue(buffer.getInt()); - this.database = 
ReadWriteIOUtils.readString(buffer); - this.seriesSlotId = ThriftCommonsSerDeUtils.deserializeTSeriesPartitionSlot(buffer); - this.startTimeSlotId = ThriftCommonsSerDeUtils.deserializeTTimePartitionSlot(buffer); - this.endTimeSlotId = ThriftCommonsSerDeUtils.deserializeTTimePartitionSlot(buffer); - } - - @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - GetRegionIdPlan that = (GetRegionIdPlan) o; + final GetRegionIdPlan that = (GetRegionIdPlan) o; return database.equals(that.database) && partitionType.equals(that.partitionType) && seriesSlotId.equals(that.seriesSlotId) diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/region/GetRegionInfoListPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/region/GetRegionInfoListPlan.java index f302fb03de35f..da504b36d6ce3 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/region/GetRegionInfoListPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/region/GetRegionInfoListPlan.java @@ -19,18 +19,11 @@ package org.apache.iotdb.confignode.consensus.request.read.region; -import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; import org.apache.iotdb.confignode.rpc.thrift.TShowRegionReq; -import org.apache.tsfile.utils.ReadWriteIOUtils; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; - -public class GetRegionInfoListPlan extends ConfigPhysicalPlan { +public class GetRegionInfoListPlan extends ConfigPhysicalReadPlan { private TShowRegionReq showRegionReq; @@ -38,7 +31,7 @@ public GetRegionInfoListPlan() { super(ConfigPhysicalPlanType.GetRegionInfoList); } - public GetRegionInfoListPlan(TShowRegionReq showRegionReq) { + public GetRegionInfoListPlan(final TShowRegionReq showRegionReq) { super(ConfigPhysicalPlanType.GetRegionInfoList); this.showRegionReq = showRegionReq; } @@ -47,39 +40,7 @@ public TShowRegionReq getShowRegionReq() { return showRegionReq; } - public void setShowRegionReq(TShowRegionReq showRegionReq) { + public void setShowRegionReq(final TShowRegionReq showRegionReq) { this.showRegionReq = showRegionReq; } - - @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - stream.writeShort(getType().getPlanType()); - stream.writeBoolean(showRegionReq != null); - if (showRegionReq != null) { - boolean setConsensusGroupType = showRegionReq.isSetConsensusGroupType(); - stream.writeBoolean(setConsensusGroupType); - if (setConsensusGroupType) { - ReadWriteIOUtils.write(showRegionReq.getConsensusGroupType().ordinal(), stream); - } - boolean setStorageGroups = showRegionReq.isSetDatabases(); - stream.writeBoolean(setStorageGroups); - if (setStorageGroups) { - ReadWriteIOUtils.writeStringList(showRegionReq.getDatabases(), stream); - } - } - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { - if (ReadWriteIOUtils.readBool(buffer)) { - this.showRegionReq = new TShowRegionReq(); - if (ReadWriteIOUtils.readBool(buffer)) { - this.showRegionReq.setConsensusGroupType( - 
TConsensusGroupType.values()[ReadWriteIOUtils.readInt(buffer)]); - } - if (ReadWriteIOUtils.readBool(buffer)) { - this.showRegionReq.setDatabases(ReadWriteIOUtils.readStringList(buffer)); - } - } - } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/subscription/ShowSubscriptionPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/subscription/ShowSubscriptionPlan.java index 881a689539adb..5514e0542d9b0 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/subscription/ShowSubscriptionPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/subscription/ShowSubscriptionPlan.java @@ -19,26 +19,12 @@ package org.apache.iotdb.confignode.consensus.request.read.subscription; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; - -public class ShowSubscriptionPlan extends ConfigPhysicalPlan { +public class ShowSubscriptionPlan extends ConfigPhysicalReadPlan { public ShowSubscriptionPlan() { super(ConfigPhysicalPlanType.ShowSubscription); } - - @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - stream.writeShort(getType().getPlanType()); - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { - // Empty method, since it is not needed now - } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/subscription/ShowTopicPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/subscription/ShowTopicPlan.java index 810990ba9609d..7d1f5200aa110 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/subscription/ShowTopicPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/subscription/ShowTopicPlan.java @@ -19,26 +19,12 @@ package org.apache.iotdb.confignode.consensus.request.read.subscription; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; - -public class ShowTopicPlan extends ConfigPhysicalPlan { +public class ShowTopicPlan extends ConfigPhysicalReadPlan { public ShowTopicPlan() { super(ConfigPhysicalPlanType.ShowTopic); } - - @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - stream.writeShort(getType().getPlanType()); - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { - // Empty method, since it is not needed now - } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/CheckTemplateSettablePlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/CheckTemplateSettablePlan.java index df6a44cb83f55..42f4c30c7751d 100644 --- 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/CheckTemplateSettablePlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/CheckTemplateSettablePlan.java @@ -19,25 +19,15 @@ package org.apache.iotdb.confignode.consensus.request.read.template; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; -import org.apache.tsfile.utils.ReadWriteIOUtils; +public class CheckTemplateSettablePlan extends ConfigPhysicalReadPlan { -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; + private final String name; + private final String path; -public class CheckTemplateSettablePlan extends ConfigPhysicalPlan { - - private String name; - private String path; - - public CheckTemplateSettablePlan() { - super(ConfigPhysicalPlanType.CheckTemplateSettable); - } - - public CheckTemplateSettablePlan(String name, String path) { + public CheckTemplateSettablePlan(final String name, final String path) { super(ConfigPhysicalPlanType.CheckTemplateSettable); this.name = name; this.path = path; @@ -50,17 +40,4 @@ public String getName() { public String getPath() { return path; } - - @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - stream.writeShort(getType().getPlanType()); - ReadWriteIOUtils.write(name, stream); - ReadWriteIOUtils.write(path, stream); - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { - this.name = ReadWriteIOUtils.readString(buffer); - this.path = ReadWriteIOUtils.readString(buffer); - } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetAllSchemaTemplatePlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetAllSchemaTemplatePlan.java index bf1a7d8ff26ae..0add0c2edf795 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetAllSchemaTemplatePlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetAllSchemaTemplatePlan.java @@ -19,24 +19,12 @@ package org.apache.iotdb.confignode.consensus.request.read.template; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; - -public class GetAllSchemaTemplatePlan extends ConfigPhysicalPlan { +public class GetAllSchemaTemplatePlan extends ConfigPhysicalReadPlan { public GetAllSchemaTemplatePlan() { super(ConfigPhysicalPlanType.GetAllSchemaTemplate); } - - @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - stream.writeShort(getType().getPlanType()); - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException {} } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetAllTemplateSetInfoPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetAllTemplateSetInfoPlan.java index 
f13e5b02e78f9..c56f79165b838 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetAllTemplateSetInfoPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetAllTemplateSetInfoPlan.java @@ -19,24 +19,12 @@ package org.apache.iotdb.confignode.consensus.request.read.template; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; - -public class GetAllTemplateSetInfoPlan extends ConfigPhysicalPlan { +public class GetAllTemplateSetInfoPlan extends ConfigPhysicalReadPlan { public GetAllTemplateSetInfoPlan() { super(ConfigPhysicalPlanType.GetAllTemplateSetInfo); } - - @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - stream.writeShort(getType().getPlanType()); - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException {} } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetPathsSetTemplatePlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetPathsSetTemplatePlan.java index e96d4d7957a77..f81805066af37 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetPathsSetTemplatePlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetPathsSetTemplatePlan.java @@ -20,25 +20,15 @@ package org.apache.iotdb.confignode.consensus.request.read.template; import org.apache.iotdb.commons.path.PathPatternTree; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; -import org.apache.tsfile.utils.ReadWriteIOUtils; +public class GetPathsSetTemplatePlan extends ConfigPhysicalReadPlan { -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; + private final String name; + private final PathPatternTree scope; -public class GetPathsSetTemplatePlan extends ConfigPhysicalPlan { - - private String name; - private PathPatternTree scope; - - public GetPathsSetTemplatePlan() { - super(ConfigPhysicalPlanType.GetPathsSetTemplate); - } - - public GetPathsSetTemplatePlan(String name, PathPatternTree scope) { + public GetPathsSetTemplatePlan(final String name, final PathPatternTree scope) { super(ConfigPhysicalPlanType.GetPathsSetTemplate); this.name = name; this.scope = scope; @@ -51,17 +41,4 @@ public String getName() { public PathPatternTree getScope() { return scope; } - - @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - stream.writeShort(getType().getPlanType()); - ReadWriteIOUtils.write(name, stream); - scope.serialize(stream); - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { - this.name = ReadWriteIOUtils.readString(buffer); - this.scope = PathPatternTree.deserialize(buffer); - } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetSchemaTemplatePlan.java 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetSchemaTemplatePlan.java index 627dfa98c68d7..9545429923913 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetSchemaTemplatePlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetSchemaTemplatePlan.java @@ -19,26 +19,17 @@ package org.apache.iotdb.confignode.consensus.request.read.template; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; -import org.apache.tsfile.utils.ReadWriteIOUtils; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; import java.util.Objects; -public class GetSchemaTemplatePlan extends ConfigPhysicalPlan { +public class GetSchemaTemplatePlan extends ConfigPhysicalReadPlan { - private String templateName; + private final String templateName; - public GetSchemaTemplatePlan() { + public GetSchemaTemplatePlan(final String templateName) { super(ConfigPhysicalPlanType.GetSchemaTemplate); - } - - public GetSchemaTemplatePlan(String templateName) { - this(); this.templateName = templateName; } @@ -47,25 +38,14 @@ public String getTemplateName() { } @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - ReadWriteIOUtils.write(getType().getPlanType(), stream); - ReadWriteIOUtils.write(templateName, stream); - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { - this.templateName = ReadWriteIOUtils.readString(buffer); - } - - @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - GetSchemaTemplatePlan that = (GetSchemaTemplatePlan) o; + final GetSchemaTemplatePlan that = (GetSchemaTemplatePlan) o; return this.templateName.equalsIgnoreCase(that.templateName); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetTemplateSetInfoPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetTemplateSetInfoPlan.java index a63e5ca912edc..4ee8ebd7afd9a 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetTemplateSetInfoPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/template/GetTemplateSetInfoPlan.java @@ -20,27 +20,16 @@ package org.apache.iotdb.confignode.consensus.request.read.template; import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.path.PathDeserializeUtil; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; -import org.apache.tsfile.utils.ReadWriteIOUtils; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; import java.util.List; -public class GetTemplateSetInfoPlan extends ConfigPhysicalPlan { - - private List<PartialPath> patternList; +public class GetTemplateSetInfoPlan extends ConfigPhysicalReadPlan { - public GetTemplateSetInfoPlan() { -
super(ConfigPhysicalPlanType.GetTemplateSetInfo); - } + private final List<PartialPath> patternList; - public GetTemplateSetInfoPlan(List<PartialPath> patternList) { + public GetTemplateSetInfoPlan(final List<PartialPath> patternList) { super(ConfigPhysicalPlanType.GetTemplateSetInfo); this.patternList = patternList; } @@ -48,22 +37,4 @@ public GetTemplateSetInfoPlan(List<PartialPath> patternList) { public List<PartialPath> getPatternList() { return patternList; } - - @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - ReadWriteIOUtils.write(getType().getPlanType(), stream); - ReadWriteIOUtils.write(patternList.size(), stream); - for (PartialPath pattern : patternList) { - pattern.serialize(stream); - } - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { - int size = ReadWriteIOUtils.readInt(buffer); - patternList = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - patternList.add((PartialPath) PathDeserializeUtil.deserialize(buffer)); - } - } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/trigger/GetTransferringTriggersPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/trigger/GetTransferringTriggersPlan.java index f7c9bf599e9de..d919ffd40ba1e 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/trigger/GetTransferringTriggersPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/trigger/GetTransferringTriggersPlan.java @@ -19,26 +19,12 @@ package org.apache.iotdb.confignode.consensus.request.read.trigger; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; - -public class GetTransferringTriggersPlan extends ConfigPhysicalPlan { +public class GetTransferringTriggersPlan extends ConfigPhysicalReadPlan { public GetTransferringTriggersPlan() { super(ConfigPhysicalPlanType.GetTransferringTriggers); } - - @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - stream.writeShort(getType().getPlanType()); - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { - // do nothing - } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/trigger/GetTriggerJarPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/trigger/GetTriggerJarPlan.java index 09addc05b859d..04ded3f97e5cd 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/trigger/GetTriggerJarPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/trigger/GetTriggerJarPlan.java @@ -19,27 +19,17 @@ package org.apache.iotdb.confignode.consensus.request.read.trigger; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; -import org.apache.tsfile.utils.ReadWriteIOUtils; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; import java.util.List; import java.util.Objects;
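
For context on the refactoring repeated across these read-plan files: each class now extends ConfigPhysicalReadPlan instead of ConfigPhysicalPlan and drops its serializeImpl/deserializeImpl overrides, since read plans are answered directly from ConfigNode state and are never written to the consensus log. The base class itself does not appear in this diff; the following is only a minimal sketch of what it plausibly looks like, and the entire class body is an assumption rather than the actual source:

package org.apache.iotdb.confignode.consensus.request.read;

import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;

import java.io.DataOutputStream;
import java.nio.ByteBuffer;

// Hypothetical sketch of the shared base class for read-only config plans.
// Because read plans are never replicated or persisted, the serialization
// hooks can reject accidental use once here, instead of every subclass
// carrying its own boilerplate implementation.
public abstract class ConfigPhysicalReadPlan extends ConfigPhysicalPlan {

  protected ConfigPhysicalReadPlan(final ConfigPhysicalPlanType type) {
    super(type);
  }

  @Override
  protected void serializeImpl(final DataOutputStream stream) {
    throw new UnsupportedOperationException("Read plans are never serialized");
  }

  @Override
  protected void deserializeImpl(final ByteBuffer buffer) {
    throw new UnsupportedOperationException("Read plans are never deserialized");
  }
}
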
-public class GetTriggerJarPlan extends ConfigPhysicalPlan { - - private List<String> jarNames; +public class GetTriggerJarPlan extends ConfigPhysicalReadPlan { - public GetTriggerJarPlan() { - super(ConfigPhysicalPlanType.GetTriggerJar); - } + private final List<String> jarNames; - public GetTriggerJarPlan(List<String> triggerNames) { + public GetTriggerJarPlan(final List<String> triggerNames) { super(ConfigPhysicalPlanType.GetTriggerJar); jarNames = triggerNames; } @@ -49,27 +39,7 @@ public List<String> getJarNames() { } @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - stream.writeShort(getType().getPlanType()); - - ReadWriteIOUtils.write(jarNames.size(), stream); - for (String jarName : jarNames) { - ReadWriteIOUtils.write(jarName, stream); - } - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { - int size = ReadWriteIOUtils.readInt(buffer); - jarNames = new ArrayList<>(size); - while (size > 0) { - jarNames.add(ReadWriteIOUtils.readString(buffer)); - size--; - } - } - - @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -79,7 +49,7 @@ public boolean equals(Object o) { if (!super.equals(o)) { return false; } - GetTriggerJarPlan that = (GetTriggerJarPlan) o; + final GetTriggerJarPlan that = (GetTriggerJarPlan) o; return Objects.equals(jarNames, that.jarNames); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/trigger/GetTriggerLocationPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/trigger/GetTriggerLocationPlan.java index 597392519de13..a3dad77c035ea 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/trigger/GetTriggerLocationPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/trigger/GetTriggerLocationPlan.java @@ -19,26 +19,16 @@ package org.apache.iotdb.confignode.consensus.request.read.trigger; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; -import org.apache.tsfile.utils.ReadWriteIOUtils; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; import java.util.Objects; -public class GetTriggerLocationPlan extends ConfigPhysicalPlan { - - String triggerName; +public class GetTriggerLocationPlan extends ConfigPhysicalReadPlan { + private final String triggerName; - public GetTriggerLocationPlan() { + public GetTriggerLocationPlan(final String triggerName) { super(ConfigPhysicalPlanType.GetTriggerLocation); - } - - public GetTriggerLocationPlan(String triggerName) { - this(); this.triggerName = triggerName; } @@ -46,24 +36,8 @@ public String getTriggerName() { return triggerName; } - public void setTriggerName(String triggerName) { - this.triggerName = triggerName; - } - - @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - stream.writeShort(getType().getPlanType()); - - ReadWriteIOUtils.write(triggerName, stream); - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { - this.triggerName = ReadWriteIOUtils.readString(buffer); - } - @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -73,7 +47,7 @@ public boolean
equals(Object o) { if (!super.equals(o)) { return false; } - GetTriggerLocationPlan that = (GetTriggerLocationPlan) o; + final GetTriggerLocationPlan that = (GetTriggerLocationPlan) o; return Objects.equals(triggerName, that.triggerName); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/trigger/GetTriggerTablePlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/trigger/GetTriggerTablePlan.java index a1af8644ce01c..4388d78da2488 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/trigger/GetTriggerTablePlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/trigger/GetTriggerTablePlan.java @@ -19,26 +19,16 @@ package org.apache.iotdb.confignode.consensus.request.read.trigger; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; -import org.apache.tsfile.utils.ReadWriteIOUtils; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; import java.util.Objects; -public class GetTriggerTablePlan extends ConfigPhysicalPlan { - - boolean onlyStateful; +public class GetTriggerTablePlan extends ConfigPhysicalReadPlan { + private final boolean onlyStateful; - public GetTriggerTablePlan() { + public GetTriggerTablePlan(final boolean onlyStateful) { super(ConfigPhysicalPlanType.GetTriggerTable); - } - - public GetTriggerTablePlan(boolean onlyStateful) { - this(); this.onlyStateful = onlyStateful; } @@ -46,24 +36,8 @@ public boolean isOnlyStateful() { return onlyStateful; } - public void setOnlyStateful(boolean onlyStateful) { - this.onlyStateful = onlyStateful; - } - - @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - stream.writeShort(getType().getPlanType()); - - ReadWriteIOUtils.write(onlyStateful, stream); - } - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { - this.onlyStateful = ReadWriteIOUtils.readBool(buffer); - } - @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -73,7 +47,7 @@ public boolean equals(Object o) { if (!super.equals(o)) { return false; } - GetTriggerTablePlan that = (GetTriggerTablePlan) o; + final GetTriggerTablePlan that = (GetTriggerTablePlan) o; return onlyStateful == that.onlyStateful; } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/ttl/ShowTTLPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/ttl/ShowTTLPlan.java index 9dd9eded31eb8..9f2992eeeb1ac 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/ttl/ShowTTLPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/ttl/ShowTTLPlan.java @@ -16,18 +16,15 @@ * specific language governing permissions and limitations * under the License. 
*/ + package org.apache.iotdb.confignode.consensus.request.read.ttl; -import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; import org.apache.iotdb.db.utils.constant.SqlConstant; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; - -public class ShowTTLPlan extends ConfigPhysicalPlan { - private String[] pathPattern; +public class ShowTTLPlan extends ConfigPhysicalReadPlan { + private final String[] pathPattern; public String[] getPathPattern() { return pathPattern; @@ -38,14 +35,8 @@ public ShowTTLPlan() { this.pathPattern = SqlConstant.getSingleRootArray(); } - public ShowTTLPlan(String[] pathPattern) { + public ShowTTLPlan(final String[] pathPattern) { super(ConfigPhysicalPlanType.ShowTTL); this.pathPattern = pathPattern; } - - @Override - protected void serializeImpl(DataOutputStream stream) throws IOException {} - - @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException {} } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/ainode/RegisterAINodePlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/ainode/RegisterAINodePlan.java new file mode 100644 index 0000000000000..5f5cb9ae1ecf5 --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/ainode/RegisterAINodePlan.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.consensus.request.write.ainode; + +import org.apache.iotdb.common.rpc.thrift.TAINodeConfiguration; +import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils; +import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; +import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Objects; + +public class RegisterAINodePlan extends ConfigPhysicalPlan { + + private TAINodeConfiguration aiNodeConfiguration; + + public RegisterAINodePlan() { + super(ConfigPhysicalPlanType.RegisterAINode); + } + + public RegisterAINodePlan(TAINodeConfiguration aiNodeConfiguration) { + this(); + this.aiNodeConfiguration = aiNodeConfiguration; + } + + public TAINodeConfiguration getAINodeConfiguration() { + return aiNodeConfiguration; + } + + @Override + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeShort(getType().getPlanType()); + ThriftCommonsSerDeUtils.serializeTAINodeInfo(aiNodeConfiguration, stream); + } + + @Override + protected void deserializeImpl(ByteBuffer buffer) throws IOException { + aiNodeConfiguration = ThriftCommonsSerDeUtils.deserializeTAINodeInfo(buffer); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + RegisterAINodePlan that = (RegisterAINodePlan) o; + return aiNodeConfiguration.equals(that.aiNodeConfiguration); + } + + @Override + public int hashCode() { + return Objects.hash(aiNodeConfiguration); + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/ainode/RemoveAINodePlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/ainode/RemoveAINodePlan.java new file mode 100644 index 0000000000000..92bfb8b7017fa --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/ainode/RemoveAINodePlan.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.consensus.request.write.ainode; + +import org.apache.iotdb.common.rpc.thrift.TAINodeLocation; +import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils; +import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; +import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Objects; + +public class RemoveAINodePlan extends ConfigPhysicalPlan { + + private TAINodeLocation aiNodeLocation; + + public RemoveAINodePlan() { + super(ConfigPhysicalPlanType.RemoveAINode); + } + + public RemoveAINodePlan(TAINodeLocation taiNodeLocation) { + this(); + this.aiNodeLocation = taiNodeLocation; + } + + @Override + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeShort(getType().getPlanType()); + ThriftCommonsSerDeUtils.serializeTAINodeLocation(aiNodeLocation, stream); + } + + @Override + protected void deserializeImpl(ByteBuffer buffer) throws IOException { + this.aiNodeLocation = ThriftCommonsSerDeUtils.deserializeTAINodeLocation(buffer); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + RemoveAINodePlan that = (RemoveAINodePlan) o; + return aiNodeLocation.equals(that.aiNodeLocation); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), aiNodeLocation); + } + + public TAINodeLocation getAINodeLocation() { + return aiNodeLocation; + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/ainode/UpdateAINodePlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/ainode/UpdateAINodePlan.java new file mode 100644 index 0000000000000..5ef885551d7ab --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/ainode/UpdateAINodePlan.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.consensus.request.write.ainode; + +import org.apache.iotdb.common.rpc.thrift.TAINodeConfiguration; +import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils; +import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; +import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Objects; + +public class UpdateAINodePlan extends ConfigPhysicalPlan { + + private TAINodeConfiguration aiNodeConfiguration; + + public UpdateAINodePlan() { + super(ConfigPhysicalPlanType.UpdateAINodeConfiguration); + } + + public UpdateAINodePlan(TAINodeConfiguration aiNodeConfiguration) { + this(); + this.aiNodeConfiguration = aiNodeConfiguration; + } + + public TAINodeConfiguration getAINodeConfiguration() { + return aiNodeConfiguration; + } + + @Override + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeShort(getType().getPlanType()); + ThriftCommonsSerDeUtils.serializeTAINodeConfiguration(aiNodeConfiguration, stream); + } + + @Override + protected void deserializeImpl(ByteBuffer buffer) throws IOException { + aiNodeConfiguration = ThriftCommonsSerDeUtils.deserializeTAINodeConfiguration(buffer); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!getType().equals(((UpdateAINodePlan) o).getType())) { + return false; + } + UpdateAINodePlan that = (UpdateAINodePlan) o; + return aiNodeConfiguration.equals(that.aiNodeConfiguration); + } + + @Override + public int hashCode() { + return Objects.hash(getType(), aiNodeConfiguration); + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/auth/AuthorPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/auth/AuthorPlan.java similarity index 67% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/auth/AuthorPlan.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/auth/AuthorPlan.java index be162647f8fca..8711cd86587ab 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/auth/AuthorPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/auth/AuthorPlan.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.apache.iotdb.confignode.consensus.request.auth; +package org.apache.iotdb.confignode.consensus.request.write.auth; import org.apache.iotdb.commons.auth.entity.PrivilegeType; import org.apache.iotdb.commons.exception.MetadataException; @@ -91,7 +91,7 @@ public ConfigPhysicalPlanType getAuthorType() { return authorType; } - public void setAuthorType(ConfigPhysicalPlanType authorType) { + public void setAuthorType(final ConfigPhysicalPlanType authorType) { this.authorType = authorType; } @@ -99,7 +99,7 @@ public String getRoleName() { return roleName; } - public void setRoleName(String roleName) { + public void setRoleName(final String roleName) { this.roleName = roleName; } @@ -107,7 +107,7 @@ public String getPassword() { return password; } - public void setPassword(String password) { + public void setPassword(final String password) { this.password = password; } @@ -115,15 +111,11 @@ public String getNewPassword() { return newPassword; } - public void setNewPassword(String newPassword) { - this.newPassword = newPassword; - } - public Set<Integer> getPermissions() { return permissions; } - public void setPermissions(Set<Integer> permissions) { + public void setPermissions(final Set<Integer> permissions) { this.permissions = permissions; } @@ -131,7 +127,7 @@ public boolean getGrantOpt() { return this.grantOpt; } - public void setGrantOpt(boolean grantOpt) { + public void setGrantOpt(final boolean grantOpt) { this.grantOpt = grantOpt; } @@ -139,7 +135,7 @@ public List<PartialPath> getNodeNameList() { return nodeNameList; } - public void setNodeNameList(List<PartialPath> nodeNameList) { + public void setNodeNameList(final List<PartialPath> nodeNameList) { this.nodeNameList = nodeNameList; } @@ -147,13 +143,13 @@ public String getUserName() { return userName; } - public void setUserName(String userName) { + public void setUserName(final String userName) { this.userName = userName; } @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { - ReadWriteIOUtils.write(getPlanType(authorType), stream); + protected void serializeImpl(final DataOutputStream stream) throws IOException { + ReadWriteIOUtils.write(authorType.getPlanType(), stream); BasicStructureSerDeUtil.write(userName, stream); BasicStructureSerDeUtil.write(roleName, stream); BasicStructureSerDeUtil.write(password, stream); @@ -163,12 +159,12 @@ protected void serializeImpl(DataOutputStream stream) throws IOException { } else { stream.write((byte) 1); stream.writeInt(permissions.size()); - for (int permission : permissions) { + for (final int permission : permissions) { stream.writeInt(permission); } } BasicStructureSerDeUtil.write(nodeNameList.size(), stream); - for (PartialPath partialPath : nodeNameList) { + for (final PartialPath partialPath : nodeNameList) { BasicStructureSerDeUtil.write(partialPath.getFullPath(), stream); } BasicStructureSerDeUtil.write(grantOpt ?
1 : 0, stream); @@ -184,14 +180,14 @@ protected void deserializeImpl(ByteBuffer buffer) { if (hasPermissions == (byte) 0) { this.permissions = null; } else { - int permissionsSize = buffer.getInt(); + final int permissionsSize = buffer.getInt(); this.permissions = new HashSet<>(); for (int i = 0; i < permissionsSize; i++) { permissions.add(buffer.getInt()); } } - int nodeNameListSize = BasicStructureSerDeUtil.readInt(buffer); + final int nodeNameListSize = BasicStructureSerDeUtil.readInt(buffer); nodeNameList = new ArrayList<>(nodeNameListSize); try { for (int i = 0; i < nodeNameListSize; i++) { @@ -206,78 +202,15 @@ protected void deserializeImpl(ByteBuffer buffer) { } } - private short getPlanType(ConfigPhysicalPlanType configPhysicalPlanType) { - short type; - switch (configPhysicalPlanType) { - case CreateUser: - type = ConfigPhysicalPlanType.CreateUser.getPlanType(); - break; - case CreateRole: - type = ConfigPhysicalPlanType.CreateRole.getPlanType(); - break; - case DropUser: - type = ConfigPhysicalPlanType.DropUser.getPlanType(); - break; - case DropRole: - type = ConfigPhysicalPlanType.DropRole.getPlanType(); - break; - case GrantRole: - type = ConfigPhysicalPlanType.GrantRole.getPlanType(); - break; - case GrantUser: - type = ConfigPhysicalPlanType.GrantUser.getPlanType(); - break; - case GrantRoleToUser: - type = ConfigPhysicalPlanType.GrantRoleToUser.getPlanType(); - break; - case RevokeUser: - type = ConfigPhysicalPlanType.RevokeUser.getPlanType(); - break; - case RevokeRole: - type = ConfigPhysicalPlanType.RevokeRole.getPlanType(); - break; - case RevokeRoleFromUser: - type = ConfigPhysicalPlanType.RevokeRoleFromUser.getPlanType(); - break; - case UpdateUser: - type = ConfigPhysicalPlanType.UpdateUser.getPlanType(); - break; - case ListUser: - type = ConfigPhysicalPlanType.ListUser.getPlanType(); - break; - case ListRole: - type = ConfigPhysicalPlanType.ListRole.getPlanType(); - break; - case ListUserPrivilege: - type = ConfigPhysicalPlanType.ListUserPrivilege.getPlanType(); - break; - case ListRolePrivilege: - type = ConfigPhysicalPlanType.ListRolePrivilege.getPlanType(); - break; - case ListUserRoles: - type = ConfigPhysicalPlanType.ListUserRoles.getPlanType(); - break; - case ListRoleUsers: - type = ConfigPhysicalPlanType.ListRoleUsers.getPlanType(); - break; - case CreateUserWithRawPassword: - type = ConfigPhysicalPlanType.CreateUserWithRawPassword.getPlanType(); - break; - default: - throw new IllegalArgumentException("Unknown operator: " + configPhysicalPlanType); - } - return type; - } - @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - AuthorPlan that = (AuthorPlan) o; + final AuthorPlan that = (AuthorPlan) o; return Objects.equals(authorType, that.authorType) && Objects.equals(userName, that.userName) && Objects.equals(roleName, that.roleName) diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/model/CreateModelPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/model/CreateModelPlan.java new file mode 100644 index 0000000000000..61e37cdd21877 --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/model/CreateModelPlan.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.consensus.request.write.model; + +import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; +import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; + +import org.apache.tsfile.utils.ReadWriteIOUtils; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Objects; + +public class CreateModelPlan extends ConfigPhysicalPlan { + + private String modelName; + + public CreateModelPlan() { + super(ConfigPhysicalPlanType.CreateModel); + } + + public CreateModelPlan(String modelName) { + super(ConfigPhysicalPlanType.CreateModel); + this.modelName = modelName; + } + + public String getModelName() { + return modelName; + } + + @Override + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeShort(getType().getPlanType()); + ReadWriteIOUtils.write(modelName, stream); + } + + @Override + protected void deserializeImpl(ByteBuffer buffer) throws IOException { + modelName = ReadWriteIOUtils.readString(buffer); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + CreateModelPlan that = (CreateModelPlan) o; + return Objects.equals(modelName, that.modelName); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), modelName); + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/sync/ShowPipePlanV1.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/model/DropModelInNodePlan.java similarity index 61% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/sync/ShowPipePlanV1.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/model/DropModelInNodePlan.java index 0d8af2d157330..885543f84e156 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/sync/ShowPipePlanV1.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/model/DropModelInNodePlan.java @@ -17,43 +17,54 @@ * under the License. 
*/ -package org.apache.iotdb.confignode.consensus.request.write.sync; +package org.apache.iotdb.confignode.consensus.request.write.model; -import org.apache.iotdb.commons.utils.BasicStructureSerDeUtil; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.Objects; -// Deprecated, restored for upgrade -@Deprecated -public class ShowPipePlanV1 extends ConfigPhysicalPlan { - /** empty pipeName means show all PIPE */ - private String pipeName; +public class DropModelInNodePlan extends ConfigPhysicalPlan { - public ShowPipePlanV1() { - super(ConfigPhysicalPlanType.ShowPipeV1); + private int nodeId; + + public DropModelInNodePlan() { + super(ConfigPhysicalPlanType.DropModelInNode); } - public ShowPipePlanV1(String pipeName) { - this(); - this.pipeName = pipeName; + public DropModelInNodePlan(int nodeId) { + super(ConfigPhysicalPlanType.DropModelInNode); + this.nodeId = nodeId; } - public String getPipeName() { - return pipeName; + public int getNodeId() { + return nodeId; } @Override protected void serializeImpl(DataOutputStream stream) throws IOException { stream.writeShort(getType().getPlanType()); - BasicStructureSerDeUtil.write(pipeName, stream); + stream.writeInt(nodeId); } @Override protected void deserializeImpl(ByteBuffer buffer) throws IOException { - pipeName = BasicStructureSerDeUtil.readString(buffer); + nodeId = buffer.getInt(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof DropModelInNodePlan)) return false; + DropModelInNodePlan that = (DropModelInNodePlan) o; + return nodeId == that.nodeId; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), nodeId); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/model/DropModelPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/model/DropModelPlan.java new file mode 100644 index 0000000000000..813b116c645c5 --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/model/DropModelPlan.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.consensus.request.write.model; + +import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; +import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; + +import org.apache.tsfile.utils.ReadWriteIOUtils; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Objects; + +public class DropModelPlan extends ConfigPhysicalPlan { + + private String modelName; + + public DropModelPlan() { + super(ConfigPhysicalPlanType.DropModel); + } + + public DropModelPlan(String modelName) { + super(ConfigPhysicalPlanType.DropModel); + this.modelName = modelName; + } + + public String getModelName() { + return modelName; + } + + @Override + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeShort(getType().getPlanType()); + ReadWriteIOUtils.write(modelName, stream); + } + + @Override + protected void deserializeImpl(ByteBuffer buffer) throws IOException { + modelName = ReadWriteIOUtils.readString(buffer); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + DropModelPlan that = (DropModelPlan) o; + return Objects.equals(modelName, that.modelName); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), modelName); + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/model/UpdateModelInfoPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/model/UpdateModelInfoPlan.java new file mode 100644 index 0000000000000..ca74d2daf69dc --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/model/UpdateModelInfoPlan.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.apache.iotdb.confignode.consensus.request.write.model; + +import org.apache.iotdb.commons.model.ModelInformation; +import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; +import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; + +import org.apache.tsfile.utils.ReadWriteIOUtils; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +public class UpdateModelInfoPlan extends ConfigPhysicalPlan { + + private String modelName; + private ModelInformation modelInformation; + + // The nodes that hold the model; the list is only updated during model registration + private List<Integer> nodeIds; + + public UpdateModelInfoPlan() { + super(ConfigPhysicalPlanType.UpdateModelInfo); + } + + public UpdateModelInfoPlan(String modelName, ModelInformation modelInformation) { + super(ConfigPhysicalPlanType.UpdateModelInfo); + this.modelName = modelName; + this.modelInformation = modelInformation; + this.nodeIds = Collections.emptyList(); + } + + public UpdateModelInfoPlan( + String modelName, ModelInformation modelInformation, List<Integer> nodeIds) { + super(ConfigPhysicalPlanType.UpdateModelInfo); + this.modelName = modelName; + this.modelInformation = modelInformation; + this.nodeIds = nodeIds; + } + + public String getModelName() { + return modelName; + } + + public ModelInformation getModelInformation() { + return modelInformation; + } + + public List<Integer> getNodeIds() { + return nodeIds; + } + + @Override + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeShort(getType().getPlanType()); + ReadWriteIOUtils.write(modelName, stream); + this.modelInformation.serialize(stream); + ReadWriteIOUtils.write(nodeIds.size(), stream); + for (Integer nodeId : nodeIds) { + ReadWriteIOUtils.write(nodeId, stream); + } + } + + @Override + protected void deserializeImpl(ByteBuffer buffer) throws IOException { + this.modelName = ReadWriteIOUtils.readString(buffer); + this.modelInformation = ModelInformation.deserialize(buffer); + int size = ReadWriteIOUtils.readInt(buffer); + this.nodeIds = new ArrayList<>(); + for (int i = 0; i < size; i++) { + this.nodeIds.add(ReadWriteIOUtils.readInt(buffer)); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + UpdateModelInfoPlan that = (UpdateModelInfoPlan) o; + return modelName.equals(that.modelName) + && modelInformation.equals(that.modelInformation) + && nodeIds.equals(that.nodeIds); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), modelName, modelInformation, nodeIds); + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/partition/AutoCleanPartitionTablePlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/partition/AutoCleanPartitionTablePlan.java new file mode 100644 index 0000000000000..1b1bfbe38f029 --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/partition/AutoCleanPartitionTablePlan.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.consensus.request.write.partition; + +import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; +import org.apache.iotdb.commons.utils.BasicStructureSerDeUtil; +import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils; +import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; +import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Map; +import java.util.Objects; +import java.util.TreeMap; + +public class AutoCleanPartitionTablePlan extends ConfigPhysicalPlan { + + Map<String, Long> databaseTTLMap; + TTimePartitionSlot currentTimeSlot; + + public AutoCleanPartitionTablePlan() { + super(ConfigPhysicalPlanType.AutoCleanPartitionTable); + } + + public AutoCleanPartitionTablePlan( + Map<String, Long> databaseTTLMap, TTimePartitionSlot currentTimeSlot) { + this(); + this.databaseTTLMap = databaseTTLMap; + this.currentTimeSlot = currentTimeSlot; + } + + public Map<String, Long> getDatabaseTTLMap() { + return databaseTTLMap; + } + + public TTimePartitionSlot getCurrentTimeSlot() { + return currentTimeSlot; + } + + @Override + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeShort(getType().getPlanType()); + stream.writeInt(databaseTTLMap.size()); + for (Map.Entry<String, Long> entry : databaseTTLMap.entrySet()) { + BasicStructureSerDeUtil.write(entry.getKey(), stream); + stream.writeLong(entry.getValue()); + } + ThriftCommonsSerDeUtils.serializeTTimePartitionSlot(currentTimeSlot, stream); + } + + @Override + protected void deserializeImpl(ByteBuffer buffer) throws IOException { + int size = buffer.getInt(); + databaseTTLMap = new TreeMap<>(); + for (int i = 0; i < size; i++) { + String key = BasicStructureSerDeUtil.readString(buffer); + long value = buffer.getLong(); + databaseTTLMap.put(key, value); + } + currentTimeSlot = ThriftCommonsSerDeUtils.deserializeTTimePartitionSlot(buffer); + } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + AutoCleanPartitionTablePlan that = (AutoCleanPartitionTablePlan) o; + return Objects.equals(databaseTTLMap, that.databaseTTLMap) + && Objects.equals(currentTimeSlot, that.currentTimeSlot); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), databaseTTLMap, currentTimeSlot); + } +}
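One detail worth noting in AutoCleanPartitionTablePlan: serializeImpl writes out whatever Map it was handed, but deserializeImpl always rebuilds the TTL map as a TreeMap, so after a round trip the databases iterate in name-sorted, deterministic order regardless of the original map type. A small JDK-only illustration of that effect:

```java
import java.util.*;

class TreeMapOrderDemo {
  public static void main(String[] args) {
    Map<String, Long> ttl = new HashMap<>();
    ttl.put("root.sg2", 2000L);
    ttl.put("root.sg1", 1000L);

    // HashMap iteration order is unspecified...
    System.out.println(ttl.keySet());

    // ...but copying into a TreeMap (as deserializeImpl does) yields
    // a deterministic, name-sorted order for any later iteration.
    Map<String, Long> sorted = new TreeMap<>(ttl);
    System.out.println(sorted.keySet()); // [root.sg1, root.sg2]
  }
}
```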
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/partition/RemoveRegionLocationPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/partition/RemoveRegionLocationPlan.java index 9dc6a00705289..47bc6493aa169 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/partition/RemoveRegionLocationPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/partition/RemoveRegionLocationPlan.java @@ -58,11 +58,6 @@ protected void deserializeImpl(ByteBuffer buffer) throws IOException { deprecatedLocation = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(buffer); } - @Override - public ConfigPhysicalPlanType getType() { - return super.getType(); - } - public TConsensusGroupId getRegionId() { return regionId; } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/payload/PipeDeleteLogicalViewPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/payload/PipeDeleteLogicalViewPlan.java index 6865b135c7b22..11e050be9d18a 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/payload/PipeDeleteLogicalViewPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/payload/PipeDeleteLogicalViewPlan.java @@ -24,6 +24,8 @@ import org.apache.tsfile.utils.ReadWriteIOUtils; +import javax.annotation.Nonnull; + import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; @@ -38,12 +40,13 @@ public PipeDeleteLogicalViewPlan() { super(ConfigPhysicalPlanType.PipeDeleteLogicalView); } - public PipeDeleteLogicalViewPlan(final ByteBuffer patternTreeBytes) { + public PipeDeleteLogicalViewPlan(final @Nonnull ByteBuffer patternTreeBytes) { super(ConfigPhysicalPlanType.PipeDeleteLogicalView); this.patternTreeBytes = patternTreeBytes; } public ByteBuffer getPatternTreeBytes() { + patternTreeBytes.rewind(); return patternTreeBytes; } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/payload/PipeDeleteTimeSeriesPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/payload/PipeDeleteTimeSeriesPlan.java index 2b651cb034d7a..a5379006bcdcd 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/payload/PipeDeleteTimeSeriesPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/payload/PipeDeleteTimeSeriesPlan.java @@ -24,6 +24,8 @@ import org.apache.tsfile.utils.ReadWriteIOUtils; +import javax.annotation.Nonnull; + import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; @@ -38,12 +40,13 @@ public PipeDeleteTimeSeriesPlan() { super(ConfigPhysicalPlanType.PipeDeleteTimeSeries); } - public PipeDeleteTimeSeriesPlan(final ByteBuffer patternTreeBytes) { + public PipeDeleteTimeSeriesPlan(final @Nonnull ByteBuffer patternTreeBytes) { super(ConfigPhysicalPlanType.PipeDeleteTimeSeries); this.patternTreeBytes = patternTreeBytes; } public ByteBuffer getPatternTreeBytes() { + patternTreeBytes.rewind(); return patternTreeBytes; }
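The only behavioral change in the two pipe payload plans above is the rewind() added to getPatternTreeBytes(): reading a ByteBuffer advances its position, so without the rewind a second caller (or a serialization pass after an earlier read) would see an apparently empty buffer. A minimal illustration:

```java
import java.nio.ByteBuffer;

class RewindDemo {
  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.wrap(new byte[] {1, 2, 3, 4});

    buf.getInt();                        // a consumer reads the buffer...
    System.out.println(buf.remaining()); // 0 -- looks empty to the next caller

    buf.rewind();                        // what the getter now does defensively
    System.out.println(buf.remaining()); // 4 -- full content visible again
  }
}
```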
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/plugin/CreatePipePluginPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/plugin/CreatePipePluginPlan.java index ea4c986142bed..29cc394ca1e65 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/plugin/CreatePipePluginPlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/plugin/CreatePipePluginPlan.java @@ -19,7 +19,7 @@ package org.apache.iotdb.confignode.consensus.request.write.pipe.plugin; -import org.apache.iotdb.commons.pipe.plugin.meta.PipePluginMeta; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.PipePluginMeta; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/runtime/PipeHandleMetaChangePlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/runtime/PipeHandleMetaChangePlan.java index b08905880b9e3..74a3cf37c94bd 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/runtime/PipeHandleMetaChangePlan.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/runtime/PipeHandleMetaChangePlan.java @@ -19,7 +19,7 @@ package org.apache.iotdb.confignode.consensus.request.write.pipe.runtime; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; @@ -48,7 +48,7 @@ public List<PipeMeta> getPipeMetaList() { } @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { + protected void serializeImpl(final DataOutputStream stream) throws IOException { stream.writeShort(getType().getPlanType()); stream.writeInt(pipeMetaList.size()); @@ -58,16 +58,16 @@ protected void serializeImpl(DataOutputStream stream) throws IOException { } @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { + protected void deserializeImpl(final ByteBuffer buffer) throws IOException { int size = buffer.getInt(); for (int i = 0; i < size; i++) { - PipeMeta pipeMeta = PipeMeta.deserialize(buffer); + PipeMeta pipeMeta = PipeMeta.deserialize4Coordinator(buffer); pipeMetaList.add(pipeMeta); } } @Override - public boolean equals(Object obj) { + public boolean equals(final Object obj) { if (this == obj) { return true; } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/task/AlterPipePlanV2.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/task/AlterPipePlanV2.java index af4caffe0092d..e380218991793 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/task/AlterPipePlanV2.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/task/AlterPipePlanV2.java @@ -19,8 +19,8 @@ package org.apache.iotdb.confignode.consensus.request.write.pipe.task; -import org.apache.iotdb.commons.pipe.task.meta.PipeRuntimeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeRuntimeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/task/CreatePipePlanV2.java
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/task/CreatePipePlanV2.java index 11dd2ae7c9c90..006d0523130f9 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/task/CreatePipePlanV2.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/task/CreatePipePlanV2.java @@ -19,8 +19,8 @@ package org.apache.iotdb.confignode.consensus.request.write.pipe.task; -import org.apache.iotdb.commons.pipe.task.meta.PipeRuntimeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeRuntimeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/task/SetPipeStatusPlanV2.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/task/SetPipeStatusPlanV2.java index b3cdb043a338e..7f417702dd385 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/task/SetPipeStatusPlanV2.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/task/SetPipeStatusPlanV2.java @@ -19,7 +19,7 @@ package org.apache.iotdb.confignode.consensus.request.write.pipe.task; -import org.apache.iotdb.commons.pipe.task.meta.PipeStatus; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/ainode/AINodeConfigurationResp.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/ainode/AINodeConfigurationResp.java new file mode 100644 index 0000000000000..018ed605bb31f --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/ainode/AINodeConfigurationResp.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.consensus.response.ainode; + +import org.apache.iotdb.common.rpc.thrift.TAINodeConfiguration; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.confignode.rpc.thrift.TAINodeConfigurationResp; +import org.apache.iotdb.consensus.common.DataSet; +import org.apache.iotdb.rpc.TSStatusCode; + +import java.util.Map; + +public class AINodeConfigurationResp implements DataSet { + + private TSStatus status; + private Map<Integer, TAINodeConfiguration> aiNodeConfigurationMap; + + public AINodeConfigurationResp() { + // empty constructor + } + + public void setStatus(TSStatus status) { + this.status = status; + } + + public TSStatus getStatus() { + return status; + } + + public void setAiNodeConfigurationMap(Map<Integer, TAINodeConfiguration> aiNodeConfigurationMap) { + this.aiNodeConfigurationMap = aiNodeConfigurationMap; + } + + public void convertToRpcAINodeLocationResp(TAINodeConfigurationResp resp) { + resp.setStatus(status); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + resp.setAiNodeConfigurationMap(aiNodeConfigurationMap); + } + } +}
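AINodeConfigurationResp follows the same convention as the module's other consensus response wrappers: hold a TSStatus plus a payload, and copy the payload into the Thrift response only when the status is SUCCESS_STATUS. A generic sketch of that pattern (the names here are illustrative, not the real classes):

```java
// Illustrative only; the real classes pair a TSStatus with a generated Thrift resp.
class StatusWrappedResp<P> {
  static final int SUCCESS = 200; // stands in for TSStatusCode.SUCCESS_STATUS

  private final int statusCode;   // stands in for TSStatus
  private P payload;

  StatusWrappedResp(int statusCode) { this.statusCode = statusCode; }

  void setPayload(P payload) { this.payload = payload; }

  // The payload is attached only on success, mirroring convertToRpcAINodeLocationResp.
  P toRpcPayload() { return statusCode == SUCCESS ? payload : null; }

  public static void main(String[] args) {
    StatusWrappedResp<String> ok = new StatusWrappedResp<>(SUCCESS);
    ok.setPayload("configuration-map");
    System.out.println(ok.toRpcPayload()); // configuration-map

    StatusWrappedResp<String> failed = new StatusWrappedResp<>(500);
    failed.setPayload("configuration-map");
    System.out.println(failed.toRpcPayload()); // null
  }
}
```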
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/ainode/AINodeRegisterResp.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/ainode/AINodeRegisterResp.java new file mode 100644 index 0000000000000..c5b9e4b02260d --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/ainode/AINodeRegisterResp.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.consensus.response.ainode; + +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.confignode.rpc.thrift.TAINodeRegisterResp; +import org.apache.iotdb.consensus.common.DataSet; +import org.apache.iotdb.rpc.TSStatusCode; + +import java.util.List; + +public class AINodeRegisterResp implements DataSet { + + private TSStatus status; + private List<TConfigNodeLocation> configNodeList; + private Integer aiNodeId; + + public AINodeRegisterResp() { + this.aiNodeId = null; + } + + public TSStatus getStatus() { + return status; + } + + public void setStatus(TSStatus status) { + this.status = status; + } + + public void setConfigNodeList(List<TConfigNodeLocation> configNodeList) { + this.configNodeList = configNodeList; + } + + public void setAINodeId(Integer aiNodeId) { + this.aiNodeId = aiNodeId; + } + + public TAINodeRegisterResp convertToAINodeRegisterResp() { + TAINodeRegisterResp resp = new TAINodeRegisterResp(); + resp.setStatus(status); + resp.setConfigNodeList(configNodeList); + + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + resp.setAiNodeId(aiNodeId); + } + + return resp; + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/model/GetModelInfoResp.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/model/GetModelInfoResp.java new file mode 100644 index 0000000000000..14101b95d1231 --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/model/GetModelInfoResp.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.apache.iotdb.confignode.consensus.response.model; + +import org.apache.iotdb.common.rpc.thrift.TAINodeConfiguration; +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.confignode.rpc.thrift.TGetModelInfoResp; +import org.apache.iotdb.consensus.common.DataSet; + +import java.nio.ByteBuffer; + +public class GetModelInfoResp implements DataSet { + + private final TSStatus status; + private ByteBuffer serializedModelInformation; + + private int targetAINodeId; + private TEndPoint targetAINodeAddress; + + public TSStatus getStatus() { + return status; + } + + public GetModelInfoResp(TSStatus status) { + this.status = status; + } + + public void setModelInfo(ByteBuffer serializedModelInformation) { + this.serializedModelInformation = serializedModelInformation; + } + + public int getTargetAINodeId() { + return targetAINodeId; + } + + public void setTargetAINodeId(int targetAINodeId) { + this.targetAINodeId = targetAINodeId; + } + + public void setTargetAINodeAddress(TAINodeConfiguration aiNodeConfiguration) { + if (aiNodeConfiguration.getLocation() == null) { + return; + } + this.targetAINodeAddress = aiNodeConfiguration.getLocation().getInternalEndPoint(); + } + + public TGetModelInfoResp convertToThriftResponse() { + TGetModelInfoResp resp = new TGetModelInfoResp(status); + resp.setModelInfo(serializedModelInformation); + resp.setAiNodeAddress(targetAINodeAddress); + return resp; + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/model/ModelTableResp.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/model/ModelTableResp.java new file mode 100644 index 0000000000000..9a23d9ed7130b --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/model/ModelTableResp.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.consensus.response.model; + +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.model.ModelInformation; +import org.apache.iotdb.confignode.rpc.thrift.TShowModelResp; +import org.apache.iotdb.consensus.common.DataSet; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + +public class ModelTableResp implements DataSet { + + private final TSStatus status; + private final List<ByteBuffer> serializedAllModelInformation; + + public ModelTableResp(TSStatus status) { + this.status = status; + this.serializedAllModelInformation = new ArrayList<>(); + } + + public void addModelInformation(List<ModelInformation> modelInformationList) throws IOException { + for (ModelInformation modelInformation : modelInformationList) { + this.serializedAllModelInformation.add(modelInformation.serializeShowModelResult()); + } + } + + public void addModelInformation(ModelInformation modelInformation) throws IOException { + this.serializedAllModelInformation.add(modelInformation.serializeShowModelResult()); + } + + public TShowModelResp convertToThriftResponse() throws IOException { + return new TShowModelResp(status, serializedAllModelInformation); + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/pipe/plugin/PipePluginTableResp.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/pipe/plugin/PipePluginTableResp.java index 62532350c0ae0..44f6f1d779fcd 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/pipe/plugin/PipePluginTableResp.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/pipe/plugin/PipePluginTableResp.java @@ -20,7 +20,7 @@ package org.apache.iotdb.confignode.consensus.response.pipe.plugin; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.pipe.plugin.meta.PipePluginMeta; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.PipePluginMeta; import org.apache.iotdb.confignode.rpc.thrift.TGetPipePluginTableResp; import org.apache.iotdb.consensus.common.DataSet; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/pipe/task/PipeTableResp.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/pipe/task/PipeTableResp.java index 4cbe230a977f5..66323d4fbab17 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/pipe/task/PipeTableResp.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/pipe/task/PipeTableResp.java @@ -22,12 +22,12 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeException; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeRuntimeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeTemporaryMeta; -import org.apache.iotdb.confignode.manager.pipe.extractor.ConfigRegionListeningFilter; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeRuntimeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; +import
org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTemporaryMetaInCoordinator; +import org.apache.iotdb.confignode.manager.pipe.source.ConfigRegionListeningFilter; import org.apache.iotdb.confignode.rpc.thrift.TGetAllPipeInfoResp; import org.apache.iotdb.confignode.rpc.thrift.TShowPipeInfo; import org.apache.iotdb.confignode.rpc.thrift.TShowPipeResp; @@ -177,7 +177,8 @@ public TShowPipeResp convertToTShowPipeResp() { staticMeta.getProcessorParameters().toString(), staticMeta.getConnectorParameters().toString(), exceptionMessageBuilder.toString()); - final PipeTemporaryMeta temporaryMeta = pipeMeta.getTemporaryMeta(); + final PipeTemporaryMetaInCoordinator temporaryMeta = + (PipeTemporaryMetaInCoordinator) pipeMeta.getTemporaryMeta(); final boolean canCalculateOnLocal = canCalculateOnLocal(pipeMeta); showPipeInfo.setRemainingEventCount( diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/subscription/SubscriptionTableResp.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/subscription/SubscriptionTableResp.java index d6679ea4c1159..e3c336cb5b85b 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/subscription/SubscriptionTableResp.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/subscription/SubscriptionTableResp.java @@ -31,6 +31,9 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.stream.Collectors; public class SubscriptionTableResp implements DataSet { private final TSStatus status; @@ -47,29 +50,30 @@ public SubscriptionTableResp( } public SubscriptionTableResp filter(String topicName) { - if (topicName == null) { - return this; - } else { - final List<SubscriptionMeta> filteredSubscriptionMeta = new ArrayList<>(); - for (SubscriptionMeta subscriptionMeta : allSubscriptionMeta) { - if (subscriptionMeta.getTopicName().equals(topicName)) { - filteredSubscriptionMeta.add(subscriptionMeta); - break; - } - } - return new SubscriptionTableResp(status, filteredSubscriptionMeta, allConsumerGroupMeta); - } + return new SubscriptionTableResp( + status, + allSubscriptionMeta.stream() + .filter( + subscriptionMeta -> + (Objects.isNull(topicName) + || Objects.equals( + subscriptionMeta.getTopicMeta().getTopicName(), topicName))) + .collect(Collectors.toList()), + allConsumerGroupMeta); } public TShowSubscriptionResp convertToTShowSubscriptionResp() { final List<TShowSubscriptionInfo> showSubscriptionInfoList = new ArrayList<>(); for (SubscriptionMeta subscriptionMeta : allSubscriptionMeta) { - showSubscriptionInfoList.add( + TShowSubscriptionInfo showSubscriptionInfo = new TShowSubscriptionInfo( - subscriptionMeta.getTopicName(), + subscriptionMeta.getTopicMeta().getTopicName(), subscriptionMeta.getConsumerGroupId(), - subscriptionMeta.getConsumerIds())); + subscriptionMeta.getConsumerIds()); + Optional<Long> creationTime = subscriptionMeta.getCreationTime(); + creationTime.ifPresent(showSubscriptionInfo::setCreationTime); + showSubscriptionInfoList.add(showSubscriptionInfo); } return new TShowSubscriptionResp(status).setSubscriptionInfoList(showSubscriptionInfoList); }
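The rewritten filter in SubscriptionTableResp changes two things at once: a null topicName now means "keep everything" inside a single stream pipeline, and the old break after the first hit is gone, so every subscription on the topic is returned rather than only the first match. The same null-means-all predicate in isolation:

```java
import java.util.*;
import java.util.stream.Collectors;

class NullMeansAllFilterDemo {
  // filter(null) keeps all entries; filter("topic_a") keeps every match,
  // not just the first one as the removed break-based loop did.
  static List<String> filter(List<String> all, String name) {
    return all.stream()
        .filter(t -> Objects.isNull(name) || Objects.equals(t, name))
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    List<String> topics = Arrays.asList("topic_a", "topic_b", "topic_a");
    System.out.println(filter(topics, null));      // [topic_a, topic_b, topic_a]
    System.out.println(filter(topics, "topic_a")); // [topic_a, topic_a]
  }
}
```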
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/statemachine/ConfigRegionStateMachine.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/statemachine/ConfigRegionStateMachine.java index b0d48cb6544e6..bf18b76371869 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/statemachine/ConfigRegionStateMachine.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/statemachine/ConfigRegionStateMachine.java @@ -31,6 +31,7 @@ import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; import org.apache.iotdb.confignode.exception.physical.UnknownPhysicalPlanTypeException; import org.apache.iotdb.confignode.manager.ConfigManager; import org.apache.iotdb.confignode.manager.consensus.ConsensusManager; @@ -124,7 +125,7 @@ protected TSStatus write(ConfigPhysicalPlan plan) { try { result = executor.executeNonQueryPlan(plan); } catch (UnknownPhysicalPlanTypeException e) { - LOGGER.error(e.getMessage()); + LOGGER.error("Execute non-query plan failed", e); result = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()); } @@ -166,17 +167,10 @@ public IConsensusRequest deserializeRequest(IConsensusRequest request) { } @Override - public DataSet read(IConsensusRequest request) { - ConfigPhysicalPlan plan; - if (request instanceof ByteBufferConsensusRequest) { - try { - plan = ConfigPhysicalPlan.Factory.create(request.serializeToByteBuffer()); - } catch (Exception e) { - LOGGER.error("Deserialization error for write plan : {}", request); - return null; - } - } else if (request instanceof ConfigPhysicalPlan) { - plan = (ConfigPhysicalPlan) request; + public DataSet read(final IConsensusRequest request) { + final ConfigPhysicalReadPlan plan; + if (request instanceof ConfigPhysicalReadPlan) { + plan = (ConfigPhysicalReadPlan) request; } else { LOGGER.error("Unexpected read plan : {}", request); return null; @@ -184,13 +178,13 @@ public DataSet read(IConsensusRequest request) { return read(plan); } - /** Transmit {@link ConfigPhysicalPlan} to {@link ConfigPlanExecutor} */ - protected DataSet read(ConfigPhysicalPlan plan) { + /** Transmit {@link ConfigPhysicalReadPlan} to {@link ConfigPlanExecutor} */ + protected DataSet read(final ConfigPhysicalReadPlan plan) { DataSet result; try { result = executor.executeQueryPlan(plan); - } catch (UnknownPhysicalPlanTypeException | AuthException e) { - LOGGER.error(e.getMessage()); + } catch (final UnknownPhysicalPlanTypeException | AuthException e) { + LOGGER.error("Execute query plan failed", e); result = null; } return result; @@ -240,40 +234,48 @@ public void notifyLeaderChanged(ConsensusGroupId groupId, int newLeaderId) { int currentNodeId = ConfigNodeDescriptor.getInstance().getConf().getConfigNodeId(); if (currentNodeId != newLeaderId) { LOGGER.info( - "Current node [nodeId:{}, ip:port: {}] is not longer the leader, " + "Current node [nodeId:{}, ip:port: {}] is no longer the leader, " + "the new leader is [nodeId:{}]", currentNodeId, currentNodeTEndPoint, newLeaderId); + } + } - // Stop leader scheduling services - configManager.getPipeManager().getPipeRuntimeCoordinator().stopPipeMetaSync(); - configManager.getPipeManager().getPipeRuntimeCoordinator().stopPipeHeartbeat(); - configManager - .getSubscriptionManager() - .getSubscriptionCoordinator() - .stopSubscriptionMetaSync(); - configManager.getLoadManager().stopLoadServices(); -
configManager.getProcedureManager().stopExecutor(); - configManager.getRetryFailedTasksThread().stopRetryFailedTasksService(); - configManager.getPartitionManager().stopRegionCleaner(); - configManager.getCQManager().stopCQScheduler(); - configManager.getClusterSchemaManager().clearSchemaQuotaCache(); - // Remove Metric after leader change - configManager.removeMetrics(); - - // Shutdown leader related service for config pipe - PipeConfigNodeAgent.runtime().notifyLeaderUnavailable(); - - // Clean receiver file dir - PipeConfigNodeAgent.receiver().cleanPipeReceiverDir(); + @Override + public void notifyNotLeader() { + // We get currentNodeId here because the currentNodeId + // couldn't initialize earlier than the ConfigRegionStateMachine + int currentNodeId = ConfigNodeDescriptor.getInstance().getConf().getConfigNodeId(); + LOGGER.info( + "Current node [nodeId:{}, ip:port: {}] is no longer the leader, " + + "start cleaning up related services", + currentNodeId, + currentNodeTEndPoint); + // Stop leader scheduling services + configManager.getPipeManager().getPipeRuntimeCoordinator().stopPipeMetaSync(); + configManager.getPipeManager().getPipeRuntimeCoordinator().stopPipeHeartbeat(); + configManager.getSubscriptionManager().getSubscriptionCoordinator().stopSubscriptionMetaSync(); + configManager.getLoadManager().stopLoadServices(); + configManager.getProcedureManager().stopExecutor(); + configManager.getRetryFailedTasksThread().stopRetryFailedTasksService(); + configManager.getPartitionManager().stopRegionCleaner(); + configManager.getCQManager().stopCQScheduler(); + configManager.getClusterSchemaManager().clearSchemaQuotaCache(); + // Remove Metric after leader change + configManager.removeMetrics(); - LOGGER.info( - "Current node [nodeId:{}, ip:port: {}] is not longer the leader, " - + "all services on old leader are unavailable now.", - currentNodeId, - currentNodeTEndPoint); - } + // Shutdown leader related service for config pipe + PipeConfigNodeAgent.runtime().notifyLeaderUnavailable(); + + // Clean receiver file dir + PipeConfigNodeAgent.receiver().cleanPipeReceiverDir(); + + LOGGER.info( + "Current node [nodeId:{}, ip:port: {}] is no longer the leader, " + + "all services on old leader are unavailable now.", + currentNodeId, + currentNodeTEndPoint); } @Override @@ -430,7 +432,7 @@ private void initStandAloneConfigNode() { PipeConfigNodeAgent.runtime().listener().tryListenToPlan(nextPlan, false); } } catch (UnknownPhysicalPlanTypeException e) { - LOGGER.error(e.getMessage()); + LOGGER.error("Try listen to plan failed", e); } } logReader.close(); @@ -482,6 +484,7 @@ private void createLogFile(int startIndex) { } static class FileComparator implements Comparator<String> { + @Override public int compare(String filename1, String filename2) { long id1 = parseEndIndex(filename1);
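The state-machine refactor above moves the stepping-down cleanup out of notifyLeaderChanged into a dedicated notifyNotLeader callback, so the consensus layer signals loss of leadership directly instead of the state machine inferring it by comparing node ids. A hedged sketch of that callback split (the interface and names are illustrative, not the actual IoTDB consensus API):

```java
// Illustrative only -- not the actual IoTDB consensus interfaces.
interface LeadershipAware {
  void notifyLeaderChanged(int newLeaderId); // informational: leadership moved
  void notifyNotLeader();                    // actionable: stop leader-only services
}

class RegionStateMachineSketch implements LeadershipAware {
  private final int myId;

  RegionStateMachineSketch(int myId) { this.myId = myId; }

  @Override
  public void notifyLeaderChanged(int newLeaderId) {
    // After the split this only logs; cleanup is no longer inferred here.
    if (myId != newLeaderId) {
      System.out.println("Node " + myId + " is no longer the leader; new leader: " + newLeaderId);
    }
  }

  @Override
  public void notifyNotLeader() {
    // The consensus layer calls this directly when this node steps down.
    System.out.println("Node " + myId + " stopping leader-only services");
    // e.g. stop schedulers and heartbeats, drop caches, unregister leader metrics
  }

  public static void main(String[] args) {
    RegionStateMachineSketch sm = new RegionStateMachineSketch(1);
    sm.notifyLeaderChanged(2);
    sm.notifyNotLeader();
  }
}
```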
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterManager.java index 8dba6addffd50..e7d0dea48f804 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterManager.java @@ -33,8 +33,8 @@ import org.apache.iotdb.commons.client.request.AsyncRequestContext; import org.apache.iotdb.commons.client.request.TestConnectionUtils; import org.apache.iotdb.confignode.client.CnToCnNodeRequestType; -import org.apache.iotdb.confignode.client.CnToDnRequestType; import org.apache.iotdb.confignode.client.async.CnToCnInternalServiceAsyncRequestManager; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.ConfigNodeAsyncRequestContext; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; @@ -155,7 +155,7 @@ public TTestConnectionResp submitTestConnectionTaskToEveryNode() { .collect(Collectors.toMap(TDataNodeLocation::getDataNodeId, location -> location)); DataNodeAsyncRequestContext dataNodeAsyncRequestContext = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.SUBMIT_TEST_CONNECTION_TASK, nodeLocations, dataNodeLocationMap); + CnToDnAsyncRequestType.SUBMIT_TEST_CONNECTION_TASK, nodeLocations, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance() .sendAsyncRequest(dataNodeAsyncRequestContext); Map<Integer, TDataNodeLocation> anotherDataNodeLocationMap = @@ -226,8 +226,9 @@ private List testAllDataNodeConnection( TDataNodeLocation::getDataNodeId, TDataNodeLocation::getInternalEndPoint, TServiceType.DataNodeInternalService, - CnToDnRequestType.TEST_CONNECTION, - (AsyncRequestContext handler) -> + CnToDnAsyncRequestType.TEST_CONNECTION, + (AsyncRequestContext + handler) -> CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequest(handler)); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterQuotaManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterQuotaManager.java index b0150b349d80e..cbfa1bc2fb2e5 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterQuotaManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterQuotaManager.java @@ -26,7 +26,7 @@ import org.apache.iotdb.common.rpc.thrift.TSpaceQuota; import org.apache.iotdb.common.rpc.thrift.TThrottleQuota; import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; import org.apache.iotdb.confignode.consensus.request.write.quota.SetSpaceQuotaPlan; @@ -89,7 +89,7 @@ public TSStatus setSpaceQuota(TSetSpaceQuotaReq req) { configManager.getNodeManager().getRegisteredDataNodeLocations(); DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.SET_SPACE_QUOTA, req, dataNodeLocationMap); + CnToDnAsyncRequestType.SET_SPACE_QUOTA, req, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance() .sendAsyncRequestWithRetry(clientHandler); return RpcUtils.squashResponseStatusList(clientHandler.getResponseList()); @@ -196,7 +196,7 @@ public TSStatus setThrottleQuota(TSetThrottleQuotaReq req) { configManager.getNodeManager().getRegisteredDataNodeLocations(); DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.SET_THROTTLE_QUOTA, req, dataNodeLocationMap); + CnToDnAsyncRequestType.SET_THROTTLE_QUOTA, req, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance() .sendAsyncRequestWithRetry(clientHandler); return RpcUtils.squashResponseStatusList(clientHandler.getResponseList()); diff --git
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java
index 59dacc5ad5ca9..13e734d0bc72b 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java
@@ -19,11 +19,14 @@
 package org.apache.iotdb.confignode.manager;
 
+import org.apache.iotdb.common.rpc.thrift.TAINodeConfiguration;
+import org.apache.iotdb.common.rpc.thrift.TAINodeLocation;
 import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation;
 import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId;
 import org.apache.iotdb.common.rpc.thrift.TDataNodeConfiguration;
 import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
 import org.apache.iotdb.common.rpc.thrift.TFlushReq;
+import org.apache.iotdb.common.rpc.thrift.TPipeHeartbeatResp;
 import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet;
 import org.apache.iotdb.common.rpc.thrift.TSStatus;
 import org.apache.iotdb.common.rpc.thrift.TSchemaNode;
@@ -40,12 +43,13 @@
 import org.apache.iotdb.commons.conf.CommonDescriptor;
 import org.apache.iotdb.commons.conf.ConfigurationFileUtils;
 import org.apache.iotdb.commons.conf.IoTDBConstant;
+import org.apache.iotdb.commons.conf.TrimProperties;
 import org.apache.iotdb.commons.exception.IllegalPathException;
 import org.apache.iotdb.commons.exception.MetadataException;
 import org.apache.iotdb.commons.path.PartialPath;
 import org.apache.iotdb.commons.path.PathPatternTree;
 import org.apache.iotdb.commons.path.PathPatternUtil;
-import org.apache.iotdb.commons.pipe.connector.payload.airgap.AirGapPseudoTPipeTransferRequest;
+import org.apache.iotdb.commons.pipe.sink.payload.airgap.AirGapPseudoTPipeTransferRequest;
 import org.apache.iotdb.commons.schema.SchemaConstant;
 import org.apache.iotdb.commons.schema.ttl.TTLCache;
 import org.apache.iotdb.commons.service.metric.MetricService;
@@ -56,7 +60,8 @@
 import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
 import org.apache.iotdb.confignode.conf.SystemPropertiesUtils;
 import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
-import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan;
+import org.apache.iotdb.confignode.consensus.request.read.ainode.GetAINodeConfigurationPlan;
+import org.apache.iotdb.confignode.consensus.request.read.auth.AuthorReadPlan;
 import org.apache.iotdb.confignode.consensus.request.read.database.CountDatabasePlan;
 import org.apache.iotdb.confignode.consensus.request.read.database.GetDatabasePlan;
 import org.apache.iotdb.confignode.consensus.request.read.datanode.GetDataNodeConfigurationPlan;
@@ -67,6 +72,8 @@
 import org.apache.iotdb.confignode.consensus.request.read.partition.GetSchemaPartitionPlan;
 import org.apache.iotdb.confignode.consensus.request.read.region.GetRegionInfoListPlan;
 import org.apache.iotdb.confignode.consensus.request.read.ttl.ShowTTLPlan;
+import org.apache.iotdb.confignode.consensus.request.write.ainode.RemoveAINodePlan;
+import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan;
 import org.apache.iotdb.confignode.consensus.request.write.confignode.RemoveConfigNodePlan;
 import org.apache.iotdb.confignode.consensus.request.write.database.DatabaseSchemaPlan;
 import org.apache.iotdb.confignode.consensus.request.write.database.SetDataReplicationFactorPlan;
@@ -75,6 +82,7 @@
 import org.apache.iotdb.confignode.consensus.request.write.database.SetTimePartitionIntervalPlan;
 import org.apache.iotdb.confignode.consensus.request.write.datanode.RemoveDataNodePlan;
 import org.apache.iotdb.confignode.consensus.request.write.template.CreateSchemaTemplatePlan;
+import org.apache.iotdb.confignode.consensus.response.ainode.AINodeRegisterResp;
 import org.apache.iotdb.confignode.consensus.response.auth.PermissionInfoResp;
 import org.apache.iotdb.confignode.consensus.response.database.CountDatabaseResp;
 import org.apache.iotdb.confignode.consensus.response.database.DatabaseSchemaResp;
@@ -105,6 +113,7 @@
 import org.apache.iotdb.confignode.manager.subscription.SubscriptionManager;
 import org.apache.iotdb.confignode.persistence.AuthorInfo;
 import org.apache.iotdb.confignode.persistence.ClusterInfo;
+import org.apache.iotdb.confignode.persistence.ModelInfo;
 import org.apache.iotdb.confignode.persistence.ProcedureInfo;
 import org.apache.iotdb.confignode.persistence.TTLInfo;
 import org.apache.iotdb.confignode.persistence.TriggerInfo;
@@ -118,6 +127,9 @@
 import org.apache.iotdb.confignode.persistence.schema.ClusterSchemaInfo;
 import org.apache.iotdb.confignode.persistence.subscription.SubscriptionInfo;
 import org.apache.iotdb.confignode.procedure.impl.schema.SchemaUtils;
+import org.apache.iotdb.confignode.rpc.thrift.TAINodeRegisterReq;
+import org.apache.iotdb.confignode.rpc.thrift.TAINodeRestartReq;
+import org.apache.iotdb.confignode.rpc.thrift.TAINodeRestartResp;
 import org.apache.iotdb.confignode.rpc.thrift.TAlterLogicalViewReq;
 import org.apache.iotdb.confignode.rpc.thrift.TAlterPipeReq;
 import org.apache.iotdb.confignode.rpc.thrift.TAlterSchemaTemplateReq;
@@ -131,6 +143,7 @@
 import org.apache.iotdb.confignode.rpc.thrift.TCreateCQReq;
 import org.apache.iotdb.confignode.rpc.thrift.TCreateConsumerReq;
 import org.apache.iotdb.confignode.rpc.thrift.TCreateFunctionReq;
+import org.apache.iotdb.confignode.rpc.thrift.TCreateModelReq;
 import org.apache.iotdb.confignode.rpc.thrift.TCreatePipePluginReq;
 import org.apache.iotdb.confignode.rpc.thrift.TCreatePipeReq;
 import org.apache.iotdb.confignode.rpc.thrift.TCreateSchemaTemplateReq;
@@ -146,10 +159,13 @@
 import org.apache.iotdb.confignode.rpc.thrift.TDeleteLogicalViewReq;
 import org.apache.iotdb.confignode.rpc.thrift.TDeleteTimeSeriesReq;
 import org.apache.iotdb.confignode.rpc.thrift.TDropCQReq;
+import org.apache.iotdb.confignode.rpc.thrift.TDropModelReq;
 import org.apache.iotdb.confignode.rpc.thrift.TDropPipePluginReq;
 import org.apache.iotdb.confignode.rpc.thrift.TDropPipeReq;
+import org.apache.iotdb.confignode.rpc.thrift.TDropSubscriptionReq;
 import org.apache.iotdb.confignode.rpc.thrift.TDropTopicReq;
 import org.apache.iotdb.confignode.rpc.thrift.TDropTriggerReq;
+import org.apache.iotdb.confignode.rpc.thrift.TExtendRegionReq;
 import org.apache.iotdb.confignode.rpc.thrift.TGetAllPipeInfoResp;
 import org.apache.iotdb.confignode.rpc.thrift.TGetAllSubscriptionInfoResp;
 import org.apache.iotdb.confignode.rpc.thrift.TGetAllTemplatesResp;
@@ -159,6 +175,8 @@
 import org.apache.iotdb.confignode.rpc.thrift.TGetJarInListReq;
 import org.apache.iotdb.confignode.rpc.thrift.TGetJarInListResp;
 import org.apache.iotdb.confignode.rpc.thrift.TGetLocationForTriggerResp;
+import org.apache.iotdb.confignode.rpc.thrift.TGetModelInfoReq;
+import org.apache.iotdb.confignode.rpc.thrift.TGetModelInfoResp;
 import org.apache.iotdb.confignode.rpc.thrift.TGetPathsSetTemplatesReq;
 import org.apache.iotdb.confignode.rpc.thrift.TGetPathsSetTemplatesResp;
 import org.apache.iotdb.confignode.rpc.thrift.TGetPipePluginTableResp;
@@ -176,16 +194,21 @@
 import org.apache.iotdb.confignode.rpc.thrift.TPermissionInfoResp;
 import org.apache.iotdb.confignode.rpc.thrift.TPipeConfigTransferReq;
 import org.apache.iotdb.confignode.rpc.thrift.TPipeConfigTransferResp;
+import org.apache.iotdb.confignode.rpc.thrift.TReconstructRegionReq;
 import org.apache.iotdb.confignode.rpc.thrift.TRegionRouteMapResp;
+import org.apache.iotdb.confignode.rpc.thrift.TRemoveRegionReq;
 import org.apache.iotdb.confignode.rpc.thrift.TSchemaNodeManagementResp;
 import org.apache.iotdb.confignode.rpc.thrift.TSchemaPartitionTableResp;
 import org.apache.iotdb.confignode.rpc.thrift.TSetDataNodeStatusReq;
 import org.apache.iotdb.confignode.rpc.thrift.TSetSchemaTemplateReq;
+import org.apache.iotdb.confignode.rpc.thrift.TShowAINodesResp;
 import org.apache.iotdb.confignode.rpc.thrift.TShowCQResp;
 import org.apache.iotdb.confignode.rpc.thrift.TShowClusterResp;
 import org.apache.iotdb.confignode.rpc.thrift.TShowConfigNodesResp;
 import org.apache.iotdb.confignode.rpc.thrift.TShowDataNodesResp;
 import org.apache.iotdb.confignode.rpc.thrift.TShowDatabaseResp;
+import org.apache.iotdb.confignode.rpc.thrift.TShowModelReq;
+import org.apache.iotdb.confignode.rpc.thrift.TShowModelResp;
 import org.apache.iotdb.confignode.rpc.thrift.TShowPipeReq;
 import org.apache.iotdb.confignode.rpc.thrift.TShowPipeResp;
 import org.apache.iotdb.confignode.rpc.thrift.TShowSubscriptionReq;
@@ -227,7 +250,6 @@
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Properties;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -277,6 +299,9 @@ public class ConfigManager implements IManager {
   /** CQ. */
   private final CQManager cqManager;
 
+  /** AI Model. */
+  private final ModelManager modelManager;
+
   /** Pipe */
   private final PipeManager pipeManager;
 
@@ -306,6 +331,7 @@ public ConfigManager() throws IOException {
     UDFInfo udfInfo = new UDFInfo();
     TriggerInfo triggerInfo = new TriggerInfo();
     CQInfo cqInfo = new CQInfo();
+    ModelInfo modelInfo = new ModelInfo();
     PipeInfo pipeInfo = new PipeInfo();
     QuotaInfo quotaInfo = new QuotaInfo();
     TTLInfo ttlInfo = new TTLInfo();
@@ -323,6 +349,7 @@ public ConfigManager() throws IOException {
             udfInfo,
             triggerInfo,
             cqInfo,
+            modelInfo,
             pipeInfo,
             subscriptionInfo,
             quotaInfo,
@@ -344,6 +371,7 @@ public ConfigManager() throws IOException {
     this.udfManager = new UDFManager(this, udfInfo);
     this.triggerManager = new TriggerManager(this, triggerInfo);
     this.cqManager = new CQManager(this);
+    this.modelManager = new ModelManager(this, modelInfo);
     this.pipeManager = new PipeManager(this, pipeInfo);
     this.subscriptionManager = new SubscriptionManager(this, subscriptionInfo);
 
@@ -401,7 +429,7 @@ public DataSet getSystemConfiguration() {
   }
 
   @Override
-  public DataSet registerDataNode(TDataNodeRegisterReq req) {
+  public synchronized DataSet registerDataNode(TDataNodeRegisterReq req) {
     TSStatus status = confirmLeader();
     if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
       status = ClusterNodeStartUtils.confirmDataNodeRegistration(req, this);
@@ -423,6 +451,7 @@ public TDataNodeRestartResp restartDataNode(TDataNodeRestartReq req) {
         ClusterNodeStartUtils.confirmNodeRestart(
             NodeType.DataNode,
             req.getClusterName(),
+            req.getClusterId(),
            req.getDataNodeConfiguration().getLocation().getDataNodeId(),
            req.getDataNodeConfiguration().getLocation(),
            this);
@@ -447,6 +476,37 @@ public DataSet removeDataNode(RemoveDataNodePlan removeDataNodePlan) {
     }
   }
 
+  @Override
+  public TAINodeRestartResp restartAINode(TAINodeRestartReq req) {
+    TSStatus status = confirmLeader();
+    if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+      status =
+          ClusterNodeStartUtils.confirmNodeRestart(
+              NodeType.AINode,
+              req.getClusterName(),
+              req.getClusterId(),
+              req.getAiNodeConfiguration().getLocation().getAiNodeId(),
+              req.getAiNodeConfiguration().getLocation(),
+              this);
+      if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+        return nodeManager.updateAINodeIfNecessary(req);
+      }
+    }
+    return new TAINodeRestartResp()
+        .setStatus(status)
+        .setConfigNodeList(getNodeManager().getRegisteredConfigNodes());
+  }
+
+  @Override
+  public TSStatus removeAINode(RemoveAINodePlan removeAINodePlan) {
+    TSStatus status = confirmLeader();
+    if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+      return nodeManager.removeAINode(removeAINodePlan);
+    } else {
+      return status;
+    }
+  }
+
   @Override
   public TSStatus reportDataNodeShutdown(TDataNodeLocation dataNodeLocation) {
     TSStatus status = confirmLeader();
@@ -458,7 +518,7 @@ public TSStatus reportDataNodeShutdown(TDataNodeLocation dataNodeLocation) {
           dataNodeLocation.getDataNodeId(), new NodeHeartbeatSample(NodeStatus.Unknown));
       LOGGER.info(
-          "[ShutdownHook] The DataNode-{} will be shutdown soon, mark it as Unknown",
+          "The DataNode-{} will be shut down soon, marking it as Unknown",
           dataNodeLocation.getDataNodeId());
     }
     return status;
@@ -477,6 +537,11 @@ public DataSet getDataNodeConfiguration(
     }
   }
 
+  @Override
+  public DataSet getAINodeConfiguration(GetAINodeConfigurationPlan getAINodeConfigurationPlan) {
+    return nodeManager.getAINodeConfiguration(getAINodeConfigurationPlan);
+  }
+
   @Override
   public TShowClusterResp showCluster() {
     TSStatus status = confirmLeader();
@@ -499,10 +564,22 @@ public TShowClusterResp showCluster() {
               nodeStatus.putIfAbsent(
                   dataNodeLocation.getDataNodeId(), NodeStatus.Unknown.toString()));
 
+      List<TAINodeLocation> aiNodeLocations =
+          getNodeManager().getRegisteredAINodes().stream()
+              .map(TAINodeConfiguration::getLocation)
+              .sorted(Comparator.comparingInt(TAINodeLocation::getAiNodeId))
+              .collect(Collectors.toList());
+      aiNodeLocations.forEach(
+          aiNodeLocation ->
+              nodeStatus.putIfAbsent(
+                  aiNodeLocation.getAiNodeId(), NodeStatus.Unknown.toString()));
+
       return new TShowClusterResp()
           .setStatus(status)
           .setConfigNodeList(configNodeLocations)
          .setDataNodeList(dataNodeLocations)
+          .setAiNodeList(aiNodeLocations)
          .setNodeStatus(nodeStatus)
          .setNodeVersionInfo(nodeVersionInfo);
     } else {
@@ -510,6 +587,7 @@
           .setStatus(status)
           .setConfigNodeList(Collections.emptyList())
           .setDataNodeList(Collections.emptyList())
+          .setAiNodeList(Collections.emptyList())
           .setNodeStatus(Collections.emptyMap())
           .setNodeVersionInfo(Collections.emptyMap());
     }
@@ -1031,6 +1109,11 @@ private void printNewCreatedDataPartition(
   }
 
   protected TSStatus confirmLeader() {
+    if (NodeStatus.Removing == CommonDescriptor.getInstance().getConfig().getNodeStatus()) {
+      TSStatus status = new TSStatus(TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode());
+      status.setMessage("ConfigNode is Removing");
+      return status;
+    }
     // Make sure the consensus layer has been initialized
     if (getConsensusManager() == null) {
       return new TSStatus(TSStatusCode.CONSENSUS_NOT_INITIALIZED.getStatusCode())
@@ -1081,6 +1164,11 @@ public TriggerManager getTriggerManager() {
     return triggerManager;
   }
 
+  @Override
+  public ModelManager getModelManager() {
+    return modelManager;
+  }
+
   @Override
   public PipeManager getPipeManager() {
     return pipeManager;
@@ -1097,8 +1185,8 @@ public SubscriptionManager getSubscriptionManager() {
   }
 
   @Override
-  public TSStatus operatePermission(AuthorPlan authorPlan) {
-    TSStatus status = confirmLeader();
+  public TSStatus operatePermission(final AuthorPlan authorPlan) {
+    final TSStatus status = confirmLeader();
     if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
       return permissionManager.operatePermission(authorPlan, false);
     } else {
@@ -1107,12 +1195,12 @@ public TSStatus operatePermission(AuthorPlan authorPlan) {
   }
 
   @Override
-  public DataSet queryPermission(AuthorPlan authorPlan) {
-    TSStatus status = confirmLeader();
+  public DataSet queryPermission(final AuthorReadPlan authorPlan) {
+    final TSStatus status = confirmLeader();
     if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
       return permissionManager.queryPermission(authorPlan);
     } else {
-      PermissionInfoResp dataSet = new PermissionInfoResp();
+      final PermissionInfoResp dataSet = new PermissionInfoResp();
       dataSet.setStatus(status);
       return dataSet;
     }
@@ -1301,7 +1389,7 @@ public TSStatus checkConfigNodeGlobalConfig(TConfigNodeRegisterReq req) {
 
   @Override
   public TSStatus createPeerForConsensusGroup(List<TConfigNodeLocation> configNodeLocations) {
-    final long rpcTimeoutInMS = COMMON_CONF.getConnectionTimeoutInMS();
+    final long rpcTimeoutInMS = COMMON_CONF.getCnConnectionTimeoutInMS();
     final long retryIntervalInMS = 1000;
 
     for (int i = 0; i < rpcTimeoutInMS / retryIntervalInMS; i++) {
@@ -1349,7 +1437,7 @@ public TSStatus reportConfigNodeShutdown(TConfigNodeLocation configNodeLocation)
           configNodeLocation.getConfigNodeId(), new NodeHeartbeatSample(NodeStatus.Unknown));
       LOGGER.info(
-          "[ShutdownHook] The ConfigNode-{} will be shutdown soon, mark it as Unknown",
+          "The ConfigNode-{} will be shut down soon, marking it as Unknown",
           configNodeLocation.getConfigNodeId());
     }
     return status;
@@ -1483,6 +1571,15 @@ public TSStatus flush(TFlushReq req) {
         : status;
   }
 
+  @Override
+  public TSStatus flushOnSpecificDN(
+      final TFlushReq req, final Map<Integer, TDataNodeLocation> dataNodeLocationMap) {
+    final TSStatus status = confirmLeader();
+    return status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()
+        ? RpcUtils.squashResponseStatusList(nodeManager.flushOnSpecificDN(req, dataNodeLocationMap))
+        : status;
+  }
+
   @Override
   public TSStatus clearCache() {
     TSStatus status = confirmLeader();
@@ -1495,28 +1592,45 @@
   public TSStatus setConfiguration(TSetConfigurationReq req) {
     TSStatus tsStatus = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
     int currentNodeId = CONF.getConfigNodeId();
-    if (req.getNodeId() < 0 || currentNodeId == req.getNodeId()) {
-      URL url = ConfigNodeDescriptor.getPropsUrl(CommonConfig.SYSTEM_CONFIG_NAME);
-      if (url == null || !new File(url.getFile()).exists()) {
+    if (currentNodeId != req.getNodeId()) {
+      tsStatus = confirmLeader();
+      if (tsStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
         return tsStatus;
       }
-      File file = new File(url.getFile());
-      Properties properties = new Properties();
+    }
+    if (currentNodeId == req.getNodeId() || req.getNodeId() < 0) {
+      URL url = ConfigNodeDescriptor.getPropsUrl(CommonConfig.SYSTEM_CONFIG_NAME);
+      boolean configurationFileFound = (url != null && new File(url.getFile()).exists());
+      TrimProperties properties = new TrimProperties();
       properties.putAll(req.getConfigs());
-      try {
-        ConfigurationFileUtils.updateConfigurationFile(file, properties);
-      } catch (Exception e) {
-        return RpcUtils.getStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR, e.getMessage());
+
+      if (configurationFileFound) {
+        File file = new File(url.getFile());
+        try {
+          ConfigurationFileUtils.updateConfiguration(
+              file,
+              properties,
+              mergedProps -> {
+                ConfigNodeDescriptor.getInstance().loadHotModifiedProps(mergedProps);
+              });
+        } catch (Exception e) {
+          tsStatus = RpcUtils.getStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR, e.getMessage());
+        }
+      } else {
+        String msg =
+            "Unable to find the configuration file. Some modifications are made only in memory.";
+        tsStatus = RpcUtils.getStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR, msg);
+        LOGGER.warn(msg);
       }
-      ConfigNodeDescriptor.getInstance().loadHotModifiedProps(properties);
-      if (CONF.getConfigNodeId() == req.getNodeId()) {
+      if (currentNodeId == req.getNodeId()) {
        return tsStatus;
      }
    }
-    tsStatus = confirmLeader();
-    return tsStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()
-        ? RpcUtils.squashResponseStatusList(nodeManager.setConfiguration(req))
-        : tsStatus;
+    List<TSStatus> statusListOfOtherNodes = nodeManager.setConfiguration(req);
+    List<TSStatus> statusList = new ArrayList<>(statusListOfOtherNodes.size() + 1);
+    statusList.add(tsStatus);
+    statusList.addAll(statusListOfOtherNodes);
+    return RpcUtils.squashResponseStatusList(statusList);
   }
 
   @Override
@@ -1633,9 +1747,10 @@ public TRegionRouteMapResp getLatestRegionRouteMap() {
       } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
         LOGGER.warn("Unexpected interruption during retry getting latest region route map");
+        resp.getStatus().setCode(TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode());
+        return resp;
       }
     }
-    resp.setTimestamp(System.currentTimeMillis());
     resp.setRegionRouteMap(getLoadManager().getRegionPriorityMap());
   }
@@ -1659,6 +1774,18 @@ public RegionInfoListResp showRegion(GetRegionInfoListPlan getRegionInfoListPlan)
     }
   }
 
+  @Override
+  public TShowAINodesResp showAINodes() {
+    TSStatus status = confirmLeader();
+    TShowAINodesResp resp = new TShowAINodesResp();
+    if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+      return resp.setAiNodesInfoList(nodeManager.getRegisteredAINodeInfoList())
+          .setStatus(StatusUtils.OK);
+    } else {
+      return resp.setStatus(status);
+    }
+  }
+
   @Override
   public TShowDataNodesResp showDataNodes() {
     TSStatus status = confirmLeader();
@@ -1987,8 +2114,8 @@ public TSStatus deleteLogicalView(TDeleteLogicalViewReq req) {
   }
 
   @Override
-  public TSStatus alterLogicalView(TAlterLogicalViewReq req) {
-    TSStatus status = confirmLeader();
+  public TSStatus alterLogicalView(final TAlterLogicalViewReq req) {
+    final TSStatus status = confirmLeader();
     if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
       return procedureManager.alterLogicalView(req);
     } else {
@@ -2116,6 +2243,14 @@ public TSStatus dropSubscription(TUnsubscribeReq req) {
         : status;
   }
 
+  @Override
+  public TSStatus dropSubscriptionById(TDropSubscriptionReq req) {
+    TSStatus status = confirmLeader();
+    return status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()
+        ? subscriptionManager.getSubscriptionCoordinator().dropSubscription(req)
+        : status;
+  }
+
   @Override
   public TShowSubscriptionResp showSubscription(TShowSubscriptionReq req) {
     TSStatus status = confirmLeader();
@@ -2201,6 +2336,30 @@ public TSStatus migrateRegion(TMigrateRegionReq req) {
         : status;
   }
 
+  @Override
+  public TSStatus reconstructRegion(TReconstructRegionReq req) {
+    TSStatus status = confirmLeader();
+    return status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()
+        ? procedureManager.reconstructRegion(req)
+        : status;
+  }
+
+  @Override
+  public TSStatus extendRegion(TExtendRegionReq req) {
+    TSStatus status = confirmLeader();
+    return status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()
+        ? procedureManager.extendRegion(req)
+        : status;
+  }
+
+  @Override
+  public TSStatus removeRegion(TRemoveRegionReq req) {
+    TSStatus status = confirmLeader();
+    return status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()
+        ? procedureManager.removeRegion(req)
+        : status;
+  }
+
   @Override
   public TSStatus createCQ(TCreateCQReq req) {
     TSStatus status = confirmLeader();
@@ -2325,6 +2484,42 @@ public TSStatus transfer(List<TDataNodeLocation> newUnknownDataList) {
     return transferResult;
   }
 
+  @Override
+  public TSStatus createModel(TCreateModelReq req) {
+    TSStatus status = confirmLeader();
+    if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+      return status;
+    }
+    if (nodeManager.getRegisteredAINodes().isEmpty()) {
+      return new TSStatus(TSStatusCode.NO_REGISTERED_AI_NODE_ERROR.getStatusCode())
+          .setMessage("There is no available AINode! Try to start one.");
+    }
+    return modelManager.createModel(req);
+  }
+
+  @Override
+  public TSStatus dropModel(TDropModelReq req) {
+    TSStatus status = confirmLeader();
+    return status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()
+        ? modelManager.dropModel(req)
+        : status;
+  }
+
+  @Override
+  public TShowModelResp showModel(TShowModelReq req) {
+    TSStatus status = confirmLeader();
+    return status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()
+        ? modelManager.showModel(req)
+        : new TShowModelResp(status, Collections.emptyList());
+  }
+
+  @Override
+  public TGetModelInfoResp getModelInfo(TGetModelInfoReq req) {
+    TSStatus status = confirmLeader();
+    return status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()
+        ? modelManager.getModelInfo(req)
+        : new TGetModelInfoResp(status);
+  }
+
   @Override
   public TSStatus setSpaceQuota(TSetSpaceQuotaReq req) {
     TSStatus status = confirmLeader();
@@ -2367,4 +2562,36 @@ public TThrottleQuotaResp getThrottleQuota() {
         ? clusterQuotaManager.getThrottleQuota()
         : new TThrottleQuotaResp(status);
   }
+
+  @Override
+  public TSStatus pushHeartbeat(final int dataNodeId, final TPipeHeartbeatResp resp) {
+    final TSStatus status = confirmLeader();
+    if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+      return status;
+    }
+    pipeManager
+        .getPipeRuntimeCoordinator()
+        .parseHeartbeat(
+            dataNodeId,
+            resp.getPipeMetaList(),
+            resp.getPipeCompletedList(),
+            resp.getPipeRemainingEventCountList(),
+            resp.getPipeRemainingTimeList());
+    return StatusUtils.OK;
+  }
+
+  @Override
+  public DataSet registerAINode(TAINodeRegisterReq req) {
+    TSStatus status = confirmLeader();
+    if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+      status = ClusterNodeStartUtils.confirmAINodeRegistration(req, this);
+      if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+        return nodeManager.registerAINode(req);
+      }
+    }
+    AINodeRegisterResp resp = new AINodeRegisterResp();
+    resp.setStatus(status);
+    resp.setConfigNodeList(getNodeManager().getRegisteredConfigNodes());
+    return resp;
+  }
 }
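Nearly every RPC entry point added to ConfigManager above follows the same guard: confirmLeader() runs first, the real action is dispatched only on SUCCESS_STATUS, and any other status (for example the new REDIRECTION_RECOMMEND returned while the node is Removing) is handed back to the caller untouched. The sketch below distills that shape outside the patch; Status and LeaderGuard are illustrative stand-ins written for this note, not IoTDB types, and the numeric codes are placeholders.

    import java.util.function.Supplier;

    // Minimal stand-in for TSStatus/TSStatusCode; illustrative only.
    final class Status {
        static final int SUCCESS = 200;
        static final int REDIRECTION_RECOMMEND = 400; // hypothetical code value
        final int code;
        final String message;
        Status(int code, String message) { this.code = code; this.message = message; }
        boolean ok() { return code == SUCCESS; }
    }

    final class LeaderGuard {
        private final Supplier<Status> confirmLeader;
        LeaderGuard(Supplier<Status> confirmLeader) { this.confirmLeader = confirmLeader; }

        // Mirrors the "confirmLeader(); dispatch on SUCCESS, else return the
        // status unchanged" shape used by the new AINode and model endpoints.
        Status run(Supplier<Status> action) {
            Status status = confirmLeader.get();
            return status.ok() ? action.get() : status;
        }

        public static void main(String[] args) {
            LeaderGuard asLeader = new LeaderGuard(() -> new Status(Status.SUCCESS, "ok"));
            LeaderGuard asFollower =
                new LeaderGuard(() -> new Status(Status.REDIRECTION_RECOMMEND, "not leader"));
            // Prints "model created", then "not leader": the action never runs on a follower.
            System.out.println(asLeader.run(() -> new Status(Status.SUCCESS, "model created")).message);
            System.out.println(asFollower.run(() -> new Status(Status.SUCCESS, "unreachable")).message);
        }
    }

Keeping the guard first is what makes the createModel ordering fix above matter: cluster state such as the registered-AINode list is only trustworthy after leadership has been confirmed.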
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java
index 366be81c9a342..88731538be29d 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java
@@ -22,6 +22,7 @@
 import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation;
 import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
 import org.apache.iotdb.common.rpc.thrift.TFlushReq;
+import org.apache.iotdb.common.rpc.thrift.TPipeHeartbeatResp;
 import org.apache.iotdb.common.rpc.thrift.TSStatus;
 import org.apache.iotdb.common.rpc.thrift.TSetConfigurationReq;
 import org.apache.iotdb.common.rpc.thrift.TSetSpaceQuotaReq;
@@ -29,7 +30,8 @@
 import org.apache.iotdb.commons.cluster.NodeStatus;
 import org.apache.iotdb.commons.path.PartialPath;
 import org.apache.iotdb.commons.path.PathPatternTree;
-import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan;
+import org.apache.iotdb.confignode.consensus.request.read.ainode.GetAINodeConfigurationPlan;
+import org.apache.iotdb.confignode.consensus.request.read.auth.AuthorReadPlan;
 import org.apache.iotdb.confignode.consensus.request.read.database.CountDatabasePlan;
 import org.apache.iotdb.confignode.consensus.request.read.database.GetDatabasePlan;
 import org.apache.iotdb.confignode.consensus.request.read.datanode.GetDataNodeConfigurationPlan;
@@ -37,6 +39,8 @@
 import org.apache.iotdb.confignode.consensus.request.read.partition.GetOrCreateDataPartitionPlan;
 import org.apache.iotdb.confignode.consensus.request.read.region.GetRegionInfoListPlan;
 import org.apache.iotdb.confignode.consensus.request.read.ttl.ShowTTLPlan;
+import org.apache.iotdb.confignode.consensus.request.write.ainode.RemoveAINodePlan;
+import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan;
 import org.apache.iotdb.confignode.consensus.request.write.confignode.RemoveConfigNodePlan;
 import org.apache.iotdb.confignode.consensus.request.write.database.DatabaseSchemaPlan;
 import org.apache.iotdb.confignode.consensus.request.write.database.SetDataReplicationFactorPlan;
@@ -52,6 +56,9 @@
 import org.apache.iotdb.confignode.manager.pipe.coordinator.PipeManager;
 import org.apache.iotdb.confignode.manager.schema.ClusterSchemaManager;
 import org.apache.iotdb.confignode.manager.subscription.SubscriptionManager;
+import org.apache.iotdb.confignode.rpc.thrift.TAINodeRegisterReq;
+import org.apache.iotdb.confignode.rpc.thrift.TAINodeRestartReq;
+import org.apache.iotdb.confignode.rpc.thrift.TAINodeRestartResp;
 import org.apache.iotdb.confignode.rpc.thrift.TAlterLogicalViewReq;
 import org.apache.iotdb.confignode.rpc.thrift.TAlterPipeReq;
 import org.apache.iotdb.confignode.rpc.thrift.TAlterSchemaTemplateReq;
@@ -63,6 +70,7 @@
 import org.apache.iotdb.confignode.rpc.thrift.TCreateCQReq;
 import org.apache.iotdb.confignode.rpc.thrift.TCreateConsumerReq;
 import org.apache.iotdb.confignode.rpc.thrift.TCreateFunctionReq;
+import org.apache.iotdb.confignode.rpc.thrift.TCreateModelReq;
 import org.apache.iotdb.confignode.rpc.thrift.TCreatePipePluginReq;
 import org.apache.iotdb.confignode.rpc.thrift.TCreatePipeReq;
 import org.apache.iotdb.confignode.rpc.thrift.TCreateSchemaTemplateReq;
@@ -77,10 +85,13 @@
 import org.apache.iotdb.confignode.rpc.thrift.TDeleteLogicalViewReq;
 import org.apache.iotdb.confignode.rpc.thrift.TDeleteTimeSeriesReq;
 import org.apache.iotdb.confignode.rpc.thrift.TDropCQReq;
+import org.apache.iotdb.confignode.rpc.thrift.TDropModelReq;
 import org.apache.iotdb.confignode.rpc.thrift.TDropPipePluginReq;
 import org.apache.iotdb.confignode.rpc.thrift.TDropPipeReq;
+import org.apache.iotdb.confignode.rpc.thrift.TDropSubscriptionReq;
 import org.apache.iotdb.confignode.rpc.thrift.TDropTopicReq;
 import org.apache.iotdb.confignode.rpc.thrift.TDropTriggerReq;
+import org.apache.iotdb.confignode.rpc.thrift.TExtendRegionReq;
 import org.apache.iotdb.confignode.rpc.thrift.TGetAllPipeInfoResp;
 import org.apache.iotdb.confignode.rpc.thrift.TGetAllSubscriptionInfoResp;
 import org.apache.iotdb.confignode.rpc.thrift.TGetAllTemplatesResp;
@@ -90,6 +101,8 @@
 import org.apache.iotdb.confignode.rpc.thrift.TGetJarInListReq;
 import org.apache.iotdb.confignode.rpc.thrift.TGetJarInListResp;
 import org.apache.iotdb.confignode.rpc.thrift.TGetLocationForTriggerResp;
+import org.apache.iotdb.confignode.rpc.thrift.TGetModelInfoReq;
+import org.apache.iotdb.confignode.rpc.thrift.TGetModelInfoResp;
 import org.apache.iotdb.confignode.rpc.thrift.TGetPathsSetTemplatesReq;
 import org.apache.iotdb.confignode.rpc.thrift.TGetPathsSetTemplatesResp;
 import org.apache.iotdb.confignode.rpc.thrift.TGetPipePluginTableResp;
@@ -106,16 +119,21 @@
 import org.apache.iotdb.confignode.rpc.thrift.TPermissionInfoResp;
 import org.apache.iotdb.confignode.rpc.thrift.TPipeConfigTransferReq;
 import org.apache.iotdb.confignode.rpc.thrift.TPipeConfigTransferResp;
+import org.apache.iotdb.confignode.rpc.thrift.TReconstructRegionReq;
 import org.apache.iotdb.confignode.rpc.thrift.TRegionRouteMapResp;
+import org.apache.iotdb.confignode.rpc.thrift.TRemoveRegionReq;
 import org.apache.iotdb.confignode.rpc.thrift.TSchemaNodeManagementResp;
 import org.apache.iotdb.confignode.rpc.thrift.TSchemaPartitionTableResp;
 import org.apache.iotdb.confignode.rpc.thrift.TSetDataNodeStatusReq;
 import org.apache.iotdb.confignode.rpc.thrift.TSetSchemaTemplateReq;
+import org.apache.iotdb.confignode.rpc.thrift.TShowAINodesResp;
 import org.apache.iotdb.confignode.rpc.thrift.TShowCQResp;
 import org.apache.iotdb.confignode.rpc.thrift.TShowClusterResp;
 import org.apache.iotdb.confignode.rpc.thrift.TShowConfigNodesResp;
 import org.apache.iotdb.confignode.rpc.thrift.TShowDataNodesResp;
 import org.apache.iotdb.confignode.rpc.thrift.TShowDatabaseResp;
+import org.apache.iotdb.confignode.rpc.thrift.TShowModelReq;
+import org.apache.iotdb.confignode.rpc.thrift.TShowModelResp;
 import org.apache.iotdb.confignode.rpc.thrift.TShowPipeReq;
 import org.apache.iotdb.confignode.rpc.thrift.TShowPipeResp;
 import org.apache.iotdb.confignode.rpc.thrift.TShowSubscriptionReq;
@@ -130,6 +148,7 @@
 import org.apache.iotdb.rpc.TSStatusCode;
 
 import java.util.List;
+import java.util.Map;
 
 /**
  * A subset of services provided by {@link ConfigManager}. For use internally only, passed to
@@ -209,6 +228,13 @@ public interface IManager {
    */
   CQManager getCQManager();
 
+  /**
+   * Get {@link ModelManager}.
+   *
+   * @return {@link ModelManager} instance
+   */
+  ModelManager getModelManager();
+
   /**
    * Get {@link PipeManager}.
    *
@@ -275,6 +301,30 @@ public interface IManager {
    */
   DataSet removeDataNode(RemoveDataNodePlan removeDataNodePlan);
 
+  /**
+   * Register AINode.
+   *
+   * @param req TAINodeRegisterReq
+   * @return AINodeConfigurationDataSet
+   */
+  DataSet registerAINode(TAINodeRegisterReq req);
+
+  /**
+   * Restart AINode.
+   *
+   * @param req TAINodeRestartReq
+   * @return SUCCESS_STATUS if the AINode is allowed to restart, REJECT_START otherwise
+   */
+  TAINodeRestartResp restartAINode(TAINodeRestartReq req);
+
+  /**
+   * Remove AINode.
+   *
+   * @param removeAINodePlan RemoveAINodePlan
+   * @return AINodeToStatusResp
+   */
+  TSStatus removeAINode(RemoveAINodePlan removeAINodePlan);
+
   /**
    * Report that the specified DataNode will be shutdown.
    *
@@ -291,6 +341,15 @@ public interface IManager {
    */
   DataSet getDataNodeConfiguration(GetDataNodeConfigurationPlan getDataNodeConfigurationPlan);
 
+  /**
+   * Get AINode info.
+   *
+   * @param getAINodeConfigurationPlan which contains a specific AINode id, or -1 to get all
+   *     AINodes' configurations.
+   * @return AINodeConfigurationDataSet
+   */
+  DataSet getAINodeConfiguration(GetAINodeConfigurationPlan getAINodeConfigurationPlan);
+
   /**
    * Get Cluster Nodes' information.
    *
@@ -393,14 +452,14 @@ TDataPartitionTableResp getOrCreateDataPartition(
    *
    * @return status
    */
-  TSStatus operatePermission(AuthorPlan authorPlan);
+  TSStatus operatePermission(final AuthorPlan authorPlan);
 
   /**
    * Query Permission.
    *
    * @return PermissionInfoDataSet
    */
-  DataSet queryPermission(AuthorPlan authorPlan);
+  DataSet queryPermission(final AuthorReadPlan authorPlan);
 
   /** login. */
   TPermissionInfoResp login(String username, String password);
@@ -481,6 +540,9 @@ TDataPartitionTableResp getOrCreateDataPartition(
   /** Flush on all DataNodes. */
   TSStatus flush(TFlushReq req);
 
+  /** Flush on the specified DataNodes. */
+  TSStatus flushOnSpecificDN(TFlushReq req, Map<Integer, TDataNodeLocation> dataNodeLocationMap);
+
   /** Clear cache on all DataNodes. */
   TSStatus clearCache();
 
@@ -525,6 +587,9 @@ TDataPartitionTableResp getOrCreateDataPartition(
   /** Show (data/schemaengine) regions. */
   DataSet showRegion(GetRegionInfoListPlan getRegionInfoListPlan);
 
+  /** Show AINodes. */
+  TShowAINodesResp showAINodes();
+
   /** Show DataNodes. */
   TShowDataNodesResp showDataNodes();
 
@@ -689,6 +754,8 @@ TDataPartitionTableResp getOrCreateDataPartition(
 
   TSStatus dropSubscription(TUnsubscribeReq req);
 
+  TSStatus dropSubscriptionById(TDropSubscriptionReq req);
+
   TShowSubscriptionResp showSubscription(TShowSubscriptionReq req);
 
   TGetAllSubscriptionInfoResp getAllSubscriptionInfo();
@@ -727,6 +794,12 @@ TDataPartitionTableResp getOrCreateDataPartition(
 
   TSStatus migrateRegion(TMigrateRegionReq req);
 
+  TSStatus reconstructRegion(TReconstructRegionReq req);
+
+  TSStatus extendRegion(TExtendRegionReq req);
+
+  TSStatus removeRegion(TRemoveRegionReq req);
+
   TSStatus createCQ(TCreateCQReq req);
 
   TSStatus dropCQ(TDropCQReq req);
@@ -737,6 +810,20 @@ TDataPartitionTableResp getOrCreateDataPartition(
 
   TSStatus transfer(List<TDataNodeLocation> newUnknownDataList);
 
+  /** Create a model. */
+  TSStatus createModel(TCreateModelReq req);
+
+  /** Drop a model. */
+  TSStatus dropModel(TDropModelReq req);
+
+  /** Return the model table. */
+  TShowModelResp showModel(TShowModelReq req);
+
+  /** Get the information of the specified model. */
+  TGetModelInfoResp getModelInfo(TGetModelInfoReq req);
+
   /** Set space quota. */
   TSStatus setSpaceQuota(TSetSpaceQuotaReq req);
+
+  TSStatus pushHeartbeat(final int dataNodeId, final TPipeHeartbeatResp resp);
 }
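The reworked setConfiguration above, like the new flushOnSpecificDN, ends by prepending the local node's status to the statuses returned by the other nodes and squashing the list into one result via RpcUtils.squashResponseStatusList. A condensed sketch of that aggregation under stated assumptions: squash() below is a hypothetical analogue written for this note (statuses reduced to plain int codes), not the RpcUtils implementation.

    import java.util.ArrayList;
    import java.util.List;

    final class StatusSquash {
        static final int SUCCESS = 200;
        static final int EXECUTE_STATEMENT_ERROR = 301; // hypothetical code value

        // Hypothetical analogue of RpcUtils.squashResponseStatusList: collapse
        // many per-node status codes into one, surfacing the first failure.
        static int squash(List<Integer> statuses) {
            return statuses.stream().filter(s -> s != SUCCESS).findFirst().orElse(SUCCESS);
        }

        public static void main(String[] args) {
            int localStatus = SUCCESS;                                        // applying config locally
            List<Integer> others = List.of(SUCCESS, EXECUTE_STATEMENT_ERROR); // from other nodes

            // Same shape as the patched setConfiguration: local status first, then the rest.
            List<Integer> all = new ArrayList<>(others.size() + 1);
            all.add(localStatus);
            all.addAll(others);
            System.out.println(squash(all)); // prints 301: one failing node fails the call
        }
    }

The point of including the local status in the list is that a local failure (for example, a missing configuration file) is no longer silently overwritten by successful responses from the other nodes.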
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ModelManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ModelManager.java
new file mode 100644
index 0000000000000..3c19dfdb14adc
--- /dev/null
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ModelManager.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.manager;
+
+import org.apache.iotdb.common.rpc.thrift.TSStatus;
+import org.apache.iotdb.commons.model.ModelType;
+import org.apache.iotdb.confignode.consensus.request.read.model.GetModelInfoPlan;
+import org.apache.iotdb.confignode.consensus.request.read.model.ShowModelPlan;
+import org.apache.iotdb.confignode.consensus.response.model.GetModelInfoResp;
+import org.apache.iotdb.confignode.consensus.response.model.ModelTableResp;
+import org.apache.iotdb.confignode.persistence.ModelInfo;
+import org.apache.iotdb.confignode.rpc.thrift.TCreateModelReq;
+import org.apache.iotdb.confignode.rpc.thrift.TDropModelReq;
+import org.apache.iotdb.confignode.rpc.thrift.TGetModelInfoReq;
+import org.apache.iotdb.confignode.rpc.thrift.TGetModelInfoResp;
+import org.apache.iotdb.confignode.rpc.thrift.TShowModelReq;
+import org.apache.iotdb.confignode.rpc.thrift.TShowModelResp;
+import org.apache.iotdb.consensus.common.DataSet;
+import org.apache.iotdb.consensus.exception.ConsensusException;
+import org.apache.iotdb.rpc.TSStatusCode;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+
+public class ModelManager {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(ModelManager.class);
+
+  private final ConfigManager configManager;
+  private final ModelInfo modelInfo;
+
+  public ModelManager(ConfigManager configManager, ModelInfo modelInfo) {
+    this.configManager = configManager;
+    this.modelInfo = modelInfo;
+  }
+
+  public TSStatus createModel(TCreateModelReq req) {
+    if (modelInfo.contain(req.modelName)) {
+      return new TSStatus(TSStatusCode.MODEL_EXIST_ERROR.getStatusCode())
+          .setMessage(String.format("Model name %s already exists", req.modelName));
+    }
+    return configManager.getProcedureManager().createModel(req.modelName, req.uri);
+  }
+
+  public TSStatus dropModel(TDropModelReq req) {
+    if (modelInfo.checkModelType(req.getModelId()) != ModelType.USER_DEFINED) {
+      return new TSStatus(TSStatusCode.MODEL_EXIST_ERROR.getStatusCode())
+          .setMessage(String.format("Built-in model %s can't be removed", req.modelId));
+    }
+    if (!modelInfo.contain(req.modelId)) {
+      return new TSStatus(TSStatusCode.MODEL_EXIST_ERROR.getStatusCode())
+          .setMessage(String.format("Model name %s doesn't exist", req.modelId));
+    }
+    return configManager.getProcedureManager().dropModel(req.getModelId());
+  }
+
+  public TShowModelResp showModel(final TShowModelReq req) {
+    try {
+      final DataSet response = configManager.getConsensusManager().read(new ShowModelPlan(req));
+      return ((ModelTableResp) response).convertToThriftResponse();
+    } catch (final ConsensusException e) {
+      LOGGER.warn(
+          String.format("Unexpected error happened while showing model %s: ", req.getModelId()), e);
+      // consensus layer related errors
+      final TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
+      res.setMessage(e.getMessage());
+      return new TShowModelResp(res, Collections.emptyList());
+    } catch (final IOException e) {
+      LOGGER.warn("Fail to get ModelTable", e);
+      return new TShowModelResp(
+          new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode())
+              .setMessage(e.getMessage()),
+          Collections.emptyList());
+    }
+  }
+
+  public TGetModelInfoResp getModelInfo(TGetModelInfoReq req) {
+    try {
+      GetModelInfoResp response =
+          (GetModelInfoResp) configManager.getConsensusManager().read(new GetModelInfoPlan(req));
+      if (response.getStatus().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+        return new TGetModelInfoResp(response.getStatus());
+      }
+      int aiNodeId = response.getTargetAINodeId();
+      if (aiNodeId != 0) {
+        response.setTargetAINodeAddress(
+            configManager.getNodeManager().getRegisteredAINode(aiNodeId));
+      } else {
+        if (configManager.getNodeManager().getRegisteredAINodes().isEmpty()) {
+          return new TGetModelInfoResp(
+              new TSStatus(TSStatusCode.GET_MODEL_INFO_ERROR.getStatusCode())
+                  .setMessage("There is no AINode available"));
+        }
+        response.setTargetAINodeAddress(
+            configManager.getNodeManager().getRegisteredAINodes().get(0));
+      }
+      return response.convertToThriftResponse();
+    } catch (ConsensusException e) {
+      LOGGER.warn("Unexpected error happened while getting model: ", e);
+      // consensus layer related errors
+      TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
+      res.setMessage(e.getMessage());
+      return new TGetModelInfoResp(res);
+    }
+  }
+
+  public List<Integer> getModelDistributions(String modelName) {
+    return modelInfo.getNodeIds(modelName);
+  }
+}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/PermissionManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/PermissionManager.java
index a6f0e23fcffbb..9592692d1bc45 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/PermissionManager.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/PermissionManager.java
@@ -24,7 +24,8 @@
 import org.apache.iotdb.commons.auth.AuthException;
 import org.apache.iotdb.commons.path.PartialPath;
 import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
-import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan;
+import org.apache.iotdb.confignode.consensus.request.read.auth.AuthorReadPlan;
+import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan;
 import org.apache.iotdb.confignode.consensus.response.auth.PermissionInfoResp;
 import org.apache.iotdb.confignode.manager.consensus.ConsensusManager;
 import org.apache.iotdb.confignode.persistence.AuthorInfo;
@@ -90,12 +91,12 @@ public TSStatus operatePermission(AuthorPlan authorPlan, boolean isGeneratedByPi
    * @param authorPlan AuthorReq
    * @return PermissionInfoResp
    */
-  public PermissionInfoResp queryPermission(AuthorPlan authorPlan) {
+  public PermissionInfoResp queryPermission(final AuthorReadPlan authorPlan) {
     try {
       return (PermissionInfoResp) getConsensusManager().read(authorPlan);
-    } catch (ConsensusException e) {
+    } catch (final ConsensusException e) {
       LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e);
-      TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
+      final TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
       res.setMessage(e.getMessage());
       return new PermissionInfoResp(res);
     }
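ModelManager.getModelInfo above resolves the serving AINode in two steps: if the node id recorded with the model is non-zero it is used directly, otherwise the first registered AINode is chosen, and the call fails when none is registered. A self-contained sketch of that selection logic; the Integer-id registry below is an illustrative simplification of NodeManager written for this note, not an IoTDB API.

    import java.util.List;
    import java.util.Optional;

    final class AINodeSelector {
        // Illustrative stand-in for NodeManager's registered-AINode list.
        private final List<Integer> registeredAINodeIds;

        AINodeSelector(List<Integer> registeredAINodeIds) {
            this.registeredAINodeIds = registeredAINodeIds;
        }

        // Mirrors getModelInfo: a recorded target id of 0 means "no pinned
        // node", so fall back to the first registered AINode, if any.
        Optional<Integer> resolve(int recordedTargetId) {
            if (recordedTargetId != 0) {
                return Optional.of(recordedTargetId);
            }
            return registeredAINodeIds.isEmpty()
                ? Optional.empty() // corresponds to GET_MODEL_INFO_ERROR in the patch
                : Optional.of(registeredAINodeIds.get(0));
        }

        public static void main(String[] args) {
            AINodeSelector selector = new AINodeSelector(List.of(7, 9));
            System.out.println(selector.resolve(3)); // Optional[3]  (pinned node wins)
            System.out.println(selector.resolve(0)); // Optional[7]  (first registered)
            System.out.println(new AINodeSelector(List.of()).resolve(0)); // Optional.empty
        }
    }
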
org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.cluster.NodeStatus; import org.apache.iotdb.commons.conf.CommonConfig; @@ -32,7 +33,7 @@ import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathDeserializeUtil; import org.apache.iotdb.commons.path.PathPatternTree; -import org.apache.iotdb.commons.pipe.plugin.meta.PipePluginMeta; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.PipePluginMeta; import org.apache.iotdb.commons.schema.view.viewExpression.ViewExpression; import org.apache.iotdb.commons.service.metric.MetricService; import org.apache.iotdb.commons.trigger.TriggerInformation; @@ -40,7 +41,8 @@ import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; -import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan; +import org.apache.iotdb.confignode.consensus.request.write.ainode.RemoveAINodePlan; +import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.consensus.request.write.confignode.RemoveConfigNodePlan; import org.apache.iotdb.confignode.consensus.request.write.database.SetTTLPlan; import org.apache.iotdb.confignode.consensus.request.write.datanode.RemoveDataNodePlan; @@ -48,15 +50,20 @@ import org.apache.iotdb.confignode.consensus.request.write.region.CreateRegionGroupsPlan; import org.apache.iotdb.confignode.manager.partition.PartitionManager; import org.apache.iotdb.confignode.persistence.ProcedureInfo; +import org.apache.iotdb.confignode.procedure.PartitionTableAutoCleaner; import org.apache.iotdb.confignode.procedure.Procedure; import org.apache.iotdb.confignode.procedure.ProcedureExecutor; import org.apache.iotdb.confignode.procedure.ProcedureMetrics; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.env.RegionMaintainHandler; +import org.apache.iotdb.confignode.procedure.env.RemoveDataNodeHandler; import org.apache.iotdb.confignode.procedure.impl.cq.CreateCQProcedure; +import org.apache.iotdb.confignode.procedure.impl.model.CreateModelProcedure; +import org.apache.iotdb.confignode.procedure.impl.model.DropModelProcedure; import org.apache.iotdb.confignode.procedure.impl.node.AddConfigNodeProcedure; +import org.apache.iotdb.confignode.procedure.impl.node.RemoveAINodeProcedure; import org.apache.iotdb.confignode.procedure.impl.node.RemoveConfigNodeProcedure; -import org.apache.iotdb.confignode.procedure.impl.node.RemoveDataNodeProcedure; +import org.apache.iotdb.confignode.procedure.impl.node.RemoveDataNodesProcedure; import org.apache.iotdb.confignode.procedure.impl.pipe.plugin.CreatePipePluginProcedure; import org.apache.iotdb.confignode.procedure.impl.pipe.plugin.DropPipePluginProcedure; import org.apache.iotdb.confignode.procedure.impl.pipe.runtime.PipeHandleLeaderChangeProcedure; @@ -67,8 +74,13 @@ import org.apache.iotdb.confignode.procedure.impl.pipe.task.DropPipeProcedureV2; import org.apache.iotdb.confignode.procedure.impl.pipe.task.StartPipeProcedureV2; import org.apache.iotdb.confignode.procedure.impl.pipe.task.StopPipeProcedureV2; +import org.apache.iotdb.confignode.procedure.impl.region.AddRegionPeerProcedure; import org.apache.iotdb.confignode.procedure.impl.region.CreateRegionGroupsProcedure; +import 
org.apache.iotdb.confignode.procedure.impl.region.ReconstructRegionProcedure; import org.apache.iotdb.confignode.procedure.impl.region.RegionMigrateProcedure; +import org.apache.iotdb.confignode.procedure.impl.region.RegionMigrationPlan; +import org.apache.iotdb.confignode.procedure.impl.region.RegionOperationProcedure; +import org.apache.iotdb.confignode.procedure.impl.region.RemoveRegionPeerProcedure; import org.apache.iotdb.confignode.procedure.impl.schema.AlterLogicalViewProcedure; import org.apache.iotdb.confignode.procedure.impl.schema.DeactivateTemplateProcedure; import org.apache.iotdb.confignode.procedure.impl.schema.DeleteDatabaseProcedure; @@ -107,7 +119,10 @@ import org.apache.iotdb.confignode.rpc.thrift.TDatabaseSchema; import org.apache.iotdb.confignode.rpc.thrift.TDeleteLogicalViewReq; import org.apache.iotdb.confignode.rpc.thrift.TDropPipePluginReq; +import org.apache.iotdb.confignode.rpc.thrift.TExtendRegionReq; import org.apache.iotdb.confignode.rpc.thrift.TMigrateRegionReq; +import org.apache.iotdb.confignode.rpc.thrift.TReconstructRegionReq; +import org.apache.iotdb.confignode.rpc.thrift.TRemoveRegionReq; import org.apache.iotdb.confignode.rpc.thrift.TSubscribeReq; import org.apache.iotdb.confignode.rpc.thrift.TUnsubscribeReq; import org.apache.iotdb.consensus.ConsensusFactory; @@ -116,25 +131,26 @@ import org.apache.iotdb.rpc.RpcUtils; import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.ratis.util.AutoCloseableLock; import org.apache.tsfile.utils.Binary; import org.apache.tsfile.utils.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.annotation.Nullable; + import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Optional; +import java.util.Set; import java.util.concurrent.ScheduledExecutorService; import java.util.stream.Collectors; - -import static org.apache.iotdb.confignode.conf.ConfigNodeConstant.REGION_MIGRATE_PROCESS; +import java.util.stream.Stream; public class ProcedureManager { private static final Logger LOGGER = LoggerFactory.getLogger(ProcedureManager.class); @@ -143,7 +159,7 @@ public class ProcedureManager { ConfigNodeDescriptor.getInstance().getConf(); private static final CommonConfig COMMON_CONFIG = CommonDescriptor.getInstance().getConfig(); - public static final long PROCEDURE_WAIT_TIME_OUT = COMMON_CONFIG.getConnectionTimeoutInMS(); + public static final long PROCEDURE_WAIT_TIME_OUT = COMMON_CONFIG.getDnConnectionTimeoutInMS(); private static final int PROCEDURE_WAIT_RETRY_TIMEOUT = 10; private static final String PROCEDURE_TIMEOUT_MESSAGE = "Timed out to wait for procedure return. 
The procedure is still running."; @@ -157,6 +173,8 @@ public class ProcedureManager { private final long planSizeLimit; private ProcedureMetrics procedureMetrics; + private final PartitionTableAutoCleaner partitionTableCleaner; + public ProcedureManager(ConfigManager configManager, ProcedureInfo procedureInfo) { this.configManager = configManager; this.scheduler = new SimpleProcedureScheduler(); @@ -169,6 +187,7 @@ public ProcedureManager(ConfigManager configManager, ProcedureInfo procedureInfo .getConfigNodeRatisConsensusLogAppenderBufferSize() - IoTDBConstant.RAFT_LOG_BASIC_SIZE; this.procedureMetrics = new ProcedureMetrics(this); + this.partitionTableCleaner = new PartitionTableAutoCleaner<>(configManager); } public void startExecutor() { @@ -178,6 +197,7 @@ public void startExecutor() { executor.startCompletedCleaner( CONFIG_NODE_CONFIG.getProcedureCompletedCleanInterval(), CONFIG_NODE_CONFIG.getProcedureCompletedEvictTTL()); + executor.addInternalProcedure(partitionTableCleaner); store.start(); LOGGER.info("ProcedureManager is started successfully."); } @@ -191,6 +211,7 @@ public void stopExecutor() { store.stop(); LOGGER.info("ProcedureManager is stopped successfully."); } + executor.removeInternalProcedure(partitionTableCleaner); } } @@ -208,40 +229,43 @@ public TSStatus testSubProcedure() { public TSStatus deleteDatabases( List deleteSgSchemaList, boolean isGeneratedByPipe) { - List procedureIds = new ArrayList<>(); - for (TDatabaseSchema storageGroupSchema : deleteSgSchemaList) { + List procedures = new ArrayList<>(); + for (TDatabaseSchema databaseSchema : deleteSgSchemaList) { DeleteDatabaseProcedure deleteDatabaseProcedure = - new DeleteDatabaseProcedure(storageGroupSchema, isGeneratedByPipe); - long procedureId = this.executor.submitProcedure(deleteDatabaseProcedure); - procedureIds.add(procedureId); + new DeleteDatabaseProcedure(databaseSchema, isGeneratedByPipe); + DeleteDatabaseProcedure procedure = + new DeleteDatabaseProcedure(databaseSchema, isGeneratedByPipe); + this.executor.submitProcedure(procedure); + procedures.add(procedure); } - List procedureStatus = new ArrayList<>(); - boolean isSucceed = waitingProcedureFinished(procedureIds, procedureStatus); + List results = new ArrayList<>(procedures.size()); + procedures.forEach(procedure -> results.add(waitingProcedureFinished(procedure))); // Clear the previously deleted regions final PartitionManager partitionManager = getConfigManager().getPartitionManager(); partitionManager.getRegionMaintainer().submit(partitionManager::maintainRegionReplicas); - if (isSucceed) { + if (results.stream() + .allMatch(result -> result.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode())) { return StatusUtils.OK; } else { - return RpcUtils.getStatus(procedureStatus); + return RpcUtils.getStatus(results); } } public TSStatus deleteTimeSeries( String queryId, PathPatternTree patternTree, boolean isGeneratedByPipe) { - long procedureId = -1; + DeleteTimeSeriesProcedure procedure = null; synchronized (this) { boolean hasOverlappedTask = false; ProcedureType type; DeleteTimeSeriesProcedure deleteTimeSeriesProcedure; - for (Procedure procedure : executor.getProcedures().values()) { - type = ProcedureFactory.getProcedureType(procedure); + for (Procedure runningProcedure : executor.getProcedures().values()) { + type = ProcedureFactory.getProcedureType(runningProcedure); if (type == null || !type.equals(ProcedureType.DELETE_TIMESERIES_PROCEDURE)) { continue; } - deleteTimeSeriesProcedure = ((DeleteTimeSeriesProcedure) procedure); + 
deleteTimeSeriesProcedure = ((DeleteTimeSeriesProcedure) runningProcedure); if (queryId.equals(deleteTimeSeriesProcedure.getQueryId())) { - procedureId = deleteTimeSeriesProcedure.getProcId(); + procedure = deleteTimeSeriesProcedure; break; } if (patternTree.isOverlapWith(deleteTimeSeriesProcedure.getPatternTree())) { @@ -250,44 +274,36 @@ public TSStatus deleteTimeSeries( } } - if (procedureId == -1) { + if (procedure == null) { if (hasOverlappedTask) { return RpcUtils.getStatus( TSStatusCode.OVERLAP_WITH_EXISTING_TASK, "Some other task is deleting some target timeseries."); } - procedureId = - this.executor.submitProcedure( - new DeleteTimeSeriesProcedure(queryId, patternTree, isGeneratedByPipe)); + procedure = new DeleteTimeSeriesProcedure(queryId, patternTree, isGeneratedByPipe); + this.executor.submitProcedure(procedure); } } - List procedureStatus = new ArrayList<>(); - boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), procedureStatus); - if (isSucceed) { - return StatusUtils.OK; - } else { - return procedureStatus.get(0); - } + return waitingProcedureFinished(procedure); } public TSStatus deleteLogicalView(TDeleteLogicalViewReq req) { String queryId = req.getQueryId(); PathPatternTree patternTree = PathPatternTree.deserialize(ByteBuffer.wrap(req.getPathPatternTree())); - long procedureId = -1; + DeleteLogicalViewProcedure procedure = null; synchronized (this) { boolean hasOverlappedTask = false; ProcedureType type; DeleteLogicalViewProcedure deleteLogicalViewProcedure; - for (Procedure procedure : executor.getProcedures().values()) { - type = ProcedureFactory.getProcedureType(procedure); + for (Procedure runningProcedure : executor.getProcedures().values()) { + type = ProcedureFactory.getProcedureType(runningProcedure); if (type == null || !type.equals(ProcedureType.DELETE_LOGICAL_VIEW_PROCEDURE)) { continue; } - deleteLogicalViewProcedure = ((DeleteLogicalViewProcedure) procedure); + deleteLogicalViewProcedure = ((DeleteLogicalViewProcedure) runningProcedure); if (queryId.equals(deleteLogicalViewProcedure.getQueryId())) { - procedureId = deleteLogicalViewProcedure.getProcId(); + procedure = deleteLogicalViewProcedure; break; } if (patternTree.isOverlapWith(deleteLogicalViewProcedure.getPatternTree())) { @@ -296,35 +312,26 @@ public TSStatus deleteLogicalView(TDeleteLogicalViewReq req) { } } - if (procedureId == -1) { + if (procedure == null) { if (hasOverlappedTask) { return RpcUtils.getStatus( TSStatusCode.OVERLAP_WITH_EXISTING_TASK, "Some other task is deleting some target views."); } - procedureId = - this.executor.submitProcedure( - new DeleteLogicalViewProcedure( - queryId, - patternTree, - req.isSetIsGeneratedByPipe() && req.isIsGeneratedByPipe())); + procedure = + new DeleteLogicalViewProcedure( + queryId, patternTree, req.isSetIsGeneratedByPipe() && req.isIsGeneratedByPipe()); + this.executor.submitProcedure(procedure); } } - List procedureStatus = new ArrayList<>(); - boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), procedureStatus); - if (isSucceed) { - return StatusUtils.OK; - } else { - return procedureStatus.get(0); - } + return waitingProcedureFinished(procedure); } - public TSStatus alterLogicalView(TAlterLogicalViewReq req) { - String queryId = req.getQueryId(); - ByteBuffer byteBuffer = ByteBuffer.wrap(req.getViewBinary()); - Map viewPathToSourceMap = new HashMap<>(); - int size = byteBuffer.getInt(); + public TSStatus alterLogicalView(final TAlterLogicalViewReq req) { + final String queryId 
= req.getQueryId(); + final ByteBuffer byteBuffer = ByteBuffer.wrap(req.getViewBinary()); + final Map viewPathToSourceMap = new HashMap<>(); + final int size = byteBuffer.getInt(); PartialPath path; ViewExpression viewExpression; for (int i = 0; i < size; i++) { @@ -333,56 +340,49 @@ public TSStatus alterLogicalView(TAlterLogicalViewReq req) { viewPathToSourceMap.put(path, viewExpression); } - long procedureId = -1; + AlterLogicalViewProcedure procedure = null; synchronized (this) { ProcedureType type; AlterLogicalViewProcedure alterLogicalViewProcedure; - for (Procedure procedure : executor.getProcedures().values()) { - type = ProcedureFactory.getProcedureType(procedure); + for (Procedure runningProcedure : executor.getProcedures().values()) { + type = ProcedureFactory.getProcedureType(runningProcedure); if (type == null || !type.equals(ProcedureType.ALTER_LOGICAL_VIEW_PROCEDURE)) { continue; } - alterLogicalViewProcedure = ((AlterLogicalViewProcedure) procedure); + alterLogicalViewProcedure = ((AlterLogicalViewProcedure) runningProcedure); if (queryId.equals(alterLogicalViewProcedure.getQueryId())) { - procedureId = alterLogicalViewProcedure.getProcId(); + procedure = alterLogicalViewProcedure; break; } } - if (procedureId == -1) { - procedureId = - this.executor.submitProcedure( - new AlterLogicalViewProcedure( - queryId, - viewPathToSourceMap, - req.isSetIsGeneratedByPipe() && req.isIsGeneratedByPipe())); + if (procedure == null) { + procedure = + new AlterLogicalViewProcedure( + queryId, + viewPathToSourceMap, + req.isSetIsGeneratedByPipe() && req.isIsGeneratedByPipe()); + this.executor.submitProcedure(procedure); } } - List procedureStatus = new ArrayList<>(); - boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), procedureStatus); - if (isSucceed) { - return StatusUtils.OK; - } else { - return procedureStatus.get(0); - } + return waitingProcedureFinished(procedure); } public TSStatus setSchemaTemplate( String queryId, String templateName, String templateSetPath, boolean isGeneratedByPipe) { - long procedureId = -1; + SetTemplateProcedure procedure = null; synchronized (this) { boolean hasOverlappedTask = false; ProcedureType type; SetTemplateProcedure setTemplateProcedure; - for (Procedure procedure : executor.getProcedures().values()) { - type = ProcedureFactory.getProcedureType(procedure); + for (Procedure runningProcedure : executor.getProcedures().values()) { + type = ProcedureFactory.getProcedureType(runningProcedure); if (type == null || !type.equals(ProcedureType.SET_TEMPLATE_PROCEDURE)) { continue; } - setTemplateProcedure = (SetTemplateProcedure) procedure; + setTemplateProcedure = (SetTemplateProcedure) runningProcedure; if (queryId.equals(setTemplateProcedure.getQueryId())) { - procedureId = setTemplateProcedure.getProcId(); + procedure = setTemplateProcedure; break; } if (templateSetPath.equals(setTemplateProcedure.getTemplateSetPath())) { @@ -391,43 +391,35 @@ public TSStatus setSchemaTemplate( } } - if (procedureId == -1) { + if (procedure == null) { if (hasOverlappedTask) { return RpcUtils.getStatus( TSStatusCode.OVERLAP_WITH_EXISTING_TASK, "Some other task is setting template on target path."); } - procedureId = - this.executor.submitProcedure( - new SetTemplateProcedure( - queryId, templateName, templateSetPath, isGeneratedByPipe)); + procedure = + new SetTemplateProcedure(queryId, templateName, templateSetPath, isGeneratedByPipe); + this.executor.submitProcedure(procedure); } } - List procedureStatus = new ArrayList<>(); - boolean 
isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), procedureStatus); - if (isSucceed) { - return StatusUtils.OK; - } else { - return procedureStatus.get(0); - } + return waitingProcedureFinished(procedure); } public TSStatus deactivateTemplate( String queryId, Map> templateSetInfo, boolean isGeneratedByPipe) { - long procedureId = -1; + DeactivateTemplateProcedure procedure = null; synchronized (this) { boolean hasOverlappedTask = false; ProcedureType type; DeactivateTemplateProcedure deactivateTemplateProcedure; - for (Procedure procedure : executor.getProcedures().values()) { - type = ProcedureFactory.getProcedureType(procedure); + for (Procedure runningProcedure : executor.getProcedures().values()) { + type = ProcedureFactory.getProcedureType(runningProcedure); if (type == null || !type.equals(ProcedureType.DEACTIVATE_TEMPLATE_PROCEDURE)) { continue; } - deactivateTemplateProcedure = (DeactivateTemplateProcedure) procedure; + deactivateTemplateProcedure = (DeactivateTemplateProcedure) runningProcedure; if (queryId.equals(deactivateTemplateProcedure.getQueryId())) { - procedureId = deactivateTemplateProcedure.getProcId(); + procedure = deactivateTemplateProcedure; break; } for (PartialPath pattern : templateSetInfo.keySet()) { @@ -447,42 +439,34 @@ public TSStatus deactivateTemplate( } } - if (procedureId == -1) { + if (procedure == null) { if (hasOverlappedTask) { return RpcUtils.getStatus( TSStatusCode.OVERLAP_WITH_EXISTING_TASK, "Some other task is deactivating some target template from target path."); } - procedureId = - this.executor.submitProcedure( - new DeactivateTemplateProcedure(queryId, templateSetInfo, isGeneratedByPipe)); + procedure = new DeactivateTemplateProcedure(queryId, templateSetInfo, isGeneratedByPipe); + this.executor.submitProcedure(procedure); } } - List procedureStatus = new ArrayList<>(); - boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), procedureStatus); - if (isSucceed) { - return StatusUtils.OK; - } else { - return procedureStatus.get(0); - } + return waitingProcedureFinished(procedure); } public TSStatus unsetSchemaTemplate( String queryId, Template template, PartialPath path, boolean isGeneratedByPipe) { - long procedureId = -1; + UnsetTemplateProcedure procedure = null; synchronized (this) { boolean hasOverlappedTask = false; ProcedureType type; UnsetTemplateProcedure unsetTemplateProcedure; - for (Procedure procedure : executor.getProcedures().values()) { - type = ProcedureFactory.getProcedureType(procedure); + for (Procedure runningProcedure : executor.getProcedures().values()) { + type = ProcedureFactory.getProcedureType(runningProcedure); if (type == null || !type.equals(ProcedureType.UNSET_TEMPLATE_PROCEDURE)) { continue; } - unsetTemplateProcedure = (UnsetTemplateProcedure) procedure; + unsetTemplateProcedure = (UnsetTemplateProcedure) runningProcedure; if (queryId.equals(unsetTemplateProcedure.getQueryId())) { - procedureId = unsetTemplateProcedure.getProcId(); + procedure = unsetTemplateProcedure; break; } if (template.getId() == unsetTemplateProcedure.getTemplateId() @@ -492,26 +476,18 @@ public TSStatus unsetSchemaTemplate( } } - if (procedureId == -1) { + if (procedure == null) { if (hasOverlappedTask) { return RpcUtils.getStatus( TSStatusCode.OVERLAP_WITH_EXISTING_TASK, "Some other task is unsetting target template from target path " + path.getFullPath()); } - procedureId = - this.executor.submitProcedure( - new UnsetTemplateProcedure(queryId, template, path, 
isGeneratedByPipe)); + procedure = new UnsetTemplateProcedure(queryId, template, path, isGeneratedByPipe); + this.executor.submitProcedure(procedure); } } - List procedureStatus = new ArrayList<>(); - boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), procedureStatus); - if (isSucceed) { - return StatusUtils.OK; - } else { - return procedureStatus.get(0); - } + return waitingProcedureFinished(procedure); } /** @@ -536,74 +512,170 @@ public void removeConfigNode(RemoveConfigNodePlan removeConfigNodePlan) { } /** - * Generate {@link RemoveDataNodeProcedure}s, and serially execute all the {@link - * RemoveDataNodeProcedure}s. + * Generate {@link RemoveDataNodesProcedure}s, and serially execute all the {@link + * RemoveDataNodesProcedure}s. */ public boolean removeDataNode(RemoveDataNodePlan removeDataNodePlan) { + Map nodeStatusMap = new HashMap<>(); removeDataNodePlan .getDataNodeLocations() .forEach( - tDataNodeLocation -> { - this.executor.submitProcedure(new RemoveDataNodeProcedure(tDataNodeLocation)); - LOGGER.info("Submit RemoveDataNodeProcedure successfully, {}", tDataNodeLocation); - }); + datanode -> + nodeStatusMap.put( + datanode.getDataNodeId(), + configManager.getLoadManager().getNodeStatus(datanode.getDataNodeId()))); + this.executor.submitProcedure( + new RemoveDataNodesProcedure(removeDataNodePlan.getDataNodeLocations(), nodeStatusMap)); + LOGGER.info( + "Submit RemoveDataNodesProcedure successfully, {}", + removeDataNodePlan.getDataNodeLocations()); return true; } - // region region migration - private TSStatus checkRegionMigrate( - TMigrateRegionReq migrateRegionReq, - TConsensusGroupId regionGroupId, - TDataNodeLocation originalDataNode, - TDataNodeLocation destDataNode, - TDataNodeLocation coordinatorForAddPeer) { - String failMessage = null; - Optional> anotherMigrateProcedure = - this.executor.getProcedures().values().stream() + public boolean removeAINode(RemoveAINodePlan removeAINodePlan) { + this.executor.submitProcedure(new RemoveAINodeProcedure(removeAINodePlan.getAINodeLocation())); + LOGGER.info( + "Submit RemoveAINodeProcedure successfully, {}", removeAINodePlan.getAINodeLocation()); + return true; + } + + public TSStatus checkRemoveDataNodes(List dataNodeLocations) { + // 1. 
Only one RemoveDataNodesProcedure is allowed in the cluster
+    Optional<Procedure<ConfigNodeProcedureEnv>> anotherRemoveProcedure =
+        getExecutor().getProcedures().values().stream()
             .filter(
                 procedure -> {
-                  if (procedure instanceof RegionMigrateProcedure) {
-                    return !procedure.isFinished()
-                        && ((RegionMigrateProcedure) procedure)
-                            .getConsensusGroupId()
-                            .equals(regionGroupId);
+                  if (procedure instanceof RemoveDataNodesProcedure) {
+                    return !procedure.isFinished();
                   }
                   return false;
                 })
             .findAny();
-    ConfigNodeConfig conf = ConfigNodeDescriptor.getInstance().getConf();
-    if (TConsensusGroupType.DataRegion == regionGroupId.getType()
-        && ConsensusFactory.SIMPLE_CONSENSUS.equals(conf.getDataRegionConsensusProtocolClass())) {
-      failMessage =
-          "The region you are trying to migrate is using SimpleConsensus, and SimpleConsensus not supports region migration.";
-    } else if (TConsensusGroupType.SchemaRegion == regionGroupId.getType()
-        && ConsensusFactory.SIMPLE_CONSENSUS.equals(conf.getSchemaRegionConsensusProtocolClass())) {
-      failMessage =
-          "The region you are trying to migrate is using SimpleConsensus, and SimpleConsensus not supports region migration.";
-    } else if (anotherMigrateProcedure.isPresent()) {
+
+    String failMessage = null;
+    if (anotherRemoveProcedure.isPresent()) {
+      List<TDataNodeLocation> anotherRemoveDataNodes =
+          ((RemoveDataNodesProcedure) anotherRemoveProcedure.get()).getRemovedDataNodes();
       failMessage =
           String.format(
-              "Submit RegionMigrateProcedure failed, "
-                  + "because another RegionMigrateProcedure of the same consensus group %d is already in processing. "
-                  + "A consensus group is able to have at most 1 RegionMigrateProcedure at the same time. "
+              "Submit RemoveDataNodesProcedure failed, "
+                  + "because another RemoveDataNodesProcedure %s is already in processing. "
+                  + "IoTDB allows at most one RemoveDataNodesProcedure at a time. "
                   + "For further information, please search [pid%d] in log. ",
-              regionGroupId.getId(), anotherMigrateProcedure.get().getProcId());
-    } else if (originalDataNode == null) {
-      failMessage =
-          String.format(
-              "Submit RegionMigrateProcedure failed, because no original DataNode %d",
-              migrateRegionReq.getFromId());
-    } else if (destDataNode == null) {
-      failMessage =
-          String.format(
-              "Submit RegionMigrateProcedure failed, because no target DataNode %s",
-              migrateRegionReq.getToId());
-    } else if (coordinatorForAddPeer == null) {
+              anotherRemoveDataNodes, anotherRemoveProcedure.get().getProcId());
+    }
+
+    // 2. Check if the RemoveDataNodesProcedure conflicts with the RegionMigrateProcedure
+    Set<TConsensusGroupId> removedDataNodesRegionSet =
+        getEnv().getRemoveDataNodeHandler().getRemovedDataNodesRegionSet(dataNodeLocations);
+    Optional<Procedure<ConfigNodeProcedureEnv>> conflictRegionMigrateProcedure =
+        getExecutor().getProcedures().values().stream()
+            .filter(
+                procedure -> {
+                  if (procedure instanceof RegionMigrateProcedure) {
+                    RegionMigrateProcedure regionMigrateProcedure =
+                        (RegionMigrateProcedure) procedure;
+                    if (regionMigrateProcedure.isFinished()) {
+                      return false;
+                    }
+                    return removedDataNodesRegionSet.contains(regionMigrateProcedure.getRegionId())
+                        || dataNodeLocations.contains(regionMigrateProcedure.getDestDataNode());
+                  }
+                  return false;
+                })
+            .findAny();
+    if (conflictRegionMigrateProcedure.isPresent()) {
       failMessage =
           String.format(
-              "%s, There are no other DataNodes could be selected to perform the add peer process, "
-                  + "please check RegionGroup: %s by show regions sql command",
-              REGION_MIGRATE_PROCESS, regionGroupId);
+              "Submit RemoveDataNodesProcedure failed, "
+                  + "because another RegionMigrateProcedure %s is already in processing which conflicts with this RemoveDataNodesProcedure. "
+                  + "The RegionMigrateProcedure is migrating the region %s to the DataNode %s. "
+                  + "For further information, please search [pid%d] in log. ",
+              conflictRegionMigrateProcedure.get().getProcId(),
+              ((RegionMigrateProcedure) conflictRegionMigrateProcedure.get()).getRegionId(),
+              ((RegionMigrateProcedure) conflictRegionMigrateProcedure.get()).getDestDataNode(),
+              conflictRegionMigrateProcedure.get().getProcId());
+    }
+    // 3. Check if the RegionMigrateProcedures generated by this RemoveDataNodesProcedure conflict
+    // with each other
+    List<RegionMigrationPlan> regionMigrationPlans =
+        getEnv().getRemoveDataNodeHandler().getRegionMigrationPlans(dataNodeLocations);
+    removedDataNodesRegionSet.clear();
+    for (RegionMigrationPlan regionMigrationPlan : regionMigrationPlans) {
+      if (removedDataNodesRegionSet.contains(regionMigrationPlan.getRegionId())) {
+        failMessage =
+            String.format(
+                "Submit RemoveDataNodesProcedure failed, "
+                    + "because the RegionMigrateProcedures generated by this RemoveDataNodesProcedure conflict with each other. "
+                    + "Only one replica of the same consensus group is allowed to be migrated at the same time. "
+                    + "The conflicting region id is %s. ",
+                regionMigrationPlan.getRegionId());
+        break;
+      }
+      removedDataNodesRegionSet.add(regionMigrationPlan.getRegionId());
+    }
+
+    // 4. Check if any other DataNode in the affected consensus groups, besides the DataNodes
+    // being removed, is unknown or readonly
+
+    for (TDataNodeLocation removeDataNode : dataNodeLocations) {
+      Set<TDataNodeLocation> relatedDataNodes =
+          getEnv().getRemoveDataNodeHandler().getRelatedDataNodeLocations(removeDataNode);
+      relatedDataNodes.remove(removeDataNode);
+
+      for (TDataNodeLocation relatedDataNode : relatedDataNodes) {
+        NodeStatus nodeStatus =
+            getConfigManager().getLoadManager().getNodeStatus(relatedDataNode.getDataNodeId());
+        if (nodeStatus == NodeStatus.Unknown || nodeStatus == NodeStatus.ReadOnly) {
+          failMessage =
+              String.format(
+                  "Submit RemoveDataNodesProcedure failed, "
+                      + "because there are other unknown or readonly nodes in the consensus group besides the nodes being removed, "
+                      + "so the remove operation cannot be performed for security reasons. 
" + + "Please check the status of the node %s and ensure it is running.", + relatedDataNode.getDataNodeId()); + } + } + } + + if (failMessage != null) { + LOGGER.warn(failMessage); + TSStatus failStatus = new TSStatus(TSStatusCode.REMOVE_DATANODE_ERROR.getStatusCode()); + failStatus.setMessage(failMessage); + return failStatus; + } + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + + // region region operation related check + + /** + * Checks whether region migration is allowed. + * + * @param migrateRegionReq the migration request details + * @param regionGroupId the ID of the consensus group for the region + * @param originalDataNode the original DataNode location from which the region is being migrated + * @param destDataNode the destination DataNode location to which the region is being migrated + * @param coordinatorForAddPeer the DataNode location acting as the coordinator for adding a peer + * @return the status of the migration request (TSStatus) + */ + private TSStatus checkMigrateRegion( + TMigrateRegionReq migrateRegionReq, + TConsensusGroupId regionGroupId, + TDataNodeLocation originalDataNode, + TDataNodeLocation destDataNode, + TDataNodeLocation coordinatorForAddPeer) { + String failMessage; + if ((failMessage = + regionOperationCommonCheck( + regionGroupId, + destDataNode, + Arrays.asList( + new Pair<>("Original DataNode", originalDataNode), + new Pair<>("Destination DataNode", destDataNode), + new Pair<>("Coordinator for add peer", coordinatorForAddPeer)))) + != null) { + // do nothing } else if (configManager .getPartitionManager() .getAllReplicaSets(originalDataNode.getDataNodeId()) @@ -622,96 +694,477 @@ private TSStatus checkRegionMigrate( String.format( "Submit RegionMigrateProcedure failed, because the target DataNode %s already contains Region %s", migrateRegionReq.getToId(), migrateRegionReq.getRegionId()); - } else if (!configManager - .getNodeManager() - .filterDataNodeThroughStatus(NodeStatus.Running) + } + + if (failMessage != null) { + LOGGER.warn(failMessage); + TSStatus failStatus = new TSStatus(TSStatusCode.MIGRATE_REGION_ERROR.getStatusCode()); + failStatus.setMessage(failMessage); + return failStatus; + } + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + + private TSStatus checkReconstructRegion( + TReconstructRegionReq req, + TConsensusGroupId regionId, + TDataNodeLocation targetDataNode, + TDataNodeLocation coordinator) { + String failMessage = + regionOperationCommonCheck( + regionId, + targetDataNode, + Arrays.asList( + new Pair<>("Target DataNode", targetDataNode), + new Pair<>("Coordinator", coordinator))); + + ConfigNodeConfig conf = ConfigNodeDescriptor.getInstance().getConf(); + if (configManager + .getPartitionManager() + .getAllReplicaSetsMap(regionId.getType()) + .get(regionId) + .getDataNodeLocationsSize() + == 1) { + failMessage = String.format("%s only has 1 replica, it cannot be reconstructed", regionId); + } else if (configManager + .getPartitionManager() + .getAllReplicaSets(targetDataNode.getDataNodeId()) .stream() - .map(TDataNodeConfiguration::getLocation) - .map(TDataNodeLocation::getDataNodeId) - .collect(Collectors.toSet()) - .contains(migrateRegionReq.getToId())) { - // Here we only check Running DataNode to implement migration, because removing nodes may not - // exist when add peer is performing + .noneMatch(replicaSet -> replicaSet.getRegionId().equals(regionId))) { failMessage = String.format( - "Submit RegionMigrateProcedure failed, because the destDataNode %s is ReadOnly or 
Unknown.", - migrateRegionReq.getToId()); + "Submit ReconstructRegionProcedure failed, because the target DataNode %s doesn't contain Region %s", + req.getDataNodeId(), regionId); } + if (failMessage != null) { LOGGER.warn(failMessage); - TSStatus failStatus = new TSStatus(TSStatusCode.MIGRATE_REGION_ERROR.getStatusCode()); + TSStatus failStatus = new TSStatus(TSStatusCode.RECONSTRUCT_REGION_ERROR.getStatusCode()); failStatus.setMessage(failMessage); return failStatus; } return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } - public synchronized TSStatus migrateRegion(TMigrateRegionReq migrateRegionReq) { - TConsensusGroupId regionGroupId; - Optional optional = - configManager - .getPartitionManager() - .generateTConsensusGroupIdByRegionId(migrateRegionReq.getRegionId()); - if (optional.isPresent()) { - regionGroupId = optional.get(); - } else { - LOGGER.error("get region group id fail"); - return new TSStatus(TSStatusCode.MIGRATE_REGION_ERROR.getStatusCode()) - .setMessage("get region group id fail"); + private TSStatus checkExtendRegion( + TExtendRegionReq req, + TConsensusGroupId regionId, + TDataNodeLocation targetDataNode, + TDataNodeLocation coordinator) { + String failMessage = + regionOperationCommonCheck( + regionId, + targetDataNode, + Arrays.asList( + new Pair<>("Target DataNode", targetDataNode), + new Pair<>("Coordinator", coordinator))); + if (configManager + .getPartitionManager() + .getAllReplicaSets(targetDataNode.getDataNodeId()) + .stream() + .anyMatch(replicaSet -> replicaSet.getRegionId().equals(regionId))) { + failMessage = + String.format( + "Target DataNode %s already contains region %s", + targetDataNode.getDataNodeId(), req.getRegionId()); } - // find original dn and dest dn - final TDataNodeLocation originalDataNode = - configManager - .getNodeManager() - .getRegisteredDataNode(migrateRegionReq.getFromId()) - .getLocation(); - final TDataNodeLocation destDataNode = - configManager - .getNodeManager() - .getRegisteredDataNode(migrateRegionReq.getToId()) - .getLocation(); - // select coordinator for adding peer - RegionMaintainHandler handler = new RegionMaintainHandler(configManager); - // TODO: choose the DataNode which has lowest load - final TDataNodeLocation coordinatorForAddPeer = - handler - .filterDataNodeWithOtherRegionReplica( - regionGroupId, - destDataNode, - NodeStatus.Running, - NodeStatus.Removing, - NodeStatus.ReadOnly) - .orElse(null); - // Select coordinator for removing peer - // For now, destDataNode temporarily acts as the coordinatorForRemovePeer - final TDataNodeLocation coordinatorForRemovePeer = destDataNode; - - TSStatus status = - checkRegionMigrate( - migrateRegionReq, regionGroupId, originalDataNode, destDataNode, coordinatorForAddPeer); - if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - return status; + if (failMessage != null) { + LOGGER.warn(failMessage); + TSStatus failStatus = new TSStatus(TSStatusCode.RECONSTRUCT_REGION_ERROR.getStatusCode()); + failStatus.setMessage(failMessage); + return failStatus; } + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } - // finally, submit procedure - this.executor.submitProcedure( - new RegionMigrateProcedure( - regionGroupId, - originalDataNode, - destDataNode, - coordinatorForAddPeer, - coordinatorForRemovePeer)); - LOGGER.info( - "Submit RegionMigrateProcedure successfully, Region: {}, Origin DataNode: {}, Dest DataNode: {}, Add Coordinator: {}, Remove Coordinator: {}", - regionGroupId, - originalDataNode, - destDataNode, - 
coordinatorForAddPeer, - coordinatorForRemovePeer); + private TSStatus checkRemoveRegion( + TRemoveRegionReq req, + TConsensusGroupId regionId, + @Nullable TDataNodeLocation targetDataNode, + TDataNodeLocation coordinator) { + String failMessage = + regionOperationCommonCheck( + regionId, targetDataNode, Arrays.asList(new Pair<>("Coordinator", coordinator))); + + ConfigNodeConfig conf = ConfigNodeDescriptor.getInstance().getConf(); + if (configManager + .getPartitionManager() + .getAllReplicaSetsMap(regionId.getType()) + .get(regionId) + .getDataNodeLocationsSize() + == 1) { + failMessage = String.format("%s only has 1 replica, it cannot be removed", regionId); + } else if (targetDataNode != null + && configManager + .getPartitionManager() + .getAllReplicaSets(targetDataNode.getDataNodeId()) + .stream() + .noneMatch(replicaSet -> replicaSet.getRegionId().equals(regionId))) { + failMessage = + String.format( + "Target DataNode %s doesn't contain Region %s", req.getDataNodeId(), regionId); + } + + if (failMessage != null) { + LOGGER.warn(failMessage); + TSStatus failStatus = new TSStatus(TSStatusCode.REMOVE_REGION_PEER_ERROR.getStatusCode()); + failStatus.setMessage(failMessage); + return failStatus; + } return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } + /** + * The common checks of all region operations, include migration, reconstruction, extension, + * removing + * + * @param regionId region group id, also called consensus group id + * @param targetDataNode DataNode should in Running status + * @param relatedDataNodes Pair + * @return The reason if check failed, or null if check pass + */ + private String regionOperationCommonCheck( + TConsensusGroupId regionId, + TDataNodeLocation targetDataNode, + List> relatedDataNodes) { + String failMessage; + + ConfigNodeConfig conf = ConfigNodeDescriptor.getInstance().getConf(); + if (TConsensusGroupType.DataRegion == regionId.getType() + && ConsensusFactory.SIMPLE_CONSENSUS.equals(conf.getDataRegionConsensusProtocolClass())) { + failMessage = "SimpleConsensus not supports region operation."; + } else if (TConsensusGroupType.SchemaRegion == regionId.getType() + && ConsensusFactory.SIMPLE_CONSENSUS.equals(conf.getSchemaRegionConsensusProtocolClass())) { + failMessage = "SimpleConsensus not supports region operation."; + } else if ((failMessage = checkRegionOperationDuplication(regionId)) != null) { + // need to do nothing more + } else if (relatedDataNodes.stream().anyMatch(pair -> pair.getRight() == null)) { + Pair nullPair = + relatedDataNodes.stream().filter(pair -> pair.getRight() == null).findAny().get(); + failMessage = String.format("Cannot find %s", nullPair.getLeft()); + } else if (targetDataNode != null + && !configManager.getNodeManager().filterDataNodeThroughStatus(NodeStatus.Running).stream() + .map(TDataNodeConfiguration::getLocation) + .map(TDataNodeLocation::getDataNodeId) + .collect(Collectors.toSet()) + .contains(targetDataNode.getDataNodeId())) { + // Here we only check Running DataNode to implement migration, because removing nodes may not + // exist when add peer is performing + failMessage = + String.format( + "Target DataNode %s is not in Running status.", targetDataNode.getDataNodeId()); + } else if ((failMessage = checkRegionOperationWithRemoveDataNode(regionId, targetDataNode)) + != null) { + // need to do nothing more + } + + return failMessage; + } + + private String checkRegionOperationWithRemoveDataNode( + TConsensusGroupId regionId, TDataNodeLocation targetDataNode) { + Optional> 
conflictRemoveDataNodesProcedure = + getExecutor().getProcedures().values().stream() + .filter( + procedure -> { + if (procedure instanceof RemoveDataNodesProcedure) { + return !procedure.isFinished(); + } + return false; + }) + .findAny(); + + if (conflictRemoveDataNodesProcedure.isPresent()) { + RemoveDataNodeHandler removeDataNodeHandler = env.getRemoveDataNodeHandler(); + List removedDataNodes = + ((RemoveDataNodesProcedure) conflictRemoveDataNodesProcedure.get()).getRemovedDataNodes(); + Set removedDataNodesRegionSet = + removeDataNodeHandler.getRemovedDataNodesRegionSet(removedDataNodes); + if (removedDataNodesRegionSet.contains(regionId)) { + return String.format( + "Another RemoveDataNodesProcedure %s is already in processing which conflicts with this procedure. " + + "The RemoveDataNodesProcedure is removing the DataNodes %s which contains the region %s. " + + "For further information, please search [pid%d] in log. ", + conflictRemoveDataNodesProcedure.get().getProcId(), + removedDataNodes, + regionId, + conflictRemoveDataNodesProcedure.get().getProcId()); + } else if (removedDataNodes.contains(targetDataNode)) { + return String.format( + "Another RemoveDataNodesProcedure %s is already in processing which conflicts with this procedure. " + + "The RemoveDataNodesProcedure is removing the target DataNode %s. " + + "For further information, please search [pid%d] in log. ", + conflictRemoveDataNodesProcedure.get().getProcId(), + targetDataNode, + conflictRemoveDataNodesProcedure.get().getProcId()); + } + } + return null; + } + + private String checkRegionOperationDuplication(TConsensusGroupId regionId) { + List> otherRegionMemberChangeProcedures = + getRegionOperationProcedures() + .filter( + regionMemberChangeProcedure -> + regionId.equals(regionMemberChangeProcedure.getRegionId())) + .collect(Collectors.toList()); + if (!otherRegionMemberChangeProcedures.isEmpty()) { + return String.format( + "%s has some other region operation procedures in progress, their procedure id is: %s", + regionId, otherRegionMemberChangeProcedures); + } + return null; + } + + public List getRegionOperationConsensusIds() { + return getRegionOperationProcedures() + .map(RegionOperationProcedure::getRegionId) + .distinct() + .collect(Collectors.toList()); + } + + private Stream> getRegionOperationProcedures() { + return getExecutor().getProcedures().values().stream() + .filter(procedure -> !procedure.isFinished()) + .filter(procedure -> procedure instanceof RegionOperationProcedure) + .map(procedure -> (RegionOperationProcedure) procedure); + } + + // end region + + public TSStatus migrateRegion(TMigrateRegionReq migrateRegionReq) { + try (AutoCloseableLock ignoredLock = + AutoCloseableLock.acquire(env.getSubmitRegionMigrateLock())) { + TConsensusGroupId regionGroupId; + Optional optional = + configManager + .getPartitionManager() + .generateTConsensusGroupIdByRegionId(migrateRegionReq.getRegionId()); + if (optional.isPresent()) { + regionGroupId = optional.get(); + } else { + LOGGER.error("get region group id fail"); + return new TSStatus(TSStatusCode.MIGRATE_REGION_ERROR.getStatusCode()) + .setMessage("get region group id fail"); + } + + // find original dn and dest dn + final TDataNodeLocation originalDataNode = + configManager + .getNodeManager() + .getRegisteredDataNode(migrateRegionReq.getFromId()) + .getLocation(); + final TDataNodeLocation destDataNode = + configManager + .getNodeManager() + .getRegisteredDataNode(migrateRegionReq.getToId()) + .getLocation(); + // select coordinator for adding peer + 
RegionMaintainHandler handler = env.getRegionMaintainHandler(); + // TODO: choose the DataNode which has lowest load + final TDataNodeLocation coordinatorForAddPeer = + handler + .filterDataNodeWithOtherRegionReplica( + regionGroupId, + destDataNode, + NodeStatus.Running, + NodeStatus.Removing, + NodeStatus.ReadOnly) + .orElse(null); + // Select coordinator for removing peer + // For now, destDataNode temporarily acts as the coordinatorForRemovePeer + final TDataNodeLocation coordinatorForRemovePeer = destDataNode; + + TSStatus status = + checkMigrateRegion( + migrateRegionReq, + regionGroupId, + originalDataNode, + destDataNode, + coordinatorForAddPeer); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; + } + + // finally, submit procedure + this.executor.submitProcedure( + new RegionMigrateProcedure( + regionGroupId, + originalDataNode, + destDataNode, + coordinatorForAddPeer, + coordinatorForRemovePeer)); + LOGGER.info( + "[MigrateRegion] Submit RegionMigrateProcedure successfully, Region: {}, Origin DataNode: {}, Dest DataNode: {}, Add Coordinator: {}, Remove Coordinator: {}", + regionGroupId, + originalDataNode, + destDataNode, + coordinatorForAddPeer, + coordinatorForRemovePeer); + + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + } + + public TSStatus reconstructRegion(TReconstructRegionReq req) { + RegionMaintainHandler handler = env.getRegionMaintainHandler(); + final TDataNodeLocation targetDataNode = + configManager.getNodeManager().getRegisteredDataNode(req.getDataNodeId()).getLocation(); + try (AutoCloseableLock ignoredLock = + AutoCloseableLock.acquire(env.getSubmitRegionMigrateLock())) { + List procedures = new ArrayList<>(); + for (int x : req.getRegionIds()) { + TConsensusGroupId regionId = + configManager + .getPartitionManager() + .generateTConsensusGroupIdByRegionId(x) + .orElseThrow(() -> new IllegalArgumentException("Region id " + x + " is invalid")); + final TDataNodeLocation coordinator = + handler + .filterDataNodeWithOtherRegionReplica( + regionId, + targetDataNode, + NodeStatus.Running, + NodeStatus.Removing, + NodeStatus.ReadOnly) + .orElse(null); + TSStatus status = checkReconstructRegion(req, regionId, targetDataNode, coordinator); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; + } + procedures.add(new ReconstructRegionProcedure(regionId, targetDataNode, coordinator)); + } + // all checks pass, submit all procedures + procedures.forEach( + reconstructRegionProcedure -> { + this.executor.submitProcedure(reconstructRegionProcedure); + LOGGER.info( + "[ReconstructRegion] Submit ReconstructRegionProcedure successfully, {}", + reconstructRegionProcedure); + }); + } + return RpcUtils.SUCCESS_STATUS; + } + + public TSStatus extendRegion(TExtendRegionReq req) { + try (AutoCloseableLock ignoredLock = + AutoCloseableLock.acquire(env.getSubmitRegionMigrateLock())) { + TConsensusGroupId regionId; + Optional optional = + configManager + .getPartitionManager() + .generateTConsensusGroupIdByRegionId(req.getRegionId()); + if (optional.isPresent()) { + regionId = optional.get(); + } else { + LOGGER.error("get region group id fail"); + return new TSStatus(TSStatusCode.EXTEND_REGION_ERROR.getStatusCode()) + .setMessage("get region group id fail"); + } + + // find target dn + final TDataNodeLocation targetDataNode = + configManager.getNodeManager().getRegisteredDataNode(req.getDataNodeId()).getLocation(); + // select coordinator for adding peer + RegionMaintainHandler 
handler = env.getRegionMaintainHandler(); + final TDataNodeLocation coordinator = + handler + .filterDataNodeWithOtherRegionReplica( + regionId, + targetDataNode, + NodeStatus.Running, + NodeStatus.Removing, + NodeStatus.ReadOnly) + .orElse(null); + // do the check + TSStatus status = checkExtendRegion(req, regionId, targetDataNode, coordinator); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; + } + // submit procedure + AddRegionPeerProcedure procedure = + new AddRegionPeerProcedure(regionId, coordinator, targetDataNode); + this.executor.submitProcedure(procedure); + LOGGER.info("[ExtendRegion] Submit AddRegionPeerProcedure successfully: {}", procedure); + + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + } + + public TSStatus removeRegion(TRemoveRegionReq req) { + try (AutoCloseableLock ignoredLock = + AutoCloseableLock.acquire(env.getSubmitRegionMigrateLock())) { + TConsensusGroupId regionId; + Optional optional = + configManager + .getPartitionManager() + .generateTConsensusGroupIdByRegionId(req.getRegionId()); + if (optional.isPresent()) { + regionId = optional.get(); + } else { + LOGGER.error("get region group id fail"); + return new TSStatus(TSStatusCode.REMOVE_REGION_PEER_ERROR.getStatusCode()) + .setMessage("get region group id fail"); + } + + // find target dn + final TDataNodeLocation targetDataNode = + configManager.getNodeManager().getRegisteredDataNode(req.getDataNodeId()).getLocation(); + + // select coordinator for removing peer + RegionMaintainHandler handler = env.getRegionMaintainHandler(); + final TDataNodeLocation coordinator = + handler + .filterDataNodeWithOtherRegionReplica( + regionId, + targetDataNode, + NodeStatus.Running, + NodeStatus.Removing, + NodeStatus.ReadOnly) + .orElse(null); + + // do the check + TSStatus status = checkRemoveRegion(req, regionId, targetDataNode, coordinator); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; + } + + // SPECIAL CASE + if (targetDataNode == null) { + // If targetDataNode is null, it means the target DataNode does not exist in the + // NodeManager. + // In this case, simply clean up the partition table once and do nothing else. 
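migrateRegion, reconstructRegion, extendRegion, and removeRegion all pick their coordinator the same way: filterDataNodeWithOtherRegionReplica scans the region's other replicas for a DataNode in an allowed status, and the caller falls back to null when none qualifies. A minimal self-contained sketch of that selection shape; Node, NodeStatus, and selectCoordinator are illustrative stand-ins here, not the real IoTDB types:

import java.util.List;
import java.util.Optional;
import java.util.Set;

public class CoordinatorSelection {
  enum NodeStatus { Running, Removing, ReadOnly, Unknown }

  // Simplified stand-in for TDataNodeLocation plus its cached status.
  record Node(int id, NodeStatus status) {}

  // Pick any replica other than the excluded node whose status is in the allowed set;
  // mirrors the filterDataNodeWithOtherRegionReplica(...).orElse(null) pattern above.
  static Optional<Node> selectCoordinator(
      List<Node> replicas, Node excluded, Set<NodeStatus> allowed) {
    return replicas.stream()
        .filter(node -> node.id() != excluded.id())
        .filter(node -> allowed.contains(node.status()))
        .findAny();
  }

  public static void main(String[] args) {
    List<Node> replicas =
        List.of(
            new Node(1, NodeStatus.Running),
            new Node(2, NodeStatus.Unknown),
            new Node(3, NodeStatus.ReadOnly));
    Node target = new Node(1, NodeStatus.Running);
    // Node 1 is excluded, node 2 is Unknown, so node 3 (ReadOnly, allowed) is chosen.
    System.out.println(
        selectCoordinator(
            replicas, target, Set.of(NodeStatus.Running, NodeStatus.Removing, NodeStatus.ReadOnly)));
  }
}

An empty result here corresponds to the orElse(null) coordinators that the check methods later report as "Cannot find ..." failures.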
+ LOGGER.warn( + "Remove region: Target DataNode {} not found, will simply clean up the partition table of region {} and do nothing else.", + req.getDataNodeId(), + req.getRegionId()); + this.executor + .getEnvironment() + .getRegionMaintainHandler() + .removeRegionLocation( + regionId, buildFakeDataNodeLocation(req.getDataNodeId(), "FakeIpForRemoveRegion")); + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + + // submit procedure + RemoveRegionPeerProcedure procedure = + new RemoveRegionPeerProcedure(regionId, coordinator, targetDataNode); + this.executor.submitProcedure(procedure); + LOGGER.info( + "[RemoveRegionPeer] Submit RemoveRegionPeerProcedure successfully: {}", procedure); + + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + } + + private static TDataNodeLocation buildFakeDataNodeLocation(int dataNodeId, String message) { + TEndPoint fakeEndPoint = new TEndPoint(message, -1); + return new TDataNodeLocation( + dataNodeId, fakeEndPoint, fakeEndPoint, fakeEndPoint, fakeEndPoint, fakeEndPoint); + } + // endregion /** @@ -722,17 +1175,15 @@ public synchronized TSStatus migrateRegion(TMigrateRegionReq migrateRegionReq) { */ public TSStatus createRegionGroups( TConsensusGroupType consensusGroupType, CreateRegionGroupsPlan createRegionGroupsPlan) { - final long procedureId = - executor.submitProcedure( - new CreateRegionGroupsProcedure(consensusGroupType, createRegionGroupsPlan)); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return RpcUtils.SUCCESS_STATUS; + CreateRegionGroupsProcedure procedure = + new CreateRegionGroupsProcedure(consensusGroupType, createRegionGroupsPlan); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.CREATE_REGION_ERROR.getStatusCode()) - .setMessage(statusList.get(0).getMessage()); + .setMessage(status.getMessage()); } } @@ -760,15 +1211,13 @@ && new UpdateProcedurePlan(createTriggerProcedure).getSerializedSize() > planSiz .setMessage(e.getMessage()); } - final long procedureId = executor.submitProcedure(createTriggerProcedure); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return RpcUtils.SUCCESS_STATUS; + executor.submitProcedure(createTriggerProcedure); + TSStatus status = waitingProcedureFinished(createTriggerProcedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.CREATE_TRIGGER_ERROR.getStatusCode()) - .setMessage(statusList.get(0).getMessage()); + .setMessage(status.getMessage()); } } @@ -779,25 +1228,39 @@ && new UpdateProcedurePlan(createTriggerProcedure).getSerializedSize() > planSiz * {@link TSStatusCode#DROP_TRIGGER_ERROR} otherwise */ public TSStatus dropTrigger(String triggerName, boolean isGeneratedByPipe) { - long procedureId = - executor.submitProcedure(new DropTriggerProcedure(triggerName, isGeneratedByPipe)); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return RpcUtils.SUCCESS_STATUS; + DropTriggerProcedure procedure = new DropTriggerProcedure(triggerName, 
isGeneratedByPipe); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.DROP_TRIGGER_ERROR.getStatusCode()) - .setMessage(statusList.get(0).getMessage()); + .setMessage(status.getMessage()); } } public TSStatus createCQ(TCreateCQReq req, ScheduledExecutorService scheduledExecutor) { - final long procedureId = - executor.submitProcedure(new CreateCQProcedure(req, scheduledExecutor)); - final List statusList = new ArrayList<>(); - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - return statusList.get(0); + CreateCQProcedure procedure = new CreateCQProcedure(req, scheduledExecutor); + executor.submitProcedure(procedure); + return waitingProcedureFinished(procedure); + } + + public TSStatus createModel(String modelName, String uri) { + long procedureId = executor.submitProcedure(new CreateModelProcedure(modelName, uri)); + LOGGER.info("CreateModelProcedure was submitted, procedureId: {}.", procedureId); + return RpcUtils.SUCCESS_STATUS; + } + + public TSStatus dropModel(String modelId) { + DropModelProcedure procedure = new DropModelProcedure(modelId); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; + } else { + return new TSStatus(TSStatusCode.DROP_MODEL_ERROR.getStatusCode()) + .setMessage(status.getMessage()); + } } public TSStatus createPipePlugin( @@ -819,45 +1282,40 @@ && new UpdateProcedurePlan(createPipePluginProcedure).getSerializedSize() .setMessage(e.getMessage()); } - final long procedureId = executor.submitProcedure(createPipePluginProcedure); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return RpcUtils.SUCCESS_STATUS; + executor.submitProcedure(createPipePluginProcedure); + TSStatus status = waitingProcedureFinished(createPipePluginProcedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.CREATE_PIPE_PLUGIN_ERROR.getStatusCode()) - .setMessage(statusList.get(0).getMessage()); + .setMessage(status.getMessage()); } } public TSStatus dropPipePlugin(TDropPipePluginReq req) { - final long procedureId = - executor.submitProcedure( - new DropPipePluginProcedure( - req.getPluginName(), req.isSetIfExistsCondition() && req.isIfExistsCondition())); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return RpcUtils.SUCCESS_STATUS; + DropPipePluginProcedure procedure = + new DropPipePluginProcedure( + req.getPluginName(), req.isSetIfExistsCondition() && req.isIfExistsCondition()); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.DROP_PIPE_PLUGIN_ERROR.getStatusCode()) - .setMessage(statusList.get(0).getMessage()); + .setMessage(status.getMessage()); } } public TSStatus createPipe(TCreatePipeReq req) { try { - final long procedureId = executor.submitProcedure(new CreatePipeProcedureV2(req)); - final List statusList = new ArrayList<>(); - final 
boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return statusList.get(0); + CreatePipeProcedureV2 procedure = new CreatePipeProcedureV2(req); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()) - .setMessage(wrapTimeoutMessageForPipeProcedure(statusList.get(0).getMessage())); + .setMessage(wrapTimeoutMessageForPipeProcedure(status.getMessage())); } } catch (Exception e) { return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()).setMessage(e.getMessage()); @@ -866,15 +1324,14 @@ public TSStatus createPipe(TCreatePipeReq req) { public TSStatus alterPipe(TAlterPipeReq req) { try { - final long procedureId = executor.submitProcedure(new AlterPipeProcedureV2(req)); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return statusList.get(0); + AlterPipeProcedureV2 procedure = new AlterPipeProcedureV2(req); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()) - .setMessage(wrapTimeoutMessageForPipeProcedure(statusList.get(0).getMessage())); + .setMessage(wrapTimeoutMessageForPipeProcedure(status.getMessage())); } } catch (Exception e) { return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()).setMessage(e.getMessage()); @@ -883,15 +1340,14 @@ public TSStatus alterPipe(TAlterPipeReq req) { public TSStatus startPipe(String pipeName) { try { - final long procedureId = executor.submitProcedure(new StartPipeProcedureV2(pipeName)); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return statusList.get(0); + StartPipeProcedureV2 procedure = new StartPipeProcedureV2(pipeName); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()) - .setMessage(wrapTimeoutMessageForPipeProcedure(statusList.get(0).getMessage())); + .setMessage(wrapTimeoutMessageForPipeProcedure(status.getMessage())); } } catch (Exception e) { return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()).setMessage(e.getMessage()); @@ -900,15 +1356,14 @@ public TSStatus startPipe(String pipeName) { public TSStatus stopPipe(String pipeName) { try { - final long procedureId = executor.submitProcedure(new StopPipeProcedureV2(pipeName)); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return statusList.get(0); + StopPipeProcedureV2 procedure = new StopPipeProcedureV2(pipeName); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()) - 
.setMessage(wrapTimeoutMessageForPipeProcedure(statusList.get(0).getMessage())); + .setMessage(wrapTimeoutMessageForPipeProcedure(status.getMessage())); } } catch (Exception e) { return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()).setMessage(e.getMessage()); @@ -917,15 +1372,14 @@ public TSStatus stopPipe(String pipeName) { public TSStatus dropPipe(String pipeName) { try { - final long procedureId = executor.submitProcedure(new DropPipeProcedureV2(pipeName)); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return statusList.get(0); + DropPipeProcedureV2 procedure = new DropPipeProcedureV2(pipeName); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()) - .setMessage(wrapTimeoutMessageForPipeProcedure(statusList.get(0).getMessage())); + .setMessage(wrapTimeoutMessageForPipeProcedure(status.getMessage())); } } catch (Exception e) { return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()).setMessage(e.getMessage()); @@ -960,18 +1414,16 @@ public void pipeHandleMetaChange( public TSStatus pipeHandleMetaChangeWithBlock( boolean needWriteConsensusOnConfigNodes, boolean needPushPipeMetaToDataNodes) { try { - final long procedureId = - executor.submitProcedure( - new PipeHandleMetaChangeProcedure( - needWriteConsensusOnConfigNodes, needPushPipeMetaToDataNodes)); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return RpcUtils.SUCCESS_STATUS; + PipeHandleMetaChangeProcedure procedure = + new PipeHandleMetaChangeProcedure( + needWriteConsensusOnConfigNodes, needPushPipeMetaToDataNodes); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()) - .setMessage(wrapTimeoutMessageForPipeProcedure(statusList.get(0).getMessage())); + .setMessage(wrapTimeoutMessageForPipeProcedure(status.getMessage())); } } catch (Exception e) { return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()).setMessage(e.getMessage()); @@ -980,15 +1432,14 @@ public TSStatus pipeHandleMetaChangeWithBlock( public TSStatus pipeMetaSync() { try { - final long procedureId = executor.submitProcedure(new PipeMetaSyncProcedure()); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return RpcUtils.SUCCESS_STATUS; + PipeMetaSyncProcedure procedure = new PipeMetaSyncProcedure(); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()) - .setMessage(wrapTimeoutMessageForPipeProcedure(statusList.get(0).getMessage())); + .setMessage(wrapTimeoutMessageForPipeProcedure(status.getMessage())); } } catch (Exception e) { return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()).setMessage(e.getMessage()); @@ -997,15 +1448,14 @@ public TSStatus pipeMetaSync() { public TSStatus 
createTopic(TCreateTopicReq req) { try { - final long procedureId = executor.submitProcedure(new CreateTopicProcedure(req)); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return statusList.get(0); + CreateTopicProcedure procedure = new CreateTopicProcedure(req); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.CREATE_TOPIC_ERROR.getStatusCode()) - .setMessage(wrapTimeoutMessageForPipeProcedure(statusList.get(0).getMessage())); + .setMessage(wrapTimeoutMessageForPipeProcedure(status.getMessage())); } } catch (Exception e) { return new TSStatus(TSStatusCode.CREATE_TOPIC_ERROR.getStatusCode()) @@ -1015,15 +1465,14 @@ public TSStatus createTopic(TCreateTopicReq req) { public TSStatus dropTopic(String topicName) { try { - final long procedureId = executor.submitProcedure(new DropTopicProcedure(topicName)); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return statusList.get(0); + DropTopicProcedure procedure = new DropTopicProcedure(topicName); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.DROP_TOPIC_ERROR.getStatusCode()) - .setMessage(wrapTimeoutMessageForPipeProcedure(statusList.get(0).getMessage())); + .setMessage(wrapTimeoutMessageForPipeProcedure(status.getMessage())); } } catch (Exception e) { return new TSStatus(TSStatusCode.DROP_TOPIC_ERROR.getStatusCode()).setMessage(e.getMessage()); @@ -1032,15 +1481,14 @@ public TSStatus dropTopic(String topicName) { public TSStatus topicMetaSync() { try { - final long procedureId = executor.submitProcedure(new TopicMetaSyncProcedure()); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return RpcUtils.SUCCESS_STATUS; + TopicMetaSyncProcedure procedure = new TopicMetaSyncProcedure(); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.TOPIC_PUSH_META_ERROR.getStatusCode()) - .setMessage(wrapTimeoutMessageForPipeProcedure(statusList.get(0).getMessage())); + .setMessage(wrapTimeoutMessageForPipeProcedure(status.getMessage())); } } catch (Exception e) { return new TSStatus(TSStatusCode.TOPIC_PUSH_META_ERROR.getStatusCode()) @@ -1050,15 +1498,14 @@ public TSStatus topicMetaSync() { public TSStatus createConsumer(TCreateConsumerReq req) { try { - final long procedureId = executor.submitProcedure(new CreateConsumerProcedure(req)); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return statusList.get(0); + CreateConsumerProcedure procedure = new CreateConsumerProcedure(req); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == 
TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.CREATE_CONSUMER_ERROR.getStatusCode()) - .setMessage(wrapTimeoutMessageForPipeProcedure(statusList.get(0).getMessage())); + .setMessage(wrapTimeoutMessageForPipeProcedure(status.getMessage())); } } catch (Exception e) { return new TSStatus(TSStatusCode.CREATE_CONSUMER_ERROR.getStatusCode()) @@ -1068,15 +1515,14 @@ public TSStatus createConsumer(TCreateConsumerReq req) { public TSStatus dropConsumer(TCloseConsumerReq req) { try { - final long procedureId = executor.submitProcedure(new DropConsumerProcedure(req)); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return statusList.get(0); + DropConsumerProcedure procedure = new DropConsumerProcedure(req); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.DROP_CONSUMER_ERROR.getStatusCode()) - .setMessage(wrapTimeoutMessageForPipeProcedure(statusList.get(0).getMessage())); + .setMessage(wrapTimeoutMessageForPipeProcedure(status.getMessage())); } } catch (Exception e) { return new TSStatus(TSStatusCode.DROP_CONSUMER_ERROR.getStatusCode()) @@ -1086,15 +1532,14 @@ public TSStatus dropConsumer(TCloseConsumerReq req) { public TSStatus consumerGroupMetaSync() { try { - final long procedureId = executor.submitProcedure(new ConsumerGroupMetaSyncProcedure()); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return RpcUtils.SUCCESS_STATUS; + ConsumerGroupMetaSyncProcedure procedure = new ConsumerGroupMetaSyncProcedure(); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; } else { return new TSStatus(TSStatusCode.CONSUMER_PUSH_META_ERROR.getStatusCode()) - .setMessage(wrapTimeoutMessageForPipeProcedure(statusList.get(0).getMessage())); + .setMessage(wrapTimeoutMessageForPipeProcedure(status.getMessage())); } } catch (Exception e) { return new TSStatus(TSStatusCode.CONSUMER_PUSH_META_ERROR.getStatusCode()) @@ -1104,15 +1549,18 @@ public TSStatus consumerGroupMetaSync() { public TSStatus createSubscription(TSubscribeReq req) { try { - final long procedureId = executor.submitProcedure(new CreateSubscriptionProcedure(req)); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return statusList.get(0); + CreateSubscriptionProcedure procedure = new CreateSubscriptionProcedure(req); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; + } else if (PROCEDURE_TIMEOUT_MESSAGE.equals(status.getMessage())) { + // we assume that a timeout has occurred in the procedure related to the pipe in the + // subscription procedure + return new TSStatus(TSStatusCode.SUBSCRIPTION_PIPE_TIMEOUT_ERROR.getStatusCode()) + .setMessage(wrapTimeoutMessageForPipeProcedure(status.getMessage())); } else { - return new 
TSStatus(TSStatusCode.SUBSCRIPTION_SUBSCRIBE_ERROR.getStatusCode()) - .setMessage(wrapTimeoutMessageForPipeProcedure(statusList.get(0).getMessage())); + return new TSStatus(TSStatusCode.SUBSCRIPTION_SUBSCRIBE_ERROR.getStatusCode()); } } catch (Exception e) { return new TSStatus(TSStatusCode.SUBSCRIPTION_SUBSCRIBE_ERROR.getStatusCode()) @@ -1122,15 +1570,18 @@ public TSStatus createSubscription(TSubscribeReq req) { public TSStatus dropSubscription(TUnsubscribeReq req) { try { - final long procedureId = executor.submitProcedure(new DropSubscriptionProcedure(req)); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return statusList.get(0); + DropSubscriptionProcedure procedure = new DropSubscriptionProcedure(req); + executor.submitProcedure(procedure); + TSStatus status = waitingProcedureFinished(procedure); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; + } else if (PROCEDURE_TIMEOUT_MESSAGE.equals(status.getMessage())) { + // we assume that a timeout has occurred in the procedure related to the pipe in the + // subscription procedure + return new TSStatus(TSStatusCode.SUBSCRIPTION_PIPE_TIMEOUT_ERROR.getStatusCode()) + .setMessage(wrapTimeoutMessageForPipeProcedure(status.getMessage())); } else { - return new TSStatus(TSStatusCode.SUBSCRIPTION_UNSUBSCRIBE_ERROR.getStatusCode()) - .setMessage(wrapTimeoutMessageForPipeProcedure(statusList.get(0).getMessage())); + return new TSStatus(TSStatusCode.SUBSCRIPTION_UNSUBSCRIBE_ERROR.getStatusCode()); } } catch (Exception e) { return new TSStatus(TSStatusCode.SUBSCRIPTION_UNSUBSCRIBE_ERROR.getStatusCode()) @@ -1141,16 +1592,10 @@ public TSStatus dropSubscription(TUnsubscribeReq req) { public TSStatus operateAuthPlan( AuthorPlan authorPlan, List dns, boolean isGeneratedByPipe) { try { - final long procedureId = - executor.submitProcedure(new AuthOperationProcedure(authorPlan, dns, isGeneratedByPipe)); - final List statusList = new ArrayList<>(); - final boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), statusList); - if (isSucceed) { - return RpcUtils.SUCCESS_STATUS; - } else { - return new TSStatus(statusList.get(0).getCode()).setMessage(statusList.get(0).getMessage()); - } + AuthOperationProcedure procedure = + new AuthOperationProcedure(authorPlan, dns, isGeneratedByPipe); + executor.submitProcedure(procedure); + return waitingProcedureFinished(procedure); } catch (Exception e) { return new TSStatus(TSStatusCode.AUTH_OPERATE_EXCEPTION.getStatusCode()) .setMessage(e.getMessage()); @@ -1158,71 +1603,60 @@ public TSStatus operateAuthPlan( } public TSStatus setTTL(SetTTLPlan setTTLPlan, final boolean isGeneratedByPipe) { - long procedureId = executor.submitProcedure(new SetTTLProcedure(setTTLPlan, isGeneratedByPipe)); - - List procedureStatus = new ArrayList<>(); - boolean isSucceed = - waitingProcedureFinished(Collections.singletonList(procedureId), procedureStatus); - if (isSucceed) { - return RpcUtils.SUCCESS_STATUS; - } else { - return procedureStatus.get(0); - } + SetTTLProcedure procedure = new SetTTLProcedure(setTTLPlan, isGeneratedByPipe); + executor.submitProcedure(procedure); + return waitingProcedureFinished(procedure); } /** - * Waiting until the specific procedures finished. + * Waiting until the specific procedure finished. 
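The helper being rewritten here now waits on a single procedure and returns a TSStatus directly instead of filling a status list. Its control flow reduces to a poll-with-timeout plus an outcome mapping; a compact sketch of that shape, where timeoutMs and retryMs stand in for PROCEDURE_WAIT_TIME_OUT and PROCEDURE_WAIT_RETRY_TIMEOUT (whose actual values are defined elsewhere in ProcedureManager), and the enum stands in for the mapped TSStatus codes:

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public class ProcedureWaitSketch {
  // Simplified outcomes standing in for the TSStatus results built above.
  enum Result { SUCCESS, STILL_RUNNING_TIMEOUT, FAILED }

  static Result waitFor(
      BooleanSupplier finished, BooleanSupplier succeeded, long timeoutMs, long retryMs) {
    final long start = System.currentTimeMillis();
    // Poll until the procedure reports finished or the wait budget is exhausted.
    while (!finished.getAsBoolean() && System.currentTimeMillis() - start < timeoutMs) {
      try {
        TimeUnit.MILLISECONDS.sleep(retryMs);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // the real helper sleeps without interruption
      }
    }
    if (!finished.getAsBoolean()) {
      // Maps to OVERLAP_WITH_EXISTING_TASK plus the procedure-timeout message above.
      return Result.STILL_RUNNING_TIMEOUT;
    }
    return succeeded.getAsBoolean() ? Result.SUCCESS : Result.FAILED;
  }

  public static void main(String[] args) {
    long deadline = System.currentTimeMillis() + 200;
    Result r = waitFor(() -> System.currentTimeMillis() >= deadline, () -> true, 1000, 20);
    System.out.println(r); // SUCCESS once the fake procedure "finishes"
  }
}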
* - * @param procedureIds The specific procedures' index - * @param statusList The corresponding running results of these procedures - * @return True if all Procedures finished successfully, false otherwise + * @param procedure The specific procedure + * @return TSStatus the running result of this procedure */ - private boolean waitingProcedureFinished(List procedureIds, List statusList) { - boolean isSucceed = true; - for (long procedureId : procedureIds) { - final long startTimeForCurrentProcedure = System.currentTimeMillis(); - while (executor.isRunning() - && !executor.isFinished(procedureId) - && System.currentTimeMillis() - startTimeForCurrentProcedure < PROCEDURE_WAIT_TIME_OUT) { - sleepWithoutInterrupt(PROCEDURE_WAIT_RETRY_TIMEOUT); - } - final Procedure finishedProcedure = - executor.getResultOrProcedure(procedureId); - if (!finishedProcedure.isFinished()) { - // The procedure is still executing - statusList.add( - RpcUtils.getStatus(TSStatusCode.OVERLAP_WITH_EXISTING_TASK, PROCEDURE_TIMEOUT_MESSAGE)); - isSucceed = false; - continue; - } - if (finishedProcedure.isSuccess()) { - if (Objects.nonNull(finishedProcedure.getResult())) { - statusList.add( + private TSStatus waitingProcedureFinished(Procedure procedure) { + if (procedure == null) { + LOGGER.error("Unexpected null procedure parameters for waitingProcedureFinished"); + return RpcUtils.getStatus(TSStatusCode.INTERNAL_SERVER_ERROR); + } + TSStatus status; + final long startTimeForCurrentProcedure = System.currentTimeMillis(); + while (executor.isRunning() + && !executor.isFinished(procedure.getProcId()) + && System.currentTimeMillis() - startTimeForCurrentProcedure < PROCEDURE_WAIT_TIME_OUT) { + sleepWithoutInterrupt(PROCEDURE_WAIT_RETRY_TIMEOUT); + } + if (!procedure.isFinished()) { + // The procedure is still executing + status = + RpcUtils.getStatus(TSStatusCode.OVERLAP_WITH_EXISTING_TASK, PROCEDURE_TIMEOUT_MESSAGE); + } else { + if (procedure.isSuccess()) { + if (procedure.getResult() != null) { + status = RpcUtils.getStatus( - TSStatusCode.SUCCESS_STATUS, Arrays.toString(finishedProcedure.getResult()))); + TSStatusCode.SUCCESS_STATUS, Arrays.toString(procedure.getResult())); } else { - statusList.add(StatusUtils.OK); + status = StatusUtils.OK; } } else { - if (finishedProcedure.getException().getCause() instanceof IoTDBException) { - final IoTDBException e = (IoTDBException) finishedProcedure.getException().getCause(); + if (procedure.getException().getCause() instanceof IoTDBException) { + final IoTDBException e = (IoTDBException) procedure.getException().getCause(); if (e instanceof BatchProcessException) { - statusList.add( + status = RpcUtils.getStatus( Arrays.stream(((BatchProcessException) e).getFailingStatus()) - .collect(Collectors.toList()))); + .collect(Collectors.toList())); } else { - statusList.add(RpcUtils.getStatus(e.getErrorCode(), e.getMessage())); + status = RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); } } else { - statusList.add( - StatusUtils.EXECUTE_STATEMENT_ERROR.setMessage( - finishedProcedure.getException().getMessage())); + status = + StatusUtils.EXECUTE_STATEMENT_ERROR.setMessage(procedure.getException().getMessage()); } - isSucceed = false; } } - return isSucceed; + return status; } private static String wrapTimeoutMessageForPipeProcedure(String message) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/TTLManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/TTLManager.java index fc390314e397e..b0c4dd5f2969c 
100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/TTLManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/TTLManager.java @@ -22,6 +22,7 @@ import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.commons.utils.CommonDateTimeUtils; import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.confignode.consensus.request.read.ttl.ShowTTLPlan; import org.apache.iotdb.confignode.consensus.request.write.database.DatabaseSchemaPlan; @@ -128,6 +129,21 @@ public int getTTLCount() { return ttlInfo.getTTLCount(); } + /** + * Get the maximum ttl of the subtree of the corresponding database. + * + * @param database the path of the database. + * @return the maximum ttl of the subtree of the corresponding database. return NULL_TTL if the + * TTL is not set or the database does not exist. + */ + public long getDatabaseMaxTTL(final String database) { + final long ttl = ttlInfo.getDatabaseMaxTTL(database); + return ttl == Long.MAX_VALUE || ttl < 0 + ? ttl + : CommonDateTimeUtils.convertMilliTimeWithPrecision( + ttl, CommonDescriptor.getInstance().getConfig().getTimestampPrecision()); + } + /** Only used for upgrading from old database-level ttl to device-level ttl. */ public void setTTL(Map databaseTTLMap) throws IllegalPathException { ttlInfo.setTTL(databaseTTLMap); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/TriggerManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/TriggerManager.java index b1a96d029af2a..5f64c4963125f 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/TriggerManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/TriggerManager.java @@ -24,7 +24,7 @@ import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathDeserializeUtil; import org.apache.iotdb.commons.trigger.TriggerInformation; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; import org.apache.iotdb.confignode.consensus.request.read.trigger.GetTransferringTriggersPlan; @@ -250,7 +250,7 @@ public List updateTriggerLocation( DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.UPDATE_TRIGGER_LOCATION, request, dataNodeLocationMap); + CnToDnAsyncRequestType.UPDATE_TRIGGER_LOCATION, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList(); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/UDFManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/UDFManager.java index ad86879d8280f..00ed020a14e24 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/UDFManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/UDFManager.java @@ -23,7 +23,7 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.conf.IoTDBConstant; import org.apache.iotdb.commons.udf.UDFInformation; 
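The getDatabaseMaxTTL change above converts a millisecond TTL into the configured timestamp precision while letting the two sentinel cases (an unset or negative TTL, and Long.MAX_VALUE) pass through unconverted. A sketch of that guard-then-convert logic, assuming convertMilliTimeWithPrecision behaves like a plain unit conversion and using -1 as an assumed NULL_TTL sentinel:

import java.util.concurrent.TimeUnit;

public class TtlPrecision {
  static final long NULL_TTL = -1L; // assumed sentinel; the real constant lives in IoTDB commons

  // Sentinel values (unset TTL or Long.MAX_VALUE) pass through untouched;
  // finite values are converted from milliseconds to the configured precision.
  static long toPrecision(long ttlInMs, String precision) {
    if (ttlInMs == Long.MAX_VALUE || ttlInMs < 0) {
      return ttlInMs;
    }
    switch (precision) {
      case "us":
        return TimeUnit.MILLISECONDS.toMicros(ttlInMs);
      case "ns":
        return TimeUnit.MILLISECONDS.toNanos(ttlInMs);
      default: // "ms"
        return ttlInMs;
    }
  }

  public static void main(String[] args) {
    System.out.println(toPrecision(3_600_000L, "us"));     // 3600000000
    System.out.println(toPrecision(NULL_TTL, "us"));       // -1, sentinel untouched
    System.out.println(toPrecision(Long.MAX_VALUE, "ns")); // MAX_VALUE untouched
  }
}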
-import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; @@ -129,7 +129,7 @@ private List createFunctionOnDataNodes(UDFInformation udfInformation, new TCreateFunctionInstanceReq(udfInformation.serialize()).setJarFile(jarFile); DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.CREATE_FUNCTION, req, dataNodeLocationMap); + CnToDnAsyncRequestType.CREATE_FUNCTION, req, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList(); } @@ -163,7 +163,7 @@ private List dropFunctionOnDataNodes(String functionName) { DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.DROP_FUNCTION, request, dataNodeLocationMap); + CnToDnAsyncRequestType.DROP_FUNCTION, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList(); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/consensus/ConsensusManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/consensus/ConsensusManager.java index 6133089d5a178..e5e3473eedf2a 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/consensus/ConsensusManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/consensus/ConsensusManager.java @@ -31,6 +31,7 @@ import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.iotdb.confignode.conf.SystemPropertiesUtils; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; import org.apache.iotdb.confignode.consensus.statemachine.ConfigRegionStateMachine; import org.apache.iotdb.confignode.exception.AddPeerException; import org.apache.iotdb.confignode.manager.IManager; @@ -67,7 +68,7 @@ public class ConsensusManager { private static final CommonConfig COMMON_CONF = CommonDescriptor.getInstance().getConfig(); private static final int SEED_CONFIG_NODE_ID = 0; private static final long MAX_WAIT_READY_TIME_MS = - CommonDescriptor.getInstance().getConfig().getConnectionTimeoutInMS() / 2; + CommonDescriptor.getInstance().getConfig().getCnConnectionTimeoutInMS() / 2; private static final long RETRY_WAIT_TIME_MS = 100; /** There is only one ConfigNodeGroup */ @@ -217,14 +218,14 @@ private void setConsensusLayer(ConfigRegionStateMachine stateMachine) { CONF.getConfigNodeRatisPeriodicSnapshotInterval()) .setRetryTimesMax(10) .setRetryWaitMillis( - COMMON_CONF.getConnectionTimeoutInMS() / 10) + COMMON_CONF.getCnConnectionTimeoutInMS() / 10) .build()) .setRead( RatisConfig.Read.newBuilder() // use thrift connection timeout to unify read timeout .setReadTimeout( TimeDuration.valueOf( - COMMON_CONF.getConnectionTimeoutInMS(), + COMMON_CONF.getCnConnectionTimeoutInMS(), TimeUnit.MILLISECONDS)) .build()) .build()) @@ -332,7 +333,7 @@ public TSStatus write(ConfigPhysicalPlan plan) throws ConsensusException { * * @throws ConsensusException When read doesn't success */ - public DataSet 
read(ConfigPhysicalPlan plan) throws ConsensusException { + public DataSet read(final ConfigPhysicalReadPlan plan) throws ConsensusException { return consensusImpl.read(DEFAULT_CONSENSUS_GROUP_ID, plan); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/cq/CQManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/cq/CQManager.java index ec27ec8cc92e6..5726b3ce82698 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/cq/CQManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/cq/CQManager.java @@ -25,8 +25,8 @@ import org.apache.iotdb.commons.cq.CQState; import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; +import org.apache.iotdb.confignode.consensus.request.read.cq.ShowCQPlan; import org.apache.iotdb.confignode.consensus.request.write.cq.DropCQPlan; -import org.apache.iotdb.confignode.consensus.request.write.cq.ShowCQPlan; import org.apache.iotdb.confignode.consensus.response.cq.ShowCQResp; import org.apache.iotdb.confignode.manager.ConfigManager; import org.apache.iotdb.confignode.persistence.cq.CQInfo; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/LoadManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/LoadManager.java index 3260c3679d176..c2214fa2d11ce 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/LoadManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/LoadManager.java @@ -219,15 +219,6 @@ public double getFreeDiskSpace(int dataNodeId) { return loadCache.getFreeDiskSpace(dataNodeId); } - /** - * Get the loadScore of each DataNode. - * - * @return Map - */ - public Map getAllDataNodeLoadScores() { - return loadCache.getAllDataNodeLoadScores(); - } - /** * Get the lowest loadScore DataNode. 
* @@ -262,11 +253,15 @@ public void forceUpdateNodeCache( loadCache.cacheConfigNodeHeartbeatSample(nodeId, heartbeatSample); break; case DataNode: - default: loadCache.cacheDataNodeHeartbeatSample(nodeId, heartbeatSample); break; + case AINode: + loadCache.cacheAINodeHeartbeatSample(nodeId, heartbeatSample); + break; + default: + break; } - loadCache.updateNodeStatistics(); + loadCache.updateNodeStatistics(true); eventService.checkAndBroadcastNodeStatisticsChangeEventIfNecessary(); } @@ -278,7 +273,7 @@ public void forceUpdateNodeCache( */ public void removeNodeCache(int nodeId) { loadCache.removeNodeCache(nodeId); - loadCache.updateNodeStatistics(); + loadCache.updateNodeStatistics(true); eventService.checkAndBroadcastNodeStatisticsChangeEventIfNecessary(); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java index 94e1e5c0eb66f..e2f1c6758ccb8 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java @@ -22,10 +22,11 @@ import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; import org.apache.iotdb.common.rpc.thrift.TDataNodeConfiguration; import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TFlushReq; import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.cluster.NodeStatus; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; import org.apache.iotdb.confignode.conf.ConfigNodeConfig; @@ -56,14 +57,19 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.BiConsumer; import java.util.stream.Collectors; /** The RouteBalancer guides the cluster RegionGroups' leader distribution and routing priority. 
*/
@@ -80,8 +86,6 @@ public class RouteBalancer implements IClusterStatusSubscriber {
&& ConsensusFactory.RATIS_CONSENSUS.equals(DATA_REGION_CONSENSUS_PROTOCOL_CLASS))
|| (CONF.isEnableAutoLeaderBalanceForIoTConsensus()
&& ConsensusFactory.IOT_CONSENSUS.equals(DATA_REGION_CONSENSUS_PROTOCOL_CLASS))
- || (CONF.isEnableAutoLeaderBalanceForIoTConsensus()
- && ConsensusFactory.FAST_IOT_CONSENSUS.equals(DATA_REGION_CONSENSUS_PROTOCOL_CLASS))
|| (CONF.isEnableAutoLeaderBalanceForIoTConsensus()
&& ConsensusFactory.IOT_CONSENSUS_V2.equals(DATA_REGION_CONSENSUS_PROTOCOL_CLASS))
// The simple consensus protocol will always automatically designate itself as the leader
@@ -115,11 +119,16 @@ public class RouteBalancer implements IClusterStatusSubscriber {
private static final long BALANCE_RATIS_LEADER_FAILED_INTERVAL_IN_NS = 20 * 1000L * 1000L * 1000L;
private final Map<TConsensusGroupId, Long> lastFailedTimeForLeaderBalance;
+ private final Map<Integer, List<String>> lastBalancedOldLeaderId2RegionMap;
+ private Map<TConsensusGroupId, Integer> lastDataRegion2OldLeaderMap;
+ private Set<TConsensusGroupId> lastBalancedDataRegionSet;
+
public RouteBalancer(IManager configManager) {
this.configManager = configManager;
this.priorityMapLock = new ReentrantReadWriteLock();
this.regionPriorityMap = new TreeMap<>();
this.lastFailedTimeForLeaderBalance = new TreeMap<>();
+ this.lastBalancedOldLeaderId2RegionMap = new ConcurrentHashMap<>();
switch (CONF.getLeaderDistributionPolicy()) {
case AbstractLeaderBalancer.GREEDY_POLICY:
@@ -169,7 +178,7 @@ private void balanceRegionLeader(
long currentTime = System.nanoTime();
AtomicInteger requestId = new AtomicInteger(0);
DataNodeAsyncRequestContext clientHandler =
- new DataNodeAsyncRequestContext<>(CnToDnRequestType.CHANGE_REGION_LEADER);
+ new DataNodeAsyncRequestContext<>(CnToDnAsyncRequestType.CHANGE_REGION_LEADER);
Map<TConsensusGroupId, ConsensusGroupHeartbeatSample> successTransferMap = new TreeMap<>();
optimalLeaderMap.forEach(
(regionGroupId, newLeaderId) -> {
@@ -179,21 +188,40 @@ private void balanceRegionLeader(
return;
}
- if (newLeaderId != -1 && !newLeaderId.equals(currentLeaderMap.get(regionGroupId))) {
+ int oldLeaderId = currentLeaderMap.get(regionGroupId);
+ if (newLeaderId != -1 && !newLeaderId.equals(oldLeaderId)) {
LOGGER.info(
"[LeaderBalancer] Try to change the leader of Region: {} to DataNode: {} ",
regionGroupId,
newLeaderId);
switch (consensusProtocolClass) {
- case ConsensusFactory.FAST_IOT_CONSENSUS:
- case ConsensusFactory.IOT_CONSENSUS_V2:
case ConsensusFactory.IOT_CONSENSUS:
case ConsensusFactory.SIMPLE_CONSENSUS:
- // For IoTConsensus or SimpleConsensus or PipeConsensus protocol, change
+ // For IoTConsensus or SimpleConsensus protocol, change
// RegionRouteMap is enough
successTransferMap.put(
regionGroupId, new ConsensusGroupHeartbeatSample(currentTime, newLeaderId));
break;
+ case ConsensusFactory.IOT_CONSENSUS_V2:
+ // For IoTConsensusV2 protocol, change RegionRouteMap and execute flush on old
+ // region leader
+ successTransferMap.put(
+ regionGroupId, new ConsensusGroupHeartbeatSample(currentTime, newLeaderId));
+ // Prepare data for flushOldLeader
+ if (oldLeaderId != -1) {
+ lastBalancedOldLeaderId2RegionMap.compute(
+ oldLeaderId,
+ (k, v) -> {
+ if (v == null) {
+ List<String> value = new ArrayList<>();
+ value.add(String.valueOf(regionGroupId.getId()));
+ return value;
+ }
+ v.add(String.valueOf(regionGroupId.getId()));
+ return v;
+ });
+ }
+ break;
case ConsensusFactory.RATIS_CONSENSUS:
default:
// For ratis protocol, the ConfigNode-leader will send a changeLeaderRequest to the
@@ -245,12 +273,100 @@ private void balanceRegionLeader(
}
}
}
+ getLoadManager().forceUpdateConsensusGroupCache(successTransferMap);
+
+ // Prepare data for invalidateSchemaCacheOfOldLeaders
+ if (regionGroupType.equals(TConsensusGroupType.DataRegion)) {
+ lastBalancedDataRegionSet = successTransferMap.keySet();
+ lastDataRegion2OldLeaderMap = currentLeaderMap;
+ }
+ }
+
+ private void invalidateSchemaCacheOfOldLeaders() {
+ BiConsumer<Map<TConsensusGroupId, Integer>, Set<TConsensusGroupId>> consumer =
+ (oldLeaderMap, successTransferSet) -> {
+ final DataNodeAsyncRequestContext<String, TSStatus> invalidateSchemaCacheRequestHandler =
+ new DataNodeAsyncRequestContext<>(CnToDnAsyncRequestType.INVALIDATE_LAST_CACHE);
+ final AtomicInteger requestIndex = new AtomicInteger(0);
+ oldLeaderMap.entrySet().stream()
+ .filter(entry -> successTransferSet.contains(entry.getKey()))
+ .forEach(
+ entry -> {
+ // set target
+ final Integer dataNodeId = entry.getValue();
+ if (dataNodeId == -1) {
+ return;
+ }
+ final TDataNodeLocation dataNodeLocation =
+ getNodeManager().getRegisteredDataNode(dataNodeId).getLocation();
+ if (dataNodeLocation == null) {
+ LOGGER.warn("DataNodeLocation is null, datanodeId {}", dataNodeId);
+ return;
+ }
+ invalidateSchemaCacheRequestHandler.putNodeLocation(
+ requestIndex.get(), dataNodeLocation);
+ // set req
+ final TConsensusGroupId consensusGroupId = entry.getKey();
+ final String database =
+ getPartitionManager().getRegionDatabase(consensusGroupId);
+ invalidateSchemaCacheRequestHandler.putRequest(requestIndex.get(), database);
+ requestIndex.incrementAndGet();
+ });
+ CnToDnInternalServiceAsyncRequestManager.getInstance()
+ .sendAsyncRequest(invalidateSchemaCacheRequestHandler);
+ };
+
+ if (IS_ENABLE_AUTO_LEADER_BALANCE_FOR_DATA_REGION) {
+ consumer.accept(lastDataRegion2OldLeaderMap, lastBalancedDataRegionSet);
+ }
+ }
+
+ private void flushOldLeaderIfIoTV2() {
+ if (!IS_ENABLE_AUTO_LEADER_BALANCE_FOR_DATA_REGION
+ || !Objects.equals(
+ DATA_REGION_CONSENSUS_PROTOCOL_CLASS, ConsensusFactory.IOT_CONSENSUS_V2)) {
+ return;
+ }
+
+ BiConsumer<Integer, List<String>> consumer =
+ (oldLeaderId, regionGroupIds) -> {
+ TDataNodeConfiguration configuration =
+ getNodeManager().getRegisteredDataNode(oldLeaderId);
+ Map<Integer, TDataNodeLocation> oldLeaderDataNodeLocation = new HashMap<>();
+ oldLeaderDataNodeLocation.put(
+ configuration.getLocation().dataNodeId, configuration.getLocation());
+
+ TFlushReq flushReq = new TFlushReq();
+ flushReq.setRegionIds(regionGroupIds);
+ // Do our best to flush; if the flush fails, never retry
+ TSStatus result = configManager.flushOnSpecificDN(flushReq, oldLeaderDataNodeLocation);
+ if (result.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+ LOGGER.info(
+ "[IoTConsensusV2 Leader Changed] Successfully flush old leader {} for region {}",
+ oldLeaderId,
+ regionGroupIds);
+ } else {
+ LOGGER.info(
+ "[IoTConsensusV2 Leader Changed] Failed to flush old leader {} for region {}",
+ oldLeaderId,
+ regionGroupIds);
+ }
+ };
+ lastBalancedOldLeaderId2RegionMap.forEach(consumer);
+ // after flush, clear map for next balance
+ lastBalancedOldLeaderId2RegionMap.clear();
+ }
+
+ private synchronized void handleBalanceAction() {
+ invalidateSchemaCacheOfOldLeaders();
+ flushOldLeaderIfIoTV2();
+ }

public synchronized void balanceRegionLeaderAndPriority() {
balanceRegionLeader();
balanceRegionPriority();
+ handleBalanceAction();
}

/** Balance cluster RegionGroup route priority through configured algorithm.
*/ @@ -261,20 +377,17 @@ private synchronized void balanceRegionPriority() { new TreeMap<>(); try { Map regionLeaderMap = getLoadManager().getRegionLeaderMap(); - Map dataNodeLoadScoreMap = getLoadManager().getAllDataNodeLoadScores(); // Balancing region priority in each SchemaRegionGroup Map optimalRegionPriorityMap = priorityRouter.generateOptimalRoutePriority( getPartitionManager().getAllReplicaSets(TConsensusGroupType.SchemaRegion), - regionLeaderMap, - dataNodeLoadScoreMap); + regionLeaderMap); // Balancing region priority in each DataRegionGroup optimalRegionPriorityMap.putAll( priorityRouter.generateOptimalRoutePriority( getPartitionManager().getAllReplicaSets(TConsensusGroupType.DataRegion), - regionLeaderMap, - dataNodeLoadScoreMap)); + regionLeaderMap)); optimalRegionPriorityMap.forEach( (regionGroupId, optimalRegionPriority) -> { @@ -310,7 +423,7 @@ private void broadcastLatestRegionPriorityMap() { Map tmpPriorityMap = getRegionPriorityMap(); DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.UPDATE_REGION_ROUTE_MAP, + CnToDnAsyncRequestType.UPDATE_REGION_ROUTE_MAP, new TRegionRouteReq(broadcastTime, tmpPriorityMap), dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); @@ -436,5 +549,6 @@ public void onRegionGroupStatisticsChanged(RegionGroupStatisticsChangeEvent even public void onConsensusGroupStatisticsChanged(ConsensusGroupStatisticsChangeEvent event) { balanceRegionLeader(); balanceRegionPriority(); + handleBalanceAction(); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/PartiteGraphReplicationRegionGroupAllocator.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/PartiteGraphReplicationRegionGroupAllocator.java index b24acc1bd4616..1205e0abc067c 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/PartiteGraphReplicationRegionGroupAllocator.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/PartiteGraphReplicationRegionGroupAllocator.java @@ -69,10 +69,9 @@ public TRegionReplicaSet generateOptimalRegionReplicasDistribution( TConsensusGroupId consensusGroupId) { this.regionPerDataNode = - (int) - (consensusGroupId.getType().equals(TConsensusGroupType.DataRegion) - ? ConfigNodeDescriptor.getInstance().getConf().getDataRegionPerDataNode() - : ConfigNodeDescriptor.getInstance().getConf().getSchemaRegionPerDataNode()); + consensusGroupId.getType().equals(TConsensusGroupType.DataRegion) + ? 
ConfigNodeDescriptor.getInstance().getConf().getDataRegionPerDataNode() + : ConfigNodeDescriptor.getInstance().getConf().getSchemaRegionPerDataNode(); prepare(replicationFactor, availableDataNodeMap, allocatedRegionGroups); // Select a set of optimal alpha nodes diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/GreedyPriorityBalancer.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/GreedyPriorityBalancer.java index 6eacf6f906b62..3d2ef54deb263 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/GreedyPriorityBalancer.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/GreedyPriorityBalancer.java @@ -19,16 +19,11 @@ package org.apache.iotdb.confignode.manager.load.balancer.router.priority; import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; -import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; -import org.apache.tsfile.utils.Pair; - -import java.util.Comparator; import java.util.List; import java.util.Map; import java.util.TreeMap; -import java.util.Vector; /** The GreedyPriorityBalancer always pick the Replica with the lowest loadScore */ public class GreedyPriorityBalancer implements IPriorityBalancer { @@ -39,48 +34,15 @@ public GreedyPriorityBalancer() { @Override public Map generateOptimalRoutePriority( - List replicaSets, - Map regionLeaderMap, - Map dataNodeLoadScoreMap) { + List replicaSets, Map regionLeaderMap) { Map regionPriorityMap = new TreeMap<>(); replicaSets.forEach( replicaSet -> { - TRegionReplicaSet sortedReplicaSet = - sortReplicasByLoadScore(replicaSet, dataNodeLoadScoreMap); - regionPriorityMap.put(sortedReplicaSet.getRegionId(), sortedReplicaSet); + regionPriorityMap.put(replicaSet.getRegionId(), replicaSet); }); return regionPriorityMap; } - - protected static TRegionReplicaSet sortReplicasByLoadScore( - TRegionReplicaSet replicaSet, Map dataNodeLoadScoreMap) { - TRegionReplicaSet sortedReplicaSet = new TRegionReplicaSet(); - sortedReplicaSet.setRegionId(replicaSet.getRegionId()); - - // List> for sorting - List> sortList = new Vector<>(); - replicaSet - .getDataNodeLocations() - .forEach( - dataNodeLocation -> { - // The absenteeism of loadScoreMap means ConfigNode-leader doesn't receive any - // heartbeat from that DataNode. - // In this case we put a maximum loadScore into the sortList. 
- sortList.add( - new Pair<>( - dataNodeLoadScoreMap.computeIfAbsent( - dataNodeLocation.getDataNodeId(), empty -> Long.MAX_VALUE), - dataNodeLocation)); - }); - - sortList.sort(Comparator.comparingLong(Pair::getLeft)); - for (Pair entry : sortList) { - sortedReplicaSet.addToDataNodeLocations(entry.getRight()); - } - - return sortedReplicaSet; - } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/IPriorityBalancer.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/IPriorityBalancer.java index 47d8650964514..0fa270c8908b5 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/IPriorityBalancer.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/IPriorityBalancer.java @@ -34,12 +34,9 @@ public interface IPriorityBalancer { * * @param replicaSets All RegionGroups * @param regionLeaderMap The current leader of each RegionGroup - * @param dataNodeLoadScoreMap The current load score of each DataNode * @return Map, The optimal route priority for each * RegionGroup. The replica with higher sorting result have higher priority. */ Map generateOptimalRoutePriority( - List replicaSets, - Map regionLeaderMap, - Map dataNodeLoadScoreMap); + List replicaSets, Map regionLeaderMap); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/LeaderPriorityBalancer.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/LeaderPriorityBalancer.java index 04113f5d54b01..5fc9b5ddb455a 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/LeaderPriorityBalancer.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/LeaderPriorityBalancer.java @@ -35,31 +35,24 @@ public LeaderPriorityBalancer() { @Override public Map generateOptimalRoutePriority( - List replicaSets, - Map regionLeaderMap, - Map dataNodeLoadScoreMap) { - + List replicaSets, Map regionLeaderMap) { Map regionPriorityMap = new TreeMap<>(); replicaSets.forEach( replicaSet -> { - /* 1. Sort replicaSet by loadScore */ - TRegionReplicaSet sortedReplicaSet = - sortReplicasByLoadScore(replicaSet, dataNodeLoadScoreMap); - /* 2. Pick leader if leader exists and available */ + /* 1. 
Pick leader if leader exists and available */
int leaderId = regionLeaderMap.getOrDefault(replicaSet.getRegionId(), -1);
- if (leaderId != -1
- && dataNodeLoadScoreMap.getOrDefault(leaderId, Long.MAX_VALUE) < Long.MAX_VALUE) {
- for (int i = 0; i < sortedReplicaSet.getDataNodeLocationsSize(); i++) {
- if (sortedReplicaSet.getDataNodeLocations().get(i).getDataNodeId() == leaderId) {
- Collections.swap(sortedReplicaSet.getDataNodeLocations(), 0, i);
+ if (leaderId != -1) {
+ for (int i = 0; i < replicaSet.getDataNodeLocationsSize(); i++) {
+ if (replicaSet.getDataNodeLocations().get(i).getDataNodeId() == leaderId) {
+ Collections.swap(replicaSet.getDataNodeLocations(), 0, i);
break;
}
}
}
- regionPriorityMap.put(sortedReplicaSet.getRegionId(), sortedReplicaSet);
+ regionPriorityMap.put(replicaSet.getRegionId(), replicaSet);
});

return regionPriorityMap;
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/AbstractLoadCache.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/AbstractLoadCache.java
index 01958a589c884..6355dfdcc8283 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/AbstractLoadCache.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/AbstractLoadCache.java
@@ -19,6 +19,11 @@
package org.apache.iotdb.confignode.manager.load.cache;

+import org.apache.iotdb.confignode.conf.ConfigNodeConfig;
+import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
+import org.apache.iotdb.confignode.manager.load.cache.detector.FixedDetector;
+import org.apache.iotdb.confignode.manager.load.cache.detector.PhiAccrualDetector;
+
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
@@ -32,17 +37,34 @@ public abstract class AbstractLoadCache {

// Max heartbeat cache samples store size
private static final int MAXIMUM_WINDOW_SIZE = 100;
- // The Status will be set to Unknown when the response time of heartbeat is more than 20s
- protected static final long HEARTBEAT_TIMEOUT_TIME_IN_NS = 20_000_000_000L;
// Caching the recent MAXIMUM_WINDOW_SIZE heartbeat sample
protected final List<AbstractHeartbeatSample> slidingWindow;
// The current statistics calculated by the latest heartbeat sample
protected final AtomicReference<AbstractStatistics> currentStatistics;
+ protected final IFailureDetector failureDetector;
+
+ private static final ConfigNodeConfig CONF = ConfigNodeDescriptor.getInstance().getConf();
+
protected AbstractLoadCache() {
this.currentStatistics = new AtomicReference<>();
this.slidingWindow = Collections.synchronizedList(new LinkedList<>());
+ switch (CONF.getFailureDetector()) {
+ case IFailureDetector.PHI_ACCRUAL_DETECTOR:
+ this.failureDetector =
+ new PhiAccrualDetector(
+ CONF.getFailureDetectorPhiThreshold(),
+ CONF.getFailureDetectorPhiAcceptablePauseInMs() * 1000_000L,
+ CONF.getHeartbeatIntervalInMs() * 200_000L,
+ 60,
+ new FixedDetector(CONF.getFailureDetectorFixedThresholdInMs() * 1000_000L));
+ break;
+ case IFailureDetector.FIXED_DETECTOR:
+ default:
+ this.failureDetector =
+ new FixedDetector(CONF.getFailureDetectorFixedThresholdInMs() * 1000_000L);
+ }
}

/**
@@ -56,7 +78,7 @@ public void cacheHeartbeatSample(AbstractHeartbeatSample newHeartbeatSample) {
// And un-sequential heartbeats will be discarded.
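One behavioral note on the guard just below, read directly from the hunk: the comparison relaxes from < to <=, so a sample whose logical timestamp equals the latest cached one is now admitted to the sliding window that the failure detectors read, and only strictly older samples are discarded:

    // before: last.getSampleLogicalTimestamp() <  incoming.getSampleLogicalTimestamp()
    // after:  last.getSampleLogicalTimestamp() <= incoming.getSampleLogicalTimestamp()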
if (getLastSample() == null || getLastSample().getSampleLogicalTimestamp() - < newHeartbeatSample.getSampleLogicalTimestamp()) { + <= newHeartbeatSample.getSampleLogicalTimestamp()) { slidingWindow.add(newHeartbeatSample); } @@ -71,14 +93,14 @@ public void cacheHeartbeatSample(AbstractHeartbeatSample newHeartbeatSample) { * * @return The latest heartbeat sample. */ - protected AbstractHeartbeatSample getLastSample() { + public AbstractHeartbeatSample getLastSample() { return slidingWindow.isEmpty() ? null : slidingWindow.get(slidingWindow.size() - 1); } /** * Update currentStatistics based on the latest heartbeat sample that cached in the slidingWindow. */ - public abstract void updateCurrentStatistics(); + public abstract void updateCurrentStatistics(boolean forceUpdate); public AbstractStatistics getCurrentStatistics() { return currentStatistics.get(); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/IFailureDetector.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/IFailureDetector.java new file mode 100644 index 0000000000000..be6a8f621bab2 --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/IFailureDetector.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.manager.load.cache; + +import java.util.List; + +/** + * IFailureDetector is the judge for node status (UNKNOWN). 
{@link #isAvailable} will be called at each
+ * fixed interval to update the node status.
+ */
+public interface IFailureDetector {
+ String FIXED_DETECTOR = "fixed";
+ String PHI_ACCRUAL_DETECTOR = "phi_accrual";
+
+ /**
+ * Given the heartbeat history, decide whether this endpoint is still available
+ *
+ * @param history heartbeat history
+ * @return false if the endpoint is under failure
+ */
+ boolean isAvailable(List<AbstractHeartbeatSample> history);
+}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/LoadCache.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/LoadCache.java
index e51d69073874d..3796f7ac2b767 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/LoadCache.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/LoadCache.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.confignode.manager.load.cache;

+import org.apache.iotdb.common.rpc.thrift.TAINodeConfiguration;
import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation;
import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId;
import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType;
@@ -29,11 +30,14 @@
import org.apache.iotdb.commons.cluster.NodeStatus;
import org.apache.iotdb.commons.cluster.NodeType;
import org.apache.iotdb.commons.cluster.RegionStatus;
+import org.apache.iotdb.confignode.conf.ConfigNodeConfig;
+import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
import org.apache.iotdb.confignode.manager.IManager;
import org.apache.iotdb.confignode.manager.ProcedureManager;
import org.apache.iotdb.confignode.manager.load.cache.consensus.ConsensusGroupCache;
import org.apache.iotdb.confignode.manager.load.cache.consensus.ConsensusGroupHeartbeatSample;
import org.apache.iotdb.confignode.manager.load.cache.consensus.ConsensusGroupStatistics;
+import org.apache.iotdb.confignode.manager.load.cache.node.AINodeHeartbeatCache;
import org.apache.iotdb.confignode.manager.load.cache.node.BaseNodeCache;
import org.apache.iotdb.confignode.manager.load.cache.node.ConfigNodeHeartbeatCache;
import org.apache.iotdb.confignode.manager.load.cache.node.DataNodeHeartbeatCache;
@@ -74,6 +78,8 @@ public class LoadCache {
ProcedureManager.PROCEDURE_WAIT_TIME_OUT - TimeUnit.SECONDS.toMillis(2),
TimeUnit.SECONDS.toMillis(10));
+ private static final ConfigNodeConfig CONF = ConfigNodeDescriptor.getInstance().getConf();
+
// Map<NodeId, AtomicBoolean>
// False indicates there is no processing heartbeat request, true otherwise
private final Map<Integer, AtomicBoolean> heartbeatProcessingMap;
// Map<NodeId, BaseNodeCache>
private final Map<Integer, BaseNodeCache> nodeCacheMap;
// Map<RegionGroupId, RegionGroupCache>
private final Map<TConsensusGroupId, RegionGroupCache> regionGroupCacheMap;
+ // Map<DataNodeId, Map<RegionGroupId, RegionSize>>
+ private final Map<Integer, Map<TConsensusGroupId, Long>> regionSizeMap;
// Map<RegionGroupId, ConsensusGroupCache>
private final Map<TConsensusGroupId, ConsensusGroupCache> consensusGroupCacheMap;
// Map<DataNodeId, Set<TEndPoint>>
@@ -90,6 +98,7 @@ public LoadCache() {
this.nodeCacheMap = new ConcurrentHashMap<>();
this.heartbeatProcessingMap = new ConcurrentHashMap<>();
this.regionGroupCacheMap = new ConcurrentHashMap<>();
+ this.regionSizeMap = new ConcurrentHashMap<>();
this.consensusGroupCacheMap = new ConcurrentHashMap<>();
this.confirmedConfigNodeMap = new ConcurrentHashMap<>();
}
@@ -97,7 +106,8 @@ public LoadCache() {
public void initHeartbeatCache(IManager configManager) {
initNodeHeartbeatCache(
configManager.getNodeManager().getRegisteredConfigNodes(),
- configManager.getNodeManager().getRegisteredDataNodes());
+ configManager.getNodeManager().getRegisteredDataNodes(),
+ configManager.getNodeManager().getRegisteredAINodes());
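A minimal sketch of how the IFailureDetector contract defined above is consumed, under stated assumptions: the 20_000_000_000L value mirrors the fixed 20s HEARTBEAT_TIMEOUT_TIME_IN_NS constant this patch deletes, FixedDetector is the implementation added later in this patch, and getHeartbeatHistory() is a hypothetical accessor standing in for a cache's slidingWindow:

    // Both detector flavors are used through the same single-method interface.
    IFailureDetector detector = new FixedDetector(20_000_000_000L); // 20s, in nanoseconds
    List<AbstractHeartbeatSample> history = cache.getHeartbeatHistory(); // hypothetical
    if (!detector.isAvailable(history)) {
      // The node or region that produced this history is judged Unknown.
    }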
initRegionGroupHeartbeatCache(
configManager.getClusterSchemaManager().getDatabaseNames().stream()
.collect(
@@ -109,7 +119,8 @@ public void initHeartbeatCache(IManager configManager) {
/** Initialize the nodeCacheMap when the ConfigNode-Leader is switched. */
private void initNodeHeartbeatCache(
List<TConfigNodeLocation> registeredConfigNodes,
- List<TDataNodeConfiguration> registeredDataNodes) {
+ List<TDataNodeConfiguration> registeredDataNodes,
+ List<TAINodeConfiguration> registeredAINodes) {
final int CURRENT_NODE_ID = ConfigNodeHeartbeatCache.CURRENT_NODE_ID;
nodeCacheMap.clear();
@@ -135,6 +146,13 @@ private void initNodeHeartbeatCache(
int dataNodeId = dataNodeConfiguration.getLocation().getDataNodeId();
createNodeHeartbeatCache(NodeType.DataNode, dataNodeId);
});
+
+ // Init AiNodeHeartbeatCache
+ registeredAINodes.forEach(
+ aiNodeConfiguration -> {
+ int aiNodeId = aiNodeConfiguration.getLocation().getAiNodeId();
+ createNodeHeartbeatCache(NodeType.AINode, aiNodeId);
+ });
}

/**
@@ -150,13 +168,16 @@ private void initRegionGroupHeartbeatCache(
regionReplicaSets.forEach(
regionReplicaSet -> {
TConsensusGroupId regionGroupId = regionReplicaSet.getRegionId();
+ boolean isStrongConsistency =
+ CONF.isConsensusGroupStrongConsistency(regionGroupId);
regionGroupCacheMap.put(
regionGroupId,
new RegionGroupCache(
database,
regionReplicaSet.getDataNodeLocations().stream()
.map(TDataNodeLocation::getDataNodeId)
- .collect(Collectors.toSet())));
+ .collect(Collectors.toSet()),
+ isStrongConsistency));
consensusGroupCacheMap.put(regionGroupId, new ConsensusGroupCache());
}));
}
@@ -192,9 +213,11 @@ public void createNodeHeartbeatCache(NodeType nodeType, int nodeId) {
nodeCacheMap.put(nodeId, new ConfigNodeHeartbeatCache(nodeId));
break;
case DataNode:
- default:
nodeCacheMap.put(nodeId, new DataNodeHeartbeatCache(nodeId));
break;
+ case AINode:
+ nodeCacheMap.put(nodeId, new AINodeHeartbeatCache(nodeId));
+ break;
}
heartbeatProcessingMap.put(nodeId, new AtomicBoolean(false));
}
@@ -225,8 +248,21 @@ public void cacheDataNodeHeartbeatSample(int nodeId, NodeHeartbeatSample sample)
Optional.ofNullable(heartbeatProcessingMap.get(nodeId)).ifPresent(node -> node.set(false));
}

+ /**
+ * Cache the latest heartbeat sample of an AINode.
+ * + * @param nodeId the id of the AINode + * @param sample the latest heartbeat sample + */ + public void cacheAINodeHeartbeatSample(int nodeId, NodeHeartbeatSample sample) { + nodeCacheMap + .computeIfAbsent(nodeId, empty -> new AINodeHeartbeatCache(nodeId)) + .cacheHeartbeatSample(sample); + Optional.ofNullable(heartbeatProcessingMap.get(nodeId)).ifPresent(node -> node.set(false)); + } + public void resetHeartbeatProcessing(int nodeId) { - heartbeatProcessingMap.get(nodeId).set(false); + Optional.ofNullable(heartbeatProcessingMap.get(nodeId)).ifPresent(node -> node.set(false)); } /** @@ -248,7 +284,9 @@ public void removeNodeCache(int nodeId) { */ public void createRegionGroupHeartbeatCache( String database, TConsensusGroupId regionGroupId, Set dataNodeIds) { - regionGroupCacheMap.put(regionGroupId, new RegionGroupCache(database, dataNodeIds)); + boolean isStrongConsistency = CONF.isConsensusGroupStrongConsistency(regionGroupId); + regionGroupCacheMap.put( + regionGroupId, new RegionGroupCache(database, dataNodeIds, isStrongConsistency)); consensusGroupCacheMap.put(regionGroupId, new ConsensusGroupCache()); } @@ -280,6 +318,14 @@ public void cacheRegionHeartbeatSample( .ifPresent(group -> group.cacheHeartbeatSample(nodeId, sample, overwrite)); } + public RegionStatus getRegionCacheLastSampleStatus(TConsensusGroupId regionGroupId, int nodeId) { + return Optional.ofNullable(regionGroupCacheMap.get(regionGroupId)) + .map(regionGroupCache -> regionGroupCache.getRegionCache(nodeId)) + .map(regionCache -> (RegionHeartbeatSample) regionCache.getLastSample()) + .map(RegionHeartbeatSample::getStatus) + .orElse(RegionStatus.Unknown); + } + /** * Remove the cache of the specified Region in the specified RegionGroup. * @@ -305,8 +351,10 @@ public void cacheConsensusSample( } /** Update the NodeStatistics of all Nodes. */ - public void updateNodeStatistics() { - nodeCacheMap.values().forEach(BaseNodeCache::updateCurrentStatistics); + public void updateNodeStatistics(boolean forceUpdate) { + nodeCacheMap + .values() + .forEach(baseNodeCache -> baseNodeCache.updateCurrentStatistics(forceUpdate)); } /** Update the RegionGroupStatistics of all RegionGroups. */ @@ -316,7 +364,9 @@ public void updateRegionGroupStatistics() { /** Update the ConsensusGroupStatistics of all RegionGroups. */ public void updateConsensusGroupStatistics() { - consensusGroupCacheMap.values().forEach(ConsensusGroupCache::updateCurrentStatistics); + consensusGroupCacheMap + .values() + .forEach(consensusGroupCache -> consensusGroupCache.updateCurrentStatistics(false)); } /** @@ -439,8 +489,9 @@ public Map getCurrentConsensusGroup * @return NodeStatus of the specified Node. Unknown if cache doesn't exist. */ public NodeStatus getNodeStatus(int nodeId) { - BaseNodeCache nodeCache = nodeCacheMap.get(nodeId); - return nodeCache == null ? NodeStatus.Unknown : nodeCache.getNodeStatus(); + return Optional.ofNullable(nodeCacheMap.get(nodeId)) + .map(BaseNodeCache::getNodeStatus) + .orElse(NodeStatus.Unknown); } /** @@ -450,10 +501,9 @@ public NodeStatus getNodeStatus(int nodeId) { * @return The specified Node's current status if the nodeCache contains it, Unknown otherwise */ public String getNodeStatusWithReason(int nodeId) { - BaseNodeCache nodeCache = nodeCacheMap.get(nodeId); - return nodeCache == null - ? 
NodeStatus.Unknown.getStatus() + "(NoHeartbeat)" - : nodeCache.getNodeStatusWithReason(); + return Optional.ofNullable(nodeCacheMap.get(nodeId)) + .map(BaseNodeCache::getNodeStatusWithReason) + .orElseGet(() -> NodeStatus.Unknown.getStatus() + "(NoHeartbeat)"); } /** @@ -507,25 +557,9 @@ public List filterDataNodeThroughStatus(NodeStatus... status) { * @return The free disk space that sample through heartbeat, 0 if no heartbeat received */ public double getFreeDiskSpace(int dataNodeId) { - DataNodeHeartbeatCache dataNodeHeartbeatCache = - (DataNodeHeartbeatCache) nodeCacheMap.get(dataNodeId); - return dataNodeHeartbeatCache == null ? 0d : dataNodeHeartbeatCache.getFreeDiskSpace(); - } - - /** - * Get the loadScore of each DataNode. - * - * @return Map - */ - public Map getAllDataNodeLoadScores() { - Map result = new ConcurrentHashMap<>(); - nodeCacheMap.forEach( - (dataNodeId, heartbeatCache) -> { - if (heartbeatCache instanceof DataNodeHeartbeatCache) { - result.put(dataNodeId, heartbeatCache.getLoadScore()); - } - }); - return result; + return Optional.ofNullable((DataNodeHeartbeatCache) nodeCacheMap.get(dataNodeId)) + .map(DataNodeHeartbeatCache::getFreeDiskSpace) + .orElse(0d); } /** @@ -564,12 +598,9 @@ public int getLowestLoadDataNode(List dataNodeIds) { * @return Corresponding RegionStatus if cache exists, Unknown otherwise */ public RegionStatus getRegionStatus(TConsensusGroupId consensusGroupId, int dataNodeId) { - return regionGroupCacheMap.containsKey(consensusGroupId) - ? regionGroupCacheMap - .get(consensusGroupId) - .getCurrentStatistics() - .getRegionStatus(dataNodeId) - : RegionStatus.Unknown; + return Optional.ofNullable(regionGroupCacheMap.get(consensusGroupId)) + .map(x -> x.getCurrentStatistics().getRegionStatus(dataNodeId)) + .orElse(RegionStatus.Unknown); } /** @@ -579,9 +610,9 @@ public RegionStatus getRegionStatus(TConsensusGroupId consensusGroupId, int data * @return Corresponding RegionGroupStatus if cache exists, Disabled otherwise */ public RegionGroupStatus getRegionGroupStatus(TConsensusGroupId consensusGroupId) { - return regionGroupCacheMap.containsKey(consensusGroupId) - ? 
regionGroupCacheMap.get(consensusGroupId).getCurrentStatistics().getRegionGroupStatus()
- : RegionGroupStatus.Disabled;
+ return Optional.ofNullable(regionGroupCacheMap.get(consensusGroupId))
+ .map(x -> x.getCurrentStatistics().getRegionGroupStatus())
+ .orElse(RegionGroupStatus.Disabled);
}

/**
@@ -729,4 +760,12 @@ public void updateConfirmedConfigNodeEndPoints(
public Set<TEndPoint> getConfirmedConfigNodeEndPoints(int dataNodeId) {
return confirmedConfigNodeMap.get(dataNodeId);
}
+
+ public void updateRegionSizeMap(int dataNodeId, Map<TConsensusGroupId, Long> regionSizeMap) {
+ this.regionSizeMap.put(dataNodeId, regionSizeMap);
+ }
+
+ public Map<Integer, Map<TConsensusGroupId, Long>> getRegionSizeMap() {
+ return regionSizeMap;
+ }
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/consensus/ConsensusGroupCache.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/consensus/ConsensusGroupCache.java
index 32d798c48b358..aa924dc29b585 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/consensus/ConsensusGroupCache.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/consensus/ConsensusGroupCache.java
@@ -36,7 +36,7 @@ public ConsensusGroupCache() {
}

@Override
- public synchronized void updateCurrentStatistics() {
+ public synchronized void updateCurrentStatistics(boolean forceUpdate) {
ConsensusGroupHeartbeatSample lastSample;
synchronized (slidingWindow) {
lastSample = (ConsensusGroupHeartbeatSample) getLastSample();
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/detector/FixedDetector.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/detector/FixedDetector.java
new file mode 100644
index 0000000000000..35fb4e3f2a1c7
--- /dev/null
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/detector/FixedDetector.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.manager.load.cache.detector;
+
+import org.apache.iotdb.confignode.manager.load.cache.AbstractHeartbeatSample;
+import org.apache.iotdb.confignode.manager.load.cache.IFailureDetector;
+import org.apache.iotdb.confignode.manager.load.cache.node.NodeHeartbeatSample;
+import org.apache.iotdb.confignode.manager.load.cache.region.RegionHeartbeatSample;
+
+import org.apache.tsfile.utils.Preconditions;
+
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * FixedDetector decides that a node is unknown iff the time elapsed since the last heartbeat
+ * exceeds heartbeatTimeoutNs.
*/
+public class FixedDetector implements IFailureDetector {
+ private final long heartbeatTimeoutNs;
+
+ public FixedDetector(long heartbeatTimeoutNs) {
+ this.heartbeatTimeoutNs = heartbeatTimeoutNs;
+ }
+
+ @Override
+ public boolean isAvailable(List<AbstractHeartbeatSample> history) {
+ final AbstractHeartbeatSample lastSample =
+ history.isEmpty() ? null : history.get(history.size() - 1);
+ if (lastSample != null) {
+ Preconditions.checkArgument(
+ lastSample instanceof NodeHeartbeatSample || lastSample instanceof RegionHeartbeatSample);
+ }
+ final long lastSendTime =
+ Optional.ofNullable(lastSample)
+ .map(AbstractHeartbeatSample::getSampleLogicalTimestamp)
+ .orElse(0L);
+ final long currentNanoTime = System.nanoTime();
+ return currentNanoTime - lastSendTime <= heartbeatTimeoutNs;
+ }
+}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/detector/PhiAccrualDetector.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/detector/PhiAccrualDetector.java
new file mode 100644
index 0000000000000..39cc45c151914
--- /dev/null
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/detector/PhiAccrualDetector.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.manager.load.cache.detector;
+
+import org.apache.iotdb.confignode.manager.load.cache.AbstractHeartbeatSample;
+import org.apache.iotdb.confignode.manager.load.cache.IFailureDetector;
+import org.apache.iotdb.confignode.manager.load.cache.node.NodeHeartbeatSample;
+import org.apache.iotdb.confignode.manager.load.cache.region.RegionHeartbeatSample;
+
+import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
+import org.apache.tsfile.utils.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * The Phi Failure Detector, proposed by Hayashibara, Naohiro, et al., "The φ accrual failure
+ * detector". It is an accrual approach based on heartbeat history analysis with dynamic
+ * sensitivity and a tunable threshold. It is adaptive, with early failure detection, increased
+ * accuracy and improved system stability.
+ *

Initially, Phi has a cold start period where it will only collect heartbeat samples and
+ * fall back to {@link FixedDetector} for decision-making. After collecting enough samples, it
+ * will start failure detection using the Phi algorithm.
+ */
+public class PhiAccrualDetector implements IFailureDetector {
+ private static final Logger LOGGER = LoggerFactory.getLogger(PhiAccrualDetector.class);
+ private final long threshold;
+ private final long acceptableHeartbeatPauseNs;
+ private final long minHeartbeatStdNs;
+ private final int codeStartSampleCount;
+ private final IFailureDetector fallbackDuringColdStart;
+
+ public PhiAccrualDetector(
+ long threshold,
+ long acceptableHeartbeatPauseNs,
+ long minHeartbeatStdNs,
+ int minimalSampleCount,
+ IFailureDetector fallbackDuringColdStart) {
+ this.threshold = threshold;
+ this.acceptableHeartbeatPauseNs = acceptableHeartbeatPauseNs;
+ this.minHeartbeatStdNs = minHeartbeatStdNs;
+ this.codeStartSampleCount = minimalSampleCount;
+ this.fallbackDuringColdStart = fallbackDuringColdStart;
+ }
+
+ @Override
+ public boolean isAvailable(List<AbstractHeartbeatSample> history) {
+ if (history.size() < codeStartSampleCount) {
+ /* We haven't received enough heartbeat replies. */
+ return fallbackDuringColdStart.isAvailable(history);
+ }
+ final PhiAccrual phiAccrual = create(history);
+ final boolean isAvailable = phiAccrual.phi() < (double) this.threshold;
+ if (!isAvailable && LOGGER.isDebugEnabled()) {
+ // log the status change and dump the heartbeat history for analysis use
+ final StringBuilder builder = new StringBuilder();
+ builder.append("[");
+ for (double interval : phiAccrual.heartbeatIntervals) {
+ final long msInterval = (long) interval / 1000_000;
+ builder.append(msInterval).append(", ");
+ }
+ builder.append(phiAccrual.timeElapsedSinceLastHeartbeat / 1000_000);
+ builder.append("]");
+ LOGGER.debug(String.format("Node Down, heartbeat history (ms): %s", builder));
+ }
+
+ return isAvailable;
+ }
+
+ PhiAccrual create(List<AbstractHeartbeatSample> history) {
+ final List<Double> heartbeatIntervals = new ArrayList<>();
+
+ long lastTs = -1;
+ for (final AbstractHeartbeatSample sample : history) {
+ // ensure getSampleLogicalTimestamp() will return system nano timestamp
+ Preconditions.checkArgument(
+ sample instanceof NodeHeartbeatSample || sample instanceof RegionHeartbeatSample);
+ if (lastTs == -1) {
+ lastTs = sample.getSampleLogicalTimestamp();
+ continue;
+ }
+ heartbeatIntervals.add((double) sample.getSampleLogicalTimestamp() - lastTs);
+ lastTs = sample.getSampleLogicalTimestamp();
+ }
+ final long lastHeartbeatTimestamp = history.get(history.size() - 1).getSampleLogicalTimestamp();
+ final long timeElapsedSinceLastHeartbeat = System.nanoTime() - lastHeartbeatTimestamp;
+
+ final double[] intervalArray =
+ heartbeatIntervals.stream().mapToDouble(Double::doubleValue).toArray();
+ return new PhiAccrual(
+ intervalArray,
+ timeElapsedSinceLastHeartbeat,
+ minHeartbeatStdNs,
+ acceptableHeartbeatPauseNs);
+ }
+
+ /**
+ * The φ Accrual Failure Detector implementation. See φ
See φ + * Accrual + */ + static final class PhiAccrual { + /* + * All the heartbeat related intervals within this class should be calculated in unit of nanoseconds + */ + private final double[] heartbeatIntervals; + private final long timeElapsedSinceLastHeartbeat; + private final long minHeartbeatStd; + private final long acceptableHeartbeatPause; + + PhiAccrual( + double[] heartbeatIntervals, + long timeElapsedSinceLastHeartbeat, + long minHeartbeatStd, + long acceptableHeartbeatPause) { + Preconditions.checkArgument(heartbeatIntervals.length > 0); + Preconditions.checkArgument(timeElapsedSinceLastHeartbeat >= 0); + this.heartbeatIntervals = heartbeatIntervals; + this.timeElapsedSinceLastHeartbeat = timeElapsedSinceLastHeartbeat; + this.minHeartbeatStd = minHeartbeatStd; + this.acceptableHeartbeatPause = acceptableHeartbeatPause; + } + + /** + * @return phi value given the heartbeat interval history + */ + double phi() { + final DescriptiveStatistics ds = new DescriptiveStatistics(heartbeatIntervals); + double mean = ds.getMean(); + double std = ds.getStandardDeviation(); + + /* ensure the std is valid */ + std = Math.max(std, minHeartbeatStd); + + /* add tolerance specified by acceptableHeartbeatPause */ + mean += acceptableHeartbeatPause; + + return p(timeElapsedSinceLastHeartbeat, mean, std); + } + + /** + * Core method for calculating the phi φ coefficient. It uses a logistic approximation to the + * cumulative normal distribution. + * + * @param elapsedTime the difference of the times (current - last heartbeat timestamp) + * @param historyMean the mean of the history distribution + * @param historyStd the standard deviation of the history distribution + * @return The value of the φ + */ + private double p(double elapsedTime, double historyMean, double historyStd) { + final double y = (elapsedTime - historyMean) / historyStd; + /* Math.exp will return {@link Double.POSITIVE_INFINITY} SAFELY when overflows. */ + double e = Math.exp(-y * (1.5976 + 0.070566 * y * y)); + if (elapsedTime > historyMean) { + return -Math.log10(e / (1.0 + e)); + } else { + return -Math.log10(1.0 - 1.0 / (1.0 + e)); + } + } + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/AINodeHeartbeatCache.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/AINodeHeartbeatCache.java new file mode 100644 index 0000000000000..187c35802af06 --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/AINodeHeartbeatCache.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.manager.load.cache.node; + +import org.apache.iotdb.common.rpc.thrift.TLoadSample; +import org.apache.iotdb.commons.cluster.NodeStatus; +import org.apache.iotdb.confignode.manager.load.cache.AbstractHeartbeatSample; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicReference; + +public class AINodeHeartbeatCache extends BaseNodeCache { + + private final AtomicReference latestLoadSample; + + public AINodeHeartbeatCache(int aiNodeId) { + super(aiNodeId); + this.latestLoadSample = new AtomicReference<>(new TLoadSample()); + } + + @Override + public void updateCurrentStatistics(boolean forceUpdate) { + NodeHeartbeatSample lastSample; + final List heartbeatHistory; + /* Update Node status */ + NodeStatus status = null; + String statusReason = null; + long currentNanoTime = System.nanoTime(); + synchronized (slidingWindow) { + lastSample = (NodeHeartbeatSample) getLastSample(); + heartbeatHistory = Collections.unmodifiableList(slidingWindow); + /* Update load sample */ + if (lastSample != null && lastSample.isSetLoadSample()) { + latestLoadSample.set((lastSample.getLoadSample())); + } + + if (lastSample != null && NodeStatus.Removing.equals(lastSample.getStatus())) { + status = NodeStatus.Removing; + } else if (!failureDetector.isAvailable(heartbeatHistory)) { + /* Failure detector decides that this AINode is UNKNOWN */ + status = NodeStatus.Unknown; + } else if (lastSample != null) { + status = lastSample.getStatus(); + statusReason = lastSample.getStatusReason(); + } + } + + long loadScore = NodeStatus.isNormalStatus(status) ? 0 : Long.MAX_VALUE; + + NodeStatistics newStatistics = + new NodeStatistics(currentNanoTime, status, statusReason, loadScore); + if (!currentStatistics.get().equals(newStatistics)) { + // Update the current NodeStatistics if necessary + currentStatistics.set(newStatistics); + } + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/ConfigNodeHeartbeatCache.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/ConfigNodeHeartbeatCache.java index 72daed535120a..a4b8051cb748f 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/ConfigNodeHeartbeatCache.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/ConfigNodeHeartbeatCache.java @@ -21,6 +21,10 @@ import org.apache.iotdb.commons.cluster.NodeStatus; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; +import org.apache.iotdb.confignode.manager.load.cache.AbstractHeartbeatSample; + +import java.util.Collections; +import java.util.List; /** Heartbeat cache for cluster ConfigNodes. */ public class ConfigNodeHeartbeatCache extends BaseNodeCache { @@ -44,28 +48,30 @@ public ConfigNodeHeartbeatCache(int configNodeId, NodeStatistics statistics) { } @Override - public synchronized void updateCurrentStatistics() { + public synchronized void updateCurrentStatistics(boolean forceUpdate) { // Skip itself and the Removing status can not be updated if (nodeId == CURRENT_NODE_ID || NodeStatus.Removing.equals(getNodeStatus())) { return; } NodeHeartbeatSample lastSample; - synchronized (slidingWindow) { - lastSample = (NodeHeartbeatSample) getLastSample(); - } - long lastSendTime = lastSample == null ? 
0 : lastSample.getSampleLogicalTimestamp(); - // Update Node status NodeStatus status; long currentNanoTime = System.nanoTime(); - if (lastSample == null) { - status = NodeStatus.Unknown; - } else if (currentNanoTime - lastSendTime > HEARTBEAT_TIMEOUT_TIME_IN_NS) { - // TODO: Optimize Unknown judge logic - status = NodeStatus.Unknown; - } else { - status = lastSample.getStatus(); + final List heartbeatHistory; + synchronized (slidingWindow) { + lastSample = (NodeHeartbeatSample) getLastSample(); + heartbeatHistory = Collections.unmodifiableList(slidingWindow); + + if (lastSample == null) { + /* First heartbeat not received from this ConfigNode, status is UNKNOWN */ + status = NodeStatus.Unknown; + } else if (!failureDetector.isAvailable(heartbeatHistory)) { + /* Failure detector decides that this ConfigNode is UNKNOWN */ + status = NodeStatus.Unknown; + } else { + status = lastSample.getStatus(); + } } /* Update loadScore */ diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/DataNodeHeartbeatCache.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/DataNodeHeartbeatCache.java index 8948384efa881..87dccb1465dc5 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/DataNodeHeartbeatCache.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/DataNodeHeartbeatCache.java @@ -19,14 +19,22 @@ package org.apache.iotdb.confignode.manager.load.cache.node; +import org.apache.iotdb.common.rpc.thrift.TLoadSample; import org.apache.iotdb.commons.cluster.NodeStatus; -import org.apache.iotdb.mpp.rpc.thrift.TLoadSample; +import org.apache.iotdb.confignode.manager.load.cache.AbstractHeartbeatSample; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Collections; +import java.util.List; import java.util.concurrent.atomic.AtomicReference; /** Heartbeat cache for cluster DataNodes. */ public class DataNodeHeartbeatCache extends BaseNodeCache { + private static final Logger LOGGER = LoggerFactory.getLogger(DataNodeHeartbeatCache.class); + // TODO: The load sample may be moved into NodeStatistics in the future private final AtomicReference latestLoadSample; @@ -37,35 +45,36 @@ public DataNodeHeartbeatCache(int dataNodeId) { } @Override - public synchronized void updateCurrentStatistics() { + public synchronized void updateCurrentStatistics(boolean forceUpdate) { // The Removing status can not be updated - if (NodeStatus.Removing.equals(getNodeStatus())) { + if (!forceUpdate && NodeStatus.Removing.equals(getNodeStatus())) { return; } NodeHeartbeatSample lastSample; - synchronized (slidingWindow) { - lastSample = (NodeHeartbeatSample) getLastSample(); - } - long lastSendTime = lastSample == null ? 
0 : lastSample.getSampleLogicalTimestamp(); - - /* Update load sample */ - if (lastSample != null && lastSample.isSetLoadSample()) { - latestLoadSample.set(lastSample.getLoadSample()); - } - + final List<AbstractHeartbeatSample> heartbeatHistory; /* Update Node status */ NodeStatus status; String statusReason = null; long currentNanoTime = System.nanoTime(); - if (lastSample == null) { - status = NodeStatus.Unknown; - } else if (currentNanoTime - lastSendTime > HEARTBEAT_TIMEOUT_TIME_IN_NS) { - // TODO: Optimize Unknown judge logic - status = NodeStatus.Unknown; - } else { - status = lastSample.getStatus(); - statusReason = lastSample.getStatusReason(); + synchronized (slidingWindow) { + lastSample = (NodeHeartbeatSample) getLastSample(); + heartbeatHistory = Collections.unmodifiableList(slidingWindow); + /* Update load sample */ + if (lastSample != null && lastSample.isSetLoadSample()) { + latestLoadSample.set(lastSample.getLoadSample()); + } + + if (lastSample == null) { + /* First heartbeat not received from this DataNode, status is UNKNOWN */ + status = NodeStatus.Unknown; + } else if (!failureDetector.isAvailable(heartbeatHistory)) { + /* Failure detector decides that this DataNode is UNKNOWN */ + status = NodeStatus.Unknown; + } else { + status = lastSample.getStatus(); + statusReason = lastSample.getStatusReason(); + } } /* Update loadScore */ @@ -74,6 +83,11 @@ public synchronized void updateCurrentStatistics() { long loadScore = NodeStatus.isNormalStatus(status) ? 0 : Long.MAX_VALUE; currentStatistics.set(new NodeStatistics(currentNanoTime, status, statusReason, loadScore)); + + if (forceUpdate) { + LOGGER.debug( + "Force update NodeCache: status={}, currentNanoTime={}", status, currentNanoTime); + } } public double getFreeDiskSpace() { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/NodeHeartbeatSample.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/NodeHeartbeatSample.java index 0c8f2c23cb20c..8217593f5d67d 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/NodeHeartbeatSample.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/NodeHeartbeatSample.java @@ -19,11 +19,12 @@ package org.apache.iotdb.confignode.manager.load.cache.node; +import org.apache.iotdb.ainode.rpc.thrift.TAIHeartbeatResp; +import org.apache.iotdb.common.rpc.thrift.TLoadSample; import org.apache.iotdb.commons.cluster.NodeStatus; import org.apache.iotdb.confignode.manager.load.cache.AbstractHeartbeatSample; import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeHeartbeatResp; import org.apache.iotdb.mpp.rpc.thrift.TDataNodeHeartbeatResp; -import org.apache.iotdb.mpp.rpc.thrift.TLoadSample; /** NodeHeartbeatSample records the heartbeat sample of a Node. */ public class NodeHeartbeatSample extends AbstractHeartbeatSample { @@ -58,6 +59,18 @@ public NodeHeartbeatSample(TDataNodeHeartbeatResp heartbeatResp) { this.loadSample = heartbeatResp.isSetLoadSample() ? heartbeatResp.getLoadSample() : null; } + /** Constructor for AINode sample. */ + public NodeHeartbeatSample(TAIHeartbeatResp heartbeatResp) { + super(heartbeatResp.getHeartbeatTimestamp()); + this.status = NodeStatus.parse(heartbeatResp.getStatus()); + this.statusReason = heartbeatResp.isSetStatusReason() ?
heartbeatResp.getStatusReason() : null; + if (heartbeatResp.isSetLoadSample()) { + this.loadSample = heartbeatResp.getLoadSample(); + } else { + this.loadSample = null; + } + } + /** Constructor for ConfigNode sample. */ public NodeHeartbeatSample(TConfigNodeHeartbeatResp heartbeatResp) { super(heartbeatResp.getTimestamp()); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/region/RegionCache.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/region/RegionCache.java index 8facf40ba5a5c..9210585428dd5 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/region/RegionCache.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/region/RegionCache.java @@ -20,8 +20,12 @@ package org.apache.iotdb.confignode.manager.load.cache.region; import org.apache.iotdb.commons.cluster.RegionStatus; +import org.apache.iotdb.confignode.manager.load.cache.AbstractHeartbeatSample; import org.apache.iotdb.confignode.manager.load.cache.AbstractLoadCache; +import java.util.Collections; +import java.util.List; + /** * RegionCache caches the RegionHeartbeatSamples of a Region. Update and cache the current * statistics of the Region based on the latest RegionHeartbeatSample. @@ -34,24 +38,26 @@ public RegionCache() { } @Override - public synchronized void updateCurrentStatistics() { + public synchronized void updateCurrentStatistics(boolean forceUpdate) { RegionHeartbeatSample lastSample; + List history; synchronized (slidingWindow) { lastSample = (RegionHeartbeatSample) getLastSample(); - } + history = Collections.unmodifiableList(slidingWindow); - RegionStatus status; - long currentNanoTime = System.nanoTime(); - if (lastSample == null) { - status = RegionStatus.Unknown; - } else if (currentNanoTime - lastSample.getSampleLogicalTimestamp() - > HEARTBEAT_TIMEOUT_TIME_IN_NS) { - // TODO: Optimize Unknown judge logic - status = RegionStatus.Unknown; - } else { - status = lastSample.getStatus(); + RegionStatus status; + long currentNanoTime = System.nanoTime(); + if (lastSample == null) { + /* First heartbeat not received from this region, status is UNKNOWN */ + status = RegionStatus.Unknown; + } else if (!failureDetector.isAvailable(history)) { + /* Failure detector decides that this region is UNKNOWN */ + status = RegionStatus.Unknown; + } else { + status = lastSample.getStatus(); + } + this.currentStatistics.set(new RegionStatistics(currentNanoTime, status)); } - this.currentStatistics.set(new RegionStatistics(currentNanoTime, status)); } public RegionStatistics getCurrentStatistics() { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/region/RegionGroupCache.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/region/RegionGroupCache.java index d166d4bceae37..0ee3f7d2cf859 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/region/RegionGroupCache.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/region/RegionGroupCache.java @@ -42,14 +42,16 @@ public class RegionGroupCache { private final Map regionCacheMap; // The current RegionGroupStatistics, used for providing statistics to other services private final AtomicReference currentStatistics; + private final boolean isStrongConsistency; /** Constructor for create RegionGroupCache with default RegionGroupStatistics. 
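A note on the pattern above: every heartbeat cache in this change drops its fixed HEARTBEAT_TIMEOUT_TIME_IN_NS comparison and instead asks a failureDetector to judge the slidingWindow of recent samples. A minimal sketch of the contract this implies, using a fixed-timeout strategy as the simplest possible implementation (the interface, sample type, and timeout constant here are illustrative stand-ins, not the actual IoTDB classes):

import java.util.List;

// Sketch only: HeartbeatSample stands in for AbstractHeartbeatSample, and the
// timeout is an assumed constant. Real detector implementations may apply
// adaptive strategies (e.g. phi-accrual) over the same sliding window.
interface HeartbeatSample {
  long getSampleLogicalTimestamp(); // System.nanoTime() when the sample was taken
}

interface FailureDetector {
  /** Decides availability from a node's recent heartbeat history. */
  boolean isAvailable(List<? extends HeartbeatSample> history);
}

class FixedTimeoutFailureDetector implements FailureDetector {
  private static final long TIMEOUT_NS = 20_000_000_000L; // assumed 20s

  @Override
  public boolean isAvailable(List<? extends HeartbeatSample> history) {
    if (history.isEmpty()) {
      return false; // no heartbeat received yet
    }
    HeartbeatSample latest = history.get(history.size() - 1);
    return System.nanoTime() - latest.getSampleLogicalTimestamp() <= TIMEOUT_NS;
  }
}

Note that the caches above build the Collections.unmodifiableList view and consult the detector while still holding the slidingWindow lock, which matters because that view is read-only but not a copy: releasing the lock first would let the window mutate mid-decision.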
*/ - public RegionGroupCache(String database, Set dataNodeIds) { + public RegionGroupCache(String database, Set dataNodeIds, boolean isStrongConsistency) { this.database = database; this.regionCacheMap = new ConcurrentHashMap<>(); dataNodeIds.forEach(dataNodeId -> regionCacheMap.put(dataNodeId, new RegionCache())); this.currentStatistics = new AtomicReference<>(RegionGroupStatistics.generateDefaultRegionGroupStatistics()); + this.isStrongConsistency = isStrongConsistency; } /** @@ -94,7 +96,7 @@ public void removeRegionCache(int dataNodeId) { * slidingWindow. */ public void updateCurrentStatistics() { - regionCacheMap.values().forEach(RegionCache::updateCurrentStatistics); + regionCacheMap.values().forEach(regionCache -> regionCache.updateCurrentStatistics(false)); Map regionStatisticsMap = regionCacheMap.entrySet().stream() .collect( @@ -108,37 +110,31 @@ public void updateCurrentStatistics() { private RegionGroupStatus caculateRegionGroupStatus( Map regionStatisticsMap) { - int unknownCount = 0; - int readonlyCount = 0; + + int runningCount = 0; + int addingCount = 0; + int removingCount = 0; for (RegionStatistics regionStatistics : regionStatisticsMap.values()) { - if (RegionStatus.Removing.equals(regionStatistics.getRegionStatus())) { - // The RegionGroup is considered as Disabled when - // at least one Region is in the ReadOnly or Removing status - return RegionGroupStatus.Disabled; - } - unknownCount += RegionStatus.Unknown.equals(regionStatistics.getRegionStatus()) ? 1 : 0; - readonlyCount += RegionStatus.ReadOnly.equals(regionStatistics.getRegionStatus()) ? 1 : 0; + runningCount += RegionStatus.Running.equals(regionStatistics.getRegionStatus()) ? 1 : 0; + addingCount += RegionStatus.Adding.equals(regionStatistics.getRegionStatus()) ? 1 : 0; + removingCount += RegionStatus.Removing.equals(regionStatistics.getRegionStatus()) ? 1 : 0; } + int baseCount = regionCacheMap.size() - addingCount - removingCount; - if (unknownCount + readonlyCount == 0) { - // The RegionGroup is considered as Running only if - // all Regions are in the Running status + if (runningCount == baseCount) { + // The RegionGroup is considered as Running only if all Regions are in the Running status. return RegionGroupStatus.Running; - } else if (readonlyCount == 0) { - return unknownCount <= ((regionCacheMap.size() - 1) / 2) - // The RegionGroup is considered as Available when the number of Unknown Regions is less - // than half + } + if (isStrongConsistency) { + // For strong consistency algorithms, the RegionGroup is considered as Available when the + // number of Regions in the Running status is greater than half. + return runningCount > (baseCount / 2) ? RegionGroupStatus.Available - // Disabled otherwise : RegionGroupStatus.Disabled; } else { - return unknownCount + readonlyCount <= ((regionCacheMap.size() - 1) / 2) - // The RegionGroup is considered as Discouraged when the number of Unknown or ReadOnly - // Regions is less - // than half, and there are at least 1 ReadOnly Region - ? RegionGroupStatus.Discouraged - // Disabled otherwise - : RegionGroupStatus.Disabled; + // For weak consistency algorithms, the RegionGroup is considered as Available when the number + // of Regions in the Running status is greater than or equal to 1. + return (runningCount >= 1) ? 
RegionGroupStatus.Available : RegionGroupStatus.Disabled; } } @@ -153,4 +149,8 @@ public String getDatabase() { public Set getRegionLocations() { return regionCacheMap.keySet(); } + + public RegionCache getRegionCache(int nodeId) { + return regionCacheMap.get(nodeId); + } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/HeartbeatService.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/HeartbeatService.java index 7bd24c2710b8c..c3ba7d2d8805c 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/HeartbeatService.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/HeartbeatService.java @@ -19,6 +19,8 @@ package org.apache.iotdb.confignode.manager.load.service; +import org.apache.iotdb.ainode.rpc.thrift.TAIHeartbeatReq; +import org.apache.iotdb.common.rpc.thrift.TAINodeConfiguration; import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; import org.apache.iotdb.common.rpc.thrift.TDataNodeConfiguration; import org.apache.iotdb.common.rpc.thrift.TEndPoint; @@ -26,8 +28,10 @@ import org.apache.iotdb.commons.concurrent.ThreadName; import org.apache.iotdb.commons.concurrent.threadpool.ScheduledExecutorUtil; import org.apache.iotdb.commons.pipe.config.PipeConfig; +import org.apache.iotdb.confignode.client.async.AsyncAINodeHeartbeatClientPool; import org.apache.iotdb.confignode.client.async.AsyncConfigNodeHeartbeatClientPool; import org.apache.iotdb.confignode.client.async.AsyncDataNodeHeartbeatClientPool; +import org.apache.iotdb.confignode.client.async.handlers.heartbeat.AINodeHeartbeatHandler; import org.apache.iotdb.confignode.client.async.handlers.heartbeat.ConfigNodeHeartbeatHandler; import org.apache.iotdb.confignode.client.async.handlers.heartbeat.DataNodeHeartbeatHandler; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; @@ -37,6 +41,7 @@ import org.apache.iotdb.confignode.manager.load.cache.node.ConfigNodeHeartbeatCache; import org.apache.iotdb.confignode.manager.node.NodeManager; import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeHeartbeatReq; +import org.apache.iotdb.db.protocol.client.ConfigNodeInfo; import org.apache.iotdb.mpp.rpc.thrift.TDataNodeHeartbeatReq; import org.apache.tsfile.utils.Pair; @@ -126,6 +131,8 @@ private void heartbeatLoopBody() { // Send heartbeat requests to all the registered DataNodes pingRegisteredDataNodes( genHeartbeatReq(), getNodeManager().getRegisteredDataNodes()); + // Send heartbeat requests to all the registered AINodes + pingRegisteredAINodes(genMLHeartbeatReq(), getNodeManager().getRegisteredAINodes()); } }); } @@ -134,6 +141,11 @@ private TDataNodeHeartbeatReq genHeartbeatReq() { /* Generate heartbeat request */ TDataNodeHeartbeatReq heartbeatReq = new TDataNodeHeartbeatReq(); heartbeatReq.setHeartbeatTimestamp(System.nanoTime()); + heartbeatReq.setLogicalClock( + configManager + .getConsensusManager() + .getConsensusImpl() + .getLogicalClock(ConfigNodeInfo.CONFIG_REGION_ID)); // Always sample RegionGroups' leadership as the Region heartbeat heartbeatReq.setNeedJudgeLeader(true); // We sample DataNode's load in every 10 heartbeat loop @@ -155,6 +167,12 @@ private TDataNodeHeartbeatReq genHeartbeatReq() { heartbeatReq.setSpaceQuotaUsage(configManager.getClusterQuotaManager().getSpaceQuotaUsage()); } + // We broadcast region operations list every 100 heartbeat loops + if (heartbeatCounter.get() % 100 == 0) { + 
heartbeatReq.setCurrentRegionOperations( + configManager.getProcedureManager().getRegionOperationConsensusIds()); + } + /* Update heartbeat counter */ heartbeatCounter.getAndIncrement(); @@ -187,6 +205,17 @@ private TConfigNodeHeartbeatReq genConfigNodeHeartbeatReq() { return req; } + private TAIHeartbeatReq genMLHeartbeatReq() { + /* Generate heartbeat request */ + TAIHeartbeatReq heartbeatReq = new TAIHeartbeatReq(); + heartbeatReq.setHeartbeatTimestamp(System.nanoTime()); + + // We sample AINode's load in every 10 heartbeat loop + heartbeatReq.setNeedSamplingLoad(heartbeatCounter.get() % 10 == 0); + + return heartbeatReq; + } + /** * Send heartbeat requests to all the Registered ConfigNodes. * @@ -245,6 +274,24 @@ private void pingRegisteredDataNodes( } } + /** + * Send heartbeat requests to all the Registered AINodes. + * + * @param registeredAINodes AINodes that are registered in the cluster + */ + private void pingRegisteredAINodes( + TAIHeartbeatReq heartbeatReq, List<TAINodeConfiguration> registeredAINodes) { + // Send heartbeat requests + for (TAINodeConfiguration aiNodeInfo : registeredAINodes) { + AINodeHeartbeatHandler handler = + new AINodeHeartbeatHandler( + aiNodeInfo.getLocation().getAiNodeId(), configManager.getLoadManager()); + AsyncAINodeHeartbeatClientPool.getInstance() + .getAINodeHeartBeat( + aiNodeInfo.getLocation().getInternalEndPoint(), heartbeatReq, handler); + } + } + private ConsensusManager getConsensusManager() { return configManager.getConsensusManager(); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/StatisticsService.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/StatisticsService.java index d8accbcd48241..5db975d14af90 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/StatisticsService.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/StatisticsService.java @@ -37,7 +37,7 @@ public class StatisticsService { private static final Logger LOGGER = LoggerFactory.getLogger(StatisticsService.class); - private static final long HEARTBEAT_INTERVAL = + public static final long STATISTICS_UPDATE_INTERVAL = ConfigNodeDescriptor.getInstance().getConf().getHeartbeatIntervalInMs(); private final LoadCache loadCache; @@ -63,7 +63,7 @@ public void startLoadStatisticsService() { loadStatisticsExecutor, this::updateLoadStatistics, 0, - HEARTBEAT_INTERVAL, + STATISTICS_UPDATE_INTERVAL, TimeUnit.MILLISECONDS); LOGGER.info("LoadStatistics service is started successfully."); } @@ -82,7 +82,7 @@ public void stopLoadStatisticsService() { } private void updateLoadStatistics() { - loadCache.updateNodeStatistics(); + loadCache.updateNodeStatistics(false); loadCache.updateRegionGroupStatistics(); loadCache.updateConsensusGroupStatistics(); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/node/ClusterNodeStartUtils.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/node/ClusterNodeStartUtils.java index 035517172f352..60c806db32859 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/node/ClusterNodeStartUtils.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/node/ClusterNodeStartUtils.java @@ -19,6 +19,8 @@ package org.apache.iotdb.confignode.manager.node; +import org.apache.iotdb.common.rpc.thrift.TAINodeConfiguration; +import org.apache.iotdb.common.rpc.thrift.TAINodeLocation; import
org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; import org.apache.iotdb.common.rpc.thrift.TDataNodeConfiguration; import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; @@ -30,6 +32,7 @@ import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.iotdb.confignode.manager.ConfigManager; +import org.apache.iotdb.confignode.rpc.thrift.TAINodeRegisterReq; import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterReq; import org.apache.iotdb.confignode.rpc.thrift.TDataNodeRegisterReq; import org.apache.iotdb.rpc.TSStatusCode; @@ -101,7 +104,7 @@ public static TSStatus confirmClusterId(ConfigManager configManager) { configManager .getClusterManager() .getClusterIdWithRetry( - CommonDescriptor.getInstance().getConfig().getConnectionTimeoutInMS() / 2); + CommonDescriptor.getInstance().getConfig().getCnConnectionTimeoutInMS() / 2); if (clusterId == null) { status .setCode(TSStatusCode.GET_CLUSTER_ID_ERROR.getStatusCode()) @@ -159,9 +162,56 @@ public static TSStatus confirmConfigNodeRegistration( return ACCEPT_NODE_REGISTRATION; } + public static TSStatus confirmAINodeRegistration( + TAINodeRegisterReq req, ConfigManager configManager) { + // Confirm cluster name + TSStatus status = confirmClusterName(NodeType.AINode, req.getClusterName()); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; + } + // Confirm end point conflicts + List<TEndPoint> conflictEndPoints = + checkConflictTEndPointForNewAINode( + req.getAiNodeConfiguration().getLocation(), + configManager.getNodeManager().getRegisteredAINodes()); + if (!conflictEndPoints.isEmpty()) { + return rejectRegistrationBecauseConflictEndPoints(NodeType.AINode, conflictEndPoints); + } + // Confirm whether cluster id has been generated + status = confirmClusterId(configManager); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; + } + // Success + return ACCEPT_NODE_REGISTRATION; + } + + /** + * Check if there exist conflict TEndPoints on the AINode to be registered. + * + * @param newAINodeLocation The TAINodeLocation of the AINode to be registered + * @param registeredAINodes All registered AINodes + * @return The conflicting TEndPoints, if any exist + */ + public static List<TEndPoint> checkConflictTEndPointForNewAINode( + TAINodeLocation newAINodeLocation, List<TAINodeConfiguration> registeredAINodes) { + Set<TEndPoint> conflictEndPointSet = new HashSet<>(); + for (TAINodeConfiguration registeredAINode : registeredAINodes) { + TAINodeLocation registeredLocation = registeredAINode.getLocation(); + if (registeredLocation + .getInternalEndPoint() + .equals(newAINodeLocation.getInternalEndPoint())) { + conflictEndPointSet.add(newAINodeLocation.getInternalEndPoint()); + } + } + + return new ArrayList<>(conflictEndPointSet); + } + public static TSStatus confirmNodeRestart( NodeType nodeType, String clusterName, + String clusterId, int nodeId, Object nodeLocation, ConfigManager configManager) { @@ -210,6 +260,14 @@ public static TSStatus confirmNodeRestart( configManager.getNodeManager().getRegisteredConfigNodes()); } break; + case AINode: + if (nodeLocation instanceof TAINodeLocation) { + matchedNodeLocation = + matchRegisteredAINode( + (TAINodeLocation) nodeLocation, + configManager.getNodeManager().getRegisteredAINodes()); + } + break; case DataNode: default: if (nodeLocation instanceof TDataNodeLocation) { @@ -265,6 +323,24 @@ public static TSStatus confirmNodeRestart( break; } + // check clusterId if not empty + if (clusterId != null + && !clusterId.isEmpty() + && !clusterId.equals(configManager.getClusterManager().getClusterId())) { + status.setCode(TSStatusCode.REJECT_NODE_START.getStatusCode()); + status.setMessage( + String.format( + "Reject %s restart, because the clusterId of the current %s and the target cluster are inconsistent. " + + "ClusterId of the current Node: %s, ClusterId of the target cluster: %s." + + POSSIBLE_SOLUTIONS + + "\t1. Please check if the node configuration or path is correct.", + nodeType.getNodeType(), + nodeType.getNodeType(), + clusterId, + configManager.getClusterManager().getClusterId())); + return status; + } + if (!acceptRestart) { /* Reject restart because some internal TEndPoints have been changed */ status.setCode(TSStatusCode.REJECT_NODE_START.getStatusCode()); @@ -364,6 +440,24 @@ public static TConfigNodeLocation matchRegisteredConfigNode( return null; } + /** + * Check if there exists a registered AINode that has the same index as the given one. + * + * @param aiNodeLocation The given AINode + * @param registeredAINodes Registered AINodes + * @return The TAINodeLocation that has the same index as the given one, null otherwise. + */ + public static TAINodeLocation matchRegisteredAINode( + TAINodeLocation aiNodeLocation, List<TAINodeConfiguration> registeredAINodes) { + for (TAINodeConfiguration registeredAINode : registeredAINodes) { + if (registeredAINode.getLocation().getAiNodeId() == aiNodeLocation.getAiNodeId()) { + return registeredAINode.getLocation(); + } + } + + return null; + } + /** * Check if there exists a registered DataNode who has the same index of the given one.
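checkConflictTEndPointForNewAINode above reduces AINode admission to a membership test on internal endpoints. The same rule, rendered as a self-contained example with endpoints modeled as plain ip:port strings instead of Thrift TEndPoint structs (addresses and ports here are made up):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class EndpointConflictCheck {

  /** Returns the candidate endpoint when it is already registered, else an empty list. */
  static List<String> conflictingEndpoints(String candidate, List<String> registered) {
    List<String> conflicts = new ArrayList<>();
    if (registered.contains(candidate)) {
      conflicts.add(candidate); // a single hit is enough to reject the registration
    }
    return conflicts;
  }

  public static void main(String[] args) {
    List<String> registered = Arrays.asList("10.0.0.5:10810", "10.0.0.6:10810");
    System.out.println(conflictingEndpoints("10.0.0.5:10810", registered)); // [10.0.0.5:10810]
    System.out.println(conflictingEndpoints("10.0.0.7:10810", registered)); // []
  }
}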
* diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/node/NodeManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/node/NodeManager.java index 4cb7945cf9d24..8925375d573b5 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/node/NodeManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/node/NodeManager.java @@ -19,8 +19,8 @@ package org.apache.iotdb.confignode.manager.node; +import org.apache.iotdb.common.rpc.thrift.TAINodeConfiguration; import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; -import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; import org.apache.iotdb.common.rpc.thrift.TDataNodeConfiguration; import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; import org.apache.iotdb.common.rpc.thrift.TFlushReq; @@ -36,25 +36,32 @@ import org.apache.iotdb.commons.consensus.ConsensusGroupId; import org.apache.iotdb.commons.service.metric.MetricService; import org.apache.iotdb.confignode.client.CnToCnNodeRequestType; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; +import org.apache.iotdb.confignode.client.sync.CnToDnSyncRequestType; import org.apache.iotdb.confignode.client.sync.SyncConfigNodeClientPool; import org.apache.iotdb.confignode.client.sync.SyncDataNodeClientPool; import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; +import org.apache.iotdb.confignode.consensus.request.read.ainode.GetAINodeConfigurationPlan; import org.apache.iotdb.confignode.consensus.request.read.datanode.GetDataNodeConfigurationPlan; +import org.apache.iotdb.confignode.consensus.request.write.ainode.RegisterAINodePlan; +import org.apache.iotdb.confignode.consensus.request.write.ainode.RemoveAINodePlan; +import org.apache.iotdb.confignode.consensus.request.write.ainode.UpdateAINodePlan; import org.apache.iotdb.confignode.consensus.request.write.confignode.ApplyConfigNodePlan; import org.apache.iotdb.confignode.consensus.request.write.confignode.RemoveConfigNodePlan; import org.apache.iotdb.confignode.consensus.request.write.confignode.UpdateVersionInfoPlan; import org.apache.iotdb.confignode.consensus.request.write.datanode.RegisterDataNodePlan; import org.apache.iotdb.confignode.consensus.request.write.datanode.RemoveDataNodePlan; import org.apache.iotdb.confignode.consensus.request.write.datanode.UpdateDataNodePlan; +import org.apache.iotdb.confignode.consensus.response.ainode.AINodeConfigurationResp; +import org.apache.iotdb.confignode.consensus.response.ainode.AINodeRegisterResp; import org.apache.iotdb.confignode.consensus.response.datanode.ConfigurationResp; import org.apache.iotdb.confignode.consensus.response.datanode.DataNodeConfigurationResp; import org.apache.iotdb.confignode.consensus.response.datanode.DataNodeRegisterResp; import org.apache.iotdb.confignode.consensus.response.datanode.DataNodeToStatusResp; -import org.apache.iotdb.confignode.manager.ConfigManager; +import org.apache.iotdb.confignode.manager.ClusterManager; import org.apache.iotdb.confignode.manager.IManager; import org.apache.iotdb.confignode.manager.TTLManager; import org.apache.iotdb.confignode.manager.TriggerManager; @@ -67,7 +74,11 @@ import 
org.apache.iotdb.confignode.manager.pipe.coordinator.PipeManager; import org.apache.iotdb.confignode.manager.schema.ClusterSchemaManager; import org.apache.iotdb.confignode.persistence.node.NodeInfo; -import org.apache.iotdb.confignode.procedure.env.RegionMaintainHandler; +import org.apache.iotdb.confignode.procedure.env.RemoveDataNodeHandler; +import org.apache.iotdb.confignode.rpc.thrift.TAINodeInfo; +import org.apache.iotdb.confignode.rpc.thrift.TAINodeRegisterReq; +import org.apache.iotdb.confignode.rpc.thrift.TAINodeRestartReq; +import org.apache.iotdb.confignode.rpc.thrift.TAINodeRestartResp; import org.apache.iotdb.confignode.rpc.thrift.TCQConfig; import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeInfo; import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterReq; @@ -91,6 +102,7 @@ import org.slf4j.LoggerFactory; import java.util.ArrayList; +import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.List; @@ -98,6 +110,7 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.ReentrantLock; import java.util.stream.Collectors; @@ -248,6 +261,7 @@ private TRuntimeConfiguration getRuntimeConfiguration() { getPipeManager().getPipePluginCoordinator().getPipePluginTable().getAllPipePluginMeta()); runtimeConfiguration.setAllTTLInformation( DataNodeRegisterResp.convertAllTTLInformation(getTTLManager().getAllTTL())); + runtimeConfiguration.setClusterId(getClusterManager().getClusterId()); return runtimeConfiguration; } finally { getTriggerManager().getTriggerInfo().releaseTriggerTableLock(); @@ -301,8 +315,7 @@ public DataSet registerDataNode(TDataNodeRegisterReq req) { resp.setStatus(ClusterNodeStartUtils.ACCEPT_NODE_REGISTRATION); resp.setDataNodeId( registerDataNodePlan.getDataNodeConfiguration().getLocation().getDataNodeId()); - String clusterId = configManager.getClusterManager().getClusterId(); - resp.setRuntimeConfiguration(getRuntimeConfiguration().setClusterId(clusterId)); + resp.setRuntimeConfiguration(getRuntimeConfiguration()); return resp; } @@ -311,7 +324,7 @@ public TDataNodeRestartResp updateDataNodeIfNecessary(TDataNodeRestartReq req) { configManager .getClusterManager() .getClusterIdWithRetry( - CommonDescriptor.getInstance().getConfig().getConnectionTimeoutInMS() / 2); + CommonDescriptor.getInstance().getConfig().getCnConnectionTimeoutInMS() / 2); TDataNodeRestartResp resp = new TDataNodeRestartResp(); resp.setConfigNodeList(getRegisteredConfigNodes()); if (clusterId == null) { @@ -346,61 +359,66 @@ public TDataNodeRestartResp updateDataNodeIfNecessary(TDataNodeRestartReq req) { } resp.setStatus(ClusterNodeStartUtils.ACCEPT_NODE_RESTART); - resp.setRuntimeConfiguration(getRuntimeConfiguration().setClusterId(clusterId)); - List consensusGroupIds = - getPartitionManager().getAllReplicaSets(nodeId).stream() - .map(TRegionReplicaSet::getRegionId) - .collect(Collectors.toList()); - resp.setConsensusGroupIds(consensusGroupIds); + resp.setRuntimeConfiguration(getRuntimeConfiguration()); + + resp.setCorrectConsensusGroups(getPartitionManager().getAllReplicaSets(nodeId)); return resp; } /** - * Remove DataNodes. + * Removes the specified DataNodes. * - * @param removeDataNodePlan removeDataNodePlan - * @return DataNodeToStatusResp, The TSStatus will be SUCCEED_STATUS if the request is accepted, - * DATANODE_NOT_EXIST when some datanode does not exist. 
+ * @param removeDataNodePlan the plan detailing which DataNodes to remove + * @return DataNodeToStatusResp, where the TSStatus will be SUCCEED_STATUS if the request is + * accepted, or DATANODE_NOT_EXIST if any DataNode does not exist. */ public DataSet removeDataNode(RemoveDataNodePlan removeDataNodePlan) { + configManager.getProcedureManager().getEnv().getSubmitRegionMigrateLock().lock(); LOGGER.info("NodeManager start to remove DataNode {}", removeDataNodePlan); + try { + // Checks if the RemoveDataNode request is valid + RemoveDataNodeHandler removeDataNodeHandler = + configManager.getProcedureManager().getEnv().getRemoveDataNodeHandler(); + DataNodeToStatusResp preCheckStatus = + removeDataNodeHandler.checkRemoveDataNodeRequest(removeDataNodePlan); + if (preCheckStatus.getStatus().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + LOGGER.error( + "The remove DataNode request check failed. req: {}, check result: {}", + removeDataNodePlan, + preCheckStatus.getStatus()); + return preCheckStatus; + } - RegionMaintainHandler handler = new RegionMaintainHandler((ConfigManager) configManager); - DataNodeToStatusResp preCheckStatus = handler.checkRemoveDataNodeRequest(removeDataNodePlan); - if (preCheckStatus.getStatus().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - LOGGER.error( - "The remove DataNode request check failed. req: {}, check result: {}", - removeDataNodePlan, - preCheckStatus.getStatus()); - return preCheckStatus; - } + // Do transfer of the DataNodes before remove + DataNodeToStatusResp dataSet = new DataNodeToStatusResp(); + if (configManager.transfer(removeDataNodePlan.getDataNodeLocations()).getCode() + != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + dataSet.setStatus( + new TSStatus(TSStatusCode.REMOVE_DATANODE_ERROR.getStatusCode()) + .setMessage("Migrate the service on the removed DataNodes failed")); + return dataSet; + } - // Do transfer of the DataNodes before remove - DataNodeToStatusResp dataSet = new DataNodeToStatusResp(); - if (configManager.transfer(removeDataNodePlan.getDataNodeLocations()).getCode() - != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - dataSet.setStatus( - new TSStatus(TSStatusCode.REMOVE_DATANODE_ERROR.getStatusCode()) - .setMessage("Fail to do transfer of the DataNodes")); - return dataSet; - } + // Add request to queue, then return to client + boolean removeSucceed = + configManager.getProcedureManager().removeDataNode(removeDataNodePlan); + TSStatus status; + if (removeSucceed) { + status = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + status.setMessage("Server accepted the request"); + } else { + status = new TSStatus(TSStatusCode.REMOVE_DATANODE_ERROR.getStatusCode()); + status.setMessage("Server rejected the request, maybe requests are too many"); + } + dataSet.setStatus(status); - // Add request to queue, then return to client - boolean removeSucceed = configManager.getProcedureManager().removeDataNode(removeDataNodePlan); - TSStatus status; - if (removeSucceed) { - status = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - status.setMessage("Server accepted the request"); - } else { - status = new TSStatus(TSStatusCode.REMOVE_DATANODE_ERROR.getStatusCode()); - status.setMessage("Server rejected the request, maybe requests are too many"); + LOGGER.info( + "NodeManager submit RemoveDataNodePlan finished, removeDataNodePlan: {}", + removeDataNodePlan); + return dataSet; + } finally { + configManager.getProcedureManager().getEnv().getSubmitRegionMigrateLock().unlock(); } - 
dataSet.setStatus(status); - - LOGGER.info( - "NodeManager submit RemoveDataNodePlan finished, removeDataNodePlan: {}", - removeDataNodePlan); - return dataSet; } public TConfigNodeRegisterResp registerConfigNode(TConfigNodeRegisterReq req) { @@ -427,6 +445,148 @@ public TSStatus updateConfigNodeIfNecessary(int configNodeId, TNodeVersionInfo v return ClusterNodeStartUtils.ACCEPT_NODE_RESTART; } + public List<TAINodeInfo> getRegisteredAINodeInfoList() { + List<TAINodeInfo> aiNodeInfoList = new ArrayList<>(); + for (TAINodeConfiguration aiNodeConfiguration : getRegisteredAINodes()) { + TAINodeInfo aiNodeInfo = new TAINodeInfo(); + aiNodeInfo.setAiNodeId(aiNodeConfiguration.getLocation().getAiNodeId()); + aiNodeInfo.setStatus(getLoadManager().getNodeStatusWithReason(aiNodeInfo.getAiNodeId())); + aiNodeInfo.setInternalAddress(aiNodeConfiguration.getLocation().getInternalEndPoint().ip); + aiNodeInfo.setInternalPort(aiNodeConfiguration.getLocation().getInternalEndPoint().port); + aiNodeInfoList.add(aiNodeInfo); + } + return aiNodeInfoList; + } + + /** + * @return All registered AINodes + */ + public List<TAINodeConfiguration> getRegisteredAINodes() { + return nodeInfo.getRegisteredAINodes(); + } + + public TAINodeConfiguration getRegisteredAINode(int aiNodeId) { + return nodeInfo.getRegisteredAINode(aiNodeId); + } + + /** + * Register AINode. This method is synchronized so that concurrent registration requests are + * handled one at a time. + * + * @param req TAINodeRegisterReq + * @return AINodeConfigurationDataSet. The {@link TSStatus} will be set to {@link + * TSStatusCode#SUCCESS_STATUS} when registration succeeds. + */ + public synchronized DataSet registerAINode(TAINodeRegisterReq req) { + + if (!nodeInfo.getRegisteredAINodes().isEmpty()) { + AINodeRegisterResp dataSet = new AINodeRegisterResp(); + dataSet.setConfigNodeList(Collections.emptyList()); + dataSet.setStatus( + new TSStatus(TSStatusCode.REGISTER_AI_NODE_ERROR.getStatusCode()) + .setMessage("There is already one AINode in the cluster.")); + return dataSet; + } + + int aiNodeId = nodeInfo.generateNextNodeId(); + getLoadManager().getLoadCache().createNodeHeartbeatCache(NodeType.AINode, aiNodeId); + RegisterAINodePlan registerAINodePlan = new RegisterAINodePlan(req.getAiNodeConfiguration()); + // Register the new AINode + registerAINodePlan.getAINodeConfiguration().getLocation().setAiNodeId(aiNodeId); + try { + getConsensusManager().write(registerAINodePlan); + } catch (ConsensusException e) { + LOGGER.warn(CONSENSUS_WRITE_ERROR, e); + } + + // Update the AINode's versionInfo + UpdateVersionInfoPlan updateVersionInfoPlan = + new UpdateVersionInfoPlan(req.getVersionInfo(), aiNodeId); + try { + getConsensusManager().write(updateVersionInfoPlan); + } catch (ConsensusException e) { + LOGGER.warn(CONSENSUS_WRITE_ERROR, e); + } + + AINodeRegisterResp resp = new AINodeRegisterResp(); + resp.setStatus(ClusterNodeStartUtils.ACCEPT_NODE_REGISTRATION); + resp.setConfigNodeList(getRegisteredConfigNodes()); + resp.setAINodeId(registerAINodePlan.getAINodeConfiguration().getLocation().getAiNodeId()); + return resp; + } + + /** + * Removes the specified AINode. + * + * @param removeAINodePlan the plan identifying which AINode to remove + */ + public TSStatus removeAINode(RemoveAINodePlan removeAINodePlan) { + LOGGER.info("NodeManager start to remove AINode {}", removeAINodePlan); + + // Check if the AINode exists + if (!nodeInfo.containsAINode(removeAINodePlan.getAINodeLocation().getAiNodeId())) { + return new TSStatus(TSStatusCode.REMOVE_AI_NODE_ERROR.getStatusCode()) + .setMessage("AINode doesn't exist."); + } + + // Add request to queue, then return to client + boolean removeSucceed = configManager.getProcedureManager().removeAINode(removeAINodePlan); + TSStatus status; + if (removeSucceed) { + status = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + status.setMessage("Server accepted the request"); + } else { + status = new TSStatus(TSStatusCode.REMOVE_AI_NODE_ERROR.getStatusCode()); + status.setMessage("Server rejected the request, maybe requests are too many"); + } + + LOGGER.info( + "NodeManager submit RemoveAINodePlan finished, removeAINodePlan: {}", removeAINodePlan); + return status; + } + + public TAINodeRestartResp updateAINodeIfNecessary(TAINodeRestartReq req) { + int nodeId = req.getAiNodeConfiguration().getLocation().getAiNodeId(); + TAINodeConfiguration aiNodeConfiguration = getRegisteredAINode(nodeId); + if (!req.getAiNodeConfiguration().equals(aiNodeConfiguration)) { + // Update AINodeConfiguration when modified during restart + UpdateAINodePlan updateAINodePlan = new UpdateAINodePlan(req.getAiNodeConfiguration()); + try { + getConsensusManager().write(updateAINodePlan); + } catch (ConsensusException e) { + LOGGER.warn(CONSENSUS_WRITE_ERROR, e); + } + } + TNodeVersionInfo versionInfo = nodeInfo.getVersionInfo(nodeId); + if (!req.getVersionInfo().equals(versionInfo)) { + // Update versionInfo when modified during restart + UpdateVersionInfoPlan updateVersionInfoPlan = + new UpdateVersionInfoPlan(req.getVersionInfo(), nodeId); + try { + getConsensusManager().write(updateVersionInfoPlan); + } catch (ConsensusException e) { + LOGGER.warn(CONSENSUS_WRITE_ERROR, e); + } + } + + TAINodeRestartResp resp = new TAINodeRestartResp(); + resp.setStatus(ClusterNodeStartUtils.ACCEPT_NODE_RESTART); + resp.setConfigNodeList(getRegisteredConfigNodes()); + return resp; + } + + public AINodeConfigurationResp getAINodeConfiguration(GetAINodeConfigurationPlan req) { + try { + return (AINodeConfigurationResp) getConsensusManager().read(req); + } catch (ConsensusException e) { + LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e); + TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); + res.setMessage(e.getMessage()); + AINodeConfigurationResp response = new AINodeConfigurationResp(); + response.setStatus(res); + return response; + } + } + /** * Get TDataNodeConfiguration.
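updateAINodeIfNecessary above follows the same restart idiom as its DataNode counterpart: compare what the restarting node reports against the persisted record and issue a consensus write only on a mismatch, so a clean restart performs no writes. A distilled sketch of that idiom, where ConsensusWriter and the Object-typed plans are placeholders rather than the real ConsensusManager API:

import java.util.Objects;

// Sketch of the compare-then-write restart idiom; all names are placeholders.
public class UpdateIfChangedSketch {

  interface ConsensusWriter {
    void write(Object plan); // stands in for ConsensusManager.write(...)
  }

  private final ConsensusWriter writer;
  private Object recordedConfiguration;

  UpdateIfChangedSketch(ConsensusWriter writer, Object initial) {
    this.writer = writer;
    this.recordedConfiguration = initial;
  }

  /** Persists the reported configuration only when it differs from the record. */
  void updateIfNecessary(Object reported) {
    if (!Objects.equals(reported, recordedConfiguration)) {
      writer.write(reported); // replicate the change through consensus
      recordedConfiguration = reported;
    }
  }
}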
* @@ -630,13 +790,33 @@ public void applyConfigNode( public TSStatus checkConfigNodeBeforeRemove(RemoveConfigNodePlan removeConfigNodePlan) { removeConfigNodeLock.lock(); try { - // Check OnlineConfigNodes number - if (filterConfigNodeThroughStatus(NodeStatus.Running).size() <= 1) { + // Check ConfigNodes number + if (getRegisteredConfigNodes().size() <= 1) { return new TSStatus(TSStatusCode.REMOVE_CONFIGNODE_ERROR.getStatusCode()) .setMessage( "Remove ConfigNode failed because there is only one ConfigNode in current Cluster."); } + // Check OnlineConfigNodes number + final long deadline = + System.nanoTime() + + TimeUnit.MILLISECONDS.toNanos( + CommonDescriptor.getInstance().getConfig().getDnConnectionTimeoutInMS() / 2); + while (filterConfigNodeThroughStatus(NodeStatus.Running).size() <= 1) { + if (System.nanoTime() > deadline) { + return new TSStatus(TSStatusCode.REMOVE_CONFIGNODE_ERROR.getStatusCode()) + .setMessage( + "Remove ConfigNode failed because there is no other ConfigNode in Running status in current Cluster."); + } + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return new TSStatus(TSStatusCode.REMOVE_CONFIGNODE_ERROR.getStatusCode()) + .setMessage("Remove ConfigNode failed due to thread interruption."); + } + } + // Check whether the registeredConfigNodes contain the ConfigNode to be removed. if (!getRegisteredConfigNodes().contains(removeConfigNodePlan.getConfigNodeLocation())) { return new TSStatus(TSStatusCode.REMOVE_CONFIGNODE_ERROR.getStatusCode()) @@ -702,7 +882,7 @@ public List merge() { Map dataNodeLocationMap = configManager.getNodeManager().getRegisteredDataNodeLocations(); DataNodeAsyncRequestContext clientHandler = - new DataNodeAsyncRequestContext<>(CnToDnRequestType.MERGE, dataNodeLocationMap); + new DataNodeAsyncRequestContext<>(CnToDnAsyncRequestType.MERGE, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList(); } @@ -711,7 +891,15 @@ public List flush(TFlushReq req) { Map dataNodeLocationMap = configManager.getNodeManager().getRegisteredDataNodeLocations(); DataNodeAsyncRequestContext clientHandler = - new DataNodeAsyncRequestContext<>(CnToDnRequestType.FLUSH, req, dataNodeLocationMap); + new DataNodeAsyncRequestContext<>(CnToDnAsyncRequestType.FLUSH, req, dataNodeLocationMap); + CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); + return clientHandler.getResponseList(); + } + + public List flushOnSpecificDN( + TFlushReq req, Map dataNodeLocationMap) { + DataNodeAsyncRequestContext clientHandler = + new DataNodeAsyncRequestContext<>(CnToDnAsyncRequestType.FLUSH, req, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList(); } @@ -720,7 +908,7 @@ public List clearCache() { Map dataNodeLocationMap = configManager.getNodeManager().getRegisteredDataNodeLocations(); DataNodeAsyncRequestContext clientHandler = - new DataNodeAsyncRequestContext<>(CnToDnRequestType.CLEAR_CACHE, dataNodeLocationMap); + new DataNodeAsyncRequestContext<>(CnToDnAsyncRequestType.CLEAR_CACHE, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList(); } @@ -741,7 +929,7 @@ public List setConfiguration(TSetConfigurationReq req) { if (!targetDataNodes.isEmpty()) { 
DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.SET_CONFIGURATION, req, dataNodeLocationMap); + CnToDnAsyncRequestType.SET_CONFIGURATION, req, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance() .sendAsyncRequestWithRetry(clientHandler); responseList.addAll(clientHandler.getResponseList()); @@ -779,7 +967,8 @@ public List startRpairData() { Map dataNodeLocationMap = configManager.getNodeManager().getRegisteredDataNodeLocations(); DataNodeAsyncRequestContext clientHandler = - new DataNodeAsyncRequestContext<>(CnToDnRequestType.START_REPAIR_DATA, dataNodeLocationMap); + new DataNodeAsyncRequestContext<>( + CnToDnAsyncRequestType.START_REPAIR_DATA, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList(); } @@ -788,7 +977,8 @@ public List stopRepairData() { Map dataNodeLocationMap = configManager.getNodeManager().getRegisteredDataNodeLocations(); DataNodeAsyncRequestContext clientHandler = - new DataNodeAsyncRequestContext<>(CnToDnRequestType.STOP_REPAIR_DATA, dataNodeLocationMap); + new DataNodeAsyncRequestContext<>( + CnToDnAsyncRequestType.STOP_REPAIR_DATA, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList(); } @@ -798,7 +988,7 @@ public List submitLoadConfigurationTask() { configManager.getNodeManager().getRegisteredDataNodeLocations(); DataNodeAsyncRequestContext dataNodeRequestContext = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.LOAD_CONFIGURATION, dataNodeLocationMap); + CnToDnAsyncRequestType.LOAD_CONFIGURATION, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance() .sendAsyncRequestWithRetry(dataNodeRequestContext); return dataNodeRequestContext.getResponseList(); @@ -817,7 +1007,7 @@ public TShowConfigurationResp showConfiguration(int nodeId) { .sendSyncRequestToDataNodeWithRetry( dataNodeLocation.getInternalEndPoint(), null, - CnToDnRequestType.SHOW_CONFIGURATION); + CnToDnSyncRequestType.SHOW_CONFIGURATION); } // other config node @@ -842,7 +1032,7 @@ public List setSystemStatus(String status) { configManager.getNodeManager().getRegisteredDataNodeLocations(); DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.SET_SYSTEM_STATUS, status, dataNodeLocationMap); + CnToDnAsyncRequestType.SET_SYSTEM_STATUS, status, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList(); } @@ -853,7 +1043,7 @@ public TSStatus setDataNodeStatus(TSetDataNodeStatusReq setDataNodeStatusReq) { .sendSyncRequestToDataNodeWithRetry( setDataNodeStatusReq.getTargetDataNode().getInternalEndPoint(), setDataNodeStatusReq.getStatus(), - CnToDnRequestType.SET_SYSTEM_STATUS); + CnToDnSyncRequestType.SET_SYSTEM_STATUS); } /** @@ -876,7 +1066,7 @@ private TSStatus killAllQueries() { configManager.getNodeManager().getRegisteredDataNodeLocations(); DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.KILL_QUERY_INSTANCE, dataNodeLocationMap); + CnToDnAsyncRequestType.KILL_QUERY_INSTANCE, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return RpcUtils.squashResponseStatusList(clientHandler.getResponseList()); } @@ -892,7 +1082,7 @@ private TSStatus 
killSpecificQuery(String queryId, TDataNodeLocation dataNodeLoc .sendSyncRequestToDataNodeWithRetry( dataNodeLocation.getInternalEndPoint(), queryId, - CnToDnRequestType.KILL_QUERY_INSTANCE); + CnToDnSyncRequestType.KILL_QUERY_INSTANCE); } } @@ -948,6 +1138,10 @@ private ClusterSchemaManager getClusterSchemaManager() { return configManager.getClusterSchemaManager(); } + private ClusterManager getClusterManager() { + return configManager.getClusterManager(); + } + private PartitionManager getPartitionManager() { return configManager.getPartitionManager(); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java index c7b0c4bc19b91..632a5b5941974 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java @@ -30,11 +30,13 @@ import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; import org.apache.iotdb.commons.concurrent.ThreadName; import org.apache.iotdb.commons.concurrent.threadpool.ScheduledExecutorUtil; +import org.apache.iotdb.commons.conf.CommonConfig; +import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.partition.DataPartitionTable; import org.apache.iotdb.commons.partition.SchemaPartitionTable; import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor; import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; import org.apache.iotdb.confignode.conf.ConfigNodeConfig; @@ -70,6 +72,7 @@ import org.apache.iotdb.confignode.exception.NotEnoughDataNodeException; import org.apache.iotdb.confignode.manager.IManager; import org.apache.iotdb.confignode.manager.ProcedureManager; +import org.apache.iotdb.confignode.manager.TTLManager; import org.apache.iotdb.confignode.manager.consensus.ConsensusManager; import org.apache.iotdb.confignode.manager.load.LoadManager; import org.apache.iotdb.confignode.manager.node.NodeManager; @@ -122,6 +125,7 @@ public class PartitionManager { CONF.getSchemaRegionGroupExtensionPolicy(); private static final RegionGroupExtensionPolicy DATA_REGION_GROUP_EXTENSION_POLICY = CONF.getDataRegionGroupExtensionPolicy(); + private static final CommonConfig COMMON_CONFIG = CommonDescriptor.getInstance().getConfig(); private final IManager configManager; private final PartitionInfo partitionInfo; @@ -131,15 +135,16 @@ public class PartitionManager { private static final String CONSENSUS_READ_ERROR = "Failed in the read API executing the consensus layer due to: "; - private static final String CONSENSUS_WRITE_ERROR = + public static final String CONSENSUS_WRITE_ERROR = "Failed in the write API executing the consensus layer due to: "; - /** Region cleaner. */ // Monitor for leadership change private final Object scheduleMonitor = new Object(); + /** Region cleaner. 
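The Region cleaner javadoc being reshuffled here belongs to PartitionManager's periodic maintainer, which retries pending region create/delete tasks every REGION_MAINTAINER_WORK_INTERVAL seconds (10 in this file). In outline the scheduling looks like the following sketch; the task body is a placeholder, whereas the real maintainRegionReplicas() drains a queue of persisted RegionMaintainTasks:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class RegionMaintainerOutline {

  // The interval matches the constant in the diff; everything else is schematic.
  private static final int REGION_MAINTAINER_WORK_INTERVAL = 10;

  public static void main(String[] args) {
    ScheduledExecutorService regionMaintainer = Executors.newSingleThreadScheduledExecutor();
    regionMaintainer.scheduleWithFixedDelay(
        () -> System.out.println("retry pending region create/delete tasks"),
        0,
        REGION_MAINTAINER_WORK_INTERVAL,
        TimeUnit.SECONDS);
  }
}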
*/ // Try to delete Regions in every 10s private static final int REGION_MAINTAINER_WORK_INTERVAL = 10; + private final ScheduledExecutorService regionMaintainer; private Future currentRegionMaintainerFuture; @@ -323,15 +328,17 @@ public SchemaPartitionResp getOrCreateSchemaPartition(GetOrCreateSchemaPartition /** * Get DataPartition and create a new one if it does not exist. * - * @param req DataPartitionPlan with Map>> - * @return DataPartitionResp with DataPartition and TSStatus. SUCCESS_STATUS if all process - * finish. NOT_ENOUGH_DATA_NODE if the DataNodes is not enough to create new Regions. - * STORAGE_GROUP_NOT_EXIST if some StorageGroup don't exist. + * @param req DataPartitionPlan with Map{@literal <}StorageGroupName, Map{@literal + * <}SeriesPartitionSlot, List{@literal <}TimePartitionSlot{@literal >}{@literal >}{@literal + * >} + * @return DataPartitionResp with DataPartition and {@link TSStatus}. {@link + * TSStatusCode#SUCCESS_STATUS} if all process finish. {@link TSStatusCode#NO_ENOUGH_DATANODE} + * if the DataNodes is not enough to create new Regions. {@link + * TSStatusCode#DATABASE_NOT_EXIST} if some database does not exist. */ - public DataPartitionResp getOrCreateDataPartition(GetOrCreateDataPartitionPlan req) { + public DataPartitionResp getOrCreateDataPartition(final GetOrCreateDataPartitionPlan req) { // Check if the related Databases exist - for (String database : req.getPartitionSlotsMap().keySet()) { + for (final String database : req.getPartitionSlotsMap().keySet()) { if (!isDatabaseExist(database)) { return new DataPartitionResp( new TSStatus(TSStatusCode.DATABASE_NOT_EXIST.getStatusCode()) @@ -514,11 +521,11 @@ private TSStatus extendRegionGroupIfNecessary( } } } catch (NotEnoughDataNodeException e) { - LOGGER.error(e.getMessage()); + LOGGER.error("Extend region group failed", e); result.setCode(TSStatusCode.NO_ENOUGH_DATANODE.getStatusCode()); result.setMessage(e.getMessage()); } catch (DatabaseNotExistsException e) { - LOGGER.error(e.getMessage()); + LOGGER.error("Extend region group failed", e); result.setCode(TSStatusCode.DATABASE_NOT_EXIST.getStatusCode()); result.setMessage(e.getMessage()); } @@ -1033,6 +1040,14 @@ public RegionInfoListResp getRegionInfoList(GetRegionInfoListPlan req) { ? 
RegionRoleType.Leader.toString() : RegionRoleType.Follower.toString(); regionInfo.setRoleType(regionType); + + long regionSize = + getLoadManager() + .getLoadCache() + .getRegionSizeMap() + .getOrDefault(regionInfo.getDataNodeId(), Collections.emptyMap()) + .getOrDefault(regionInfo.getConsensusGroupId().getId(), -1L); + regionInfo.setTsFileSize(regionSize); }); return regionInfoListResp; @@ -1183,8 +1198,8 @@ public GetSeriesSlotListResp getSeriesSlotList(TGetSeriesSlotListReq req) { * @param regionId regionId * @return database name */ - public String getRegionStorageGroup(TConsensusGroupId regionId) { - return partitionInfo.getRegionStorageGroup(regionId); + public String getRegionDatabase(TConsensusGroupId regionId) { + return partitionInfo.getRegionDatabase(regionId); } /** @@ -1258,7 +1273,7 @@ public void maintainRegionReplicas() { DataNodeAsyncRequestContext createSchemaRegionHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.CREATE_SCHEMA_REGION); + CnToDnAsyncRequestType.CREATE_SCHEMA_REGION); for (RegionMaintainTask regionMaintainTask : selectedRegionMaintainTask) { RegionCreateTask schemaRegionCreateTask = (RegionCreateTask) regionMaintainTask; @@ -1294,7 +1309,7 @@ public void maintainRegionReplicas() { DataNodeAsyncRequestContext createDataRegionHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.CREATE_DATA_REGION); + CnToDnAsyncRequestType.CREATE_DATA_REGION); for (RegionMaintainTask regionMaintainTask : selectedRegionMaintainTask) { RegionCreateTask dataRegionCreateTask = (RegionCreateTask) regionMaintainTask; @@ -1330,7 +1345,7 @@ public void maintainRegionReplicas() { case DELETE: // delete region DataNodeAsyncRequestContext deleteRegionHandler = - new DataNodeAsyncRequestContext<>(CnToDnRequestType.DELETE_REGION); + new DataNodeAsyncRequestContext<>(CnToDnAsyncRequestType.DELETE_REGION); Map regionIdMap = new HashMap<>(); for (RegionMaintainTask regionMaintainTask : selectedRegionMaintainTask) { RegionDeleteTask regionDeleteTask = (RegionDeleteTask) regionMaintainTask; @@ -1497,4 +1512,8 @@ private ProcedureManager getProcedureManager() { private NodeManager getNodeManager() { return configManager.getNodeManager(); } + + private TTLManager getTTLManager() { + return configManager.getTTLManager(); + } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionMetrics.java index 3e7426b3786b8..771856a465d44 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionMetrics.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionMetrics.java @@ -345,8 +345,8 @@ private static void bindDatabasePartitionMetricsWhenUpdate( try { return manager.getRegionGroupCount(database, TConsensusGroupType.SchemaRegion); } catch (DatabaseNotExistsException e) { - LOGGER.warn("Error when counting SchemaRegionGroups in Database: {}", database, e); - return -1; + LOGGER.info("Error when counting SchemaRegionGroups in Database: {}", database, e); + return 0; } }, Tag.NAME.toString(), @@ -361,8 +361,8 @@ private static void bindDatabasePartitionMetricsWhenUpdate( try { return manager.getRegionGroupCount(database, TConsensusGroupType.DataRegion); } catch (DatabaseNotExistsException e) { - LOGGER.warn("Error when counting DataRegionGroups in Database: {}", database, e); - return -1; + LOGGER.info("Error when counting 
DataRegionGroups in Database: {}", database, e);
+          return 0;
         }
       },
       Tag.NAME.toString(),
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/RegionGroupStatus.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/RegionGroupStatus.java
index b4cf1e037dbd1..a2c4bea6a736f 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/RegionGroupStatus.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/RegionGroupStatus.java
@@ -24,26 +24,19 @@ public enum RegionGroupStatus {
   Running("Running", 1),
 
   /**
-   * All Regions in RegionGroup are in the Running or Unknown status, and the number of Regions in
-   * the Unknown status is less than half
+   * For strong consistency algorithms, the RegionGroup is considered Available when more than
+   * half of its Regions are in the Running status. For weak consistency algorithms, the
+   * RegionGroup is considered Available when at least one Region is in the Running status.
+   * Regions in the Removing or Adding status, which occur only during region migration and
+   * reconstruction, are excluded from the evaluation: the denominator counts all Regions except
+   * those in the Removing or Adding status, while the numerator counts Regions in the Running
+   * status, so the high-availability evaluation is unaffected by ongoing migrations.
    */
   Available("Available", 2),
 
-  /**
-   * All Regions in RegionGroup are in the Running, Unknown or ReadOnly status, and at least 1 node
-   * is in ReadOnly status, the number of Regions in the Unknown or ReadOnly status is less than
-   * half
-   */
-  Discouraged("Discouraged", 3),
-
-  /**
-   * The following cases will lead to Disabled RegionGroup:
-   *
-   * <p>1. There is a Region in Removing status
-   *
-   * <p>2. More than half of the Regions are in Unknown or ReadOnly status
-   */
-  Disabled("Disabled", 4);
+  /** Any scenario not covered by the two statuses above. */
+  Disabled("Disabled", 3);
 
   private final String status;
   private final int weight;
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigNodePluginAgent.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigNodePluginAgent.java
index 8e748c38b684a..8ddbb73398c5c 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigNodePluginAgent.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigNodePluginAgent.java
@@ -19,11 +19,11 @@
 
 package org.apache.iotdb.confignode.manager.pipe.agent.plugin;
 
-import org.apache.iotdb.commons.pipe.agent.plugin.PipeConnectorConstructor;
-import org.apache.iotdb.commons.pipe.agent.plugin.PipeExtractorConstructor;
 import org.apache.iotdb.commons.pipe.agent.plugin.PipePluginAgent;
-import org.apache.iotdb.commons.pipe.agent.plugin.PipeProcessorConstructor;
-import org.apache.iotdb.commons.pipe.plugin.meta.PipePluginMetaKeeper;
+import org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeProcessorConstructor;
+import org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeSinkConstructor;
+import org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeSourceConstructor;
+import org.apache.iotdb.commons.pipe.agent.plugin.meta.PipePluginMetaKeeper;
 
 public class PipeConfigNodePluginAgent extends PipePluginAgent {
 
@@ -32,9 +32,9 @@ public PipeConfigNodePluginAgent(PipePluginMetaKeeper pipePluginMetaKeeper) {
   }
 
   @Override
-  protected PipeExtractorConstructor createPipeExtractorConstructor(
+  protected PipeSourceConstructor createPipeExtractorConstructor(
       PipePluginMetaKeeper pipePluginMetaKeeper) {
-    return new PipeConfigRegionExtractorConstructor();
+    return new PipeConfigRegionSourceConstructor();
   }
 
   @Override
@@ -44,8 +44,8 @@ protected PipeProcessorConstructor createPipeProcessorConstructor(
   }
 
   @Override
-  protected PipeConnectorConstructor createPipeConnectorConstructor(
+  protected PipeSinkConstructor createPipeConnectorConstructor(
       PipePluginMetaKeeper pipePluginMetaKeeper) {
-    return new PipeConfigRegionConnectorConstructor();
+    return new PipeConfigRegionSinkConstructor();
   }
 }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigRegionProcessorConstructor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigRegionProcessorConstructor.java
index 1f9f84ffdefca..8ea9576b7976d 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigRegionProcessorConstructor.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigRegionProcessorConstructor.java
@@ -19,9 +19,9 @@
 
 package org.apache.iotdb.confignode.manager.pipe.agent.plugin;
 
-import org.apache.iotdb.commons.pipe.agent.plugin.PipeProcessorConstructor;
-import org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin;
-import org.apache.iotdb.commons.pipe.plugin.builtin.processor.donothing.DoNothingProcessor;
+import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin;
+import org.apache.iotdb.commons.pipe.agent.plugin.builtin.processor.donothing.DoNothingProcessor;
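For illustration only, the Available rule documented above amounts to the following sketch. The RegionStatus enum and the method shape are assumptions invented here, not code from this patch. For a 3-replica group with statuses [Running, Running, Removing], the denominator is 2 and the running count is 2, so the group counts as Available even under strong consistency:

    import java.util.List;

    enum RegionStatus { Running, Unknown, ReadOnly, Removing, Adding } // hypothetical

    static RegionGroupStatus evaluate(final List<RegionStatus> statuses, final boolean strongConsistency) {
      final long running = statuses.stream().filter(s -> s == RegionStatus.Running).count();
      // Removing/Adding Regions are excluded from the denominator, as documented above.
      final long denominator =
          statuses.stream()
              .filter(s -> s != RegionStatus.Removing && s != RegionStatus.Adding)
              .count();
      // (The all-Running case, RegionGroupStatus.Running, is omitted for brevity.)
      final boolean available = strongConsistency ? running * 2 > denominator : running >= 1;
      return available ? RegionGroupStatus.Available : RegionGroupStatus.Disabled;
    }

+import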
org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeProcessorConstructor; import org.apache.iotdb.pipe.api.PipeProcessor; class PipeConfigRegionProcessorConstructor extends PipeProcessorConstructor { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigRegionConnectorConstructor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigRegionSinkConstructor.java similarity index 66% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigRegionConnectorConstructor.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigRegionSinkConstructor.java index 3bcd1d6b7498f..c7222e9a1bc97 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigRegionConnectorConstructor.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigRegionSinkConstructor.java @@ -19,57 +19,51 @@ package org.apache.iotdb.confignode.manager.pipe.agent.plugin; -import org.apache.iotdb.commons.pipe.agent.plugin.PipeConnectorConstructor; -import org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin; -import org.apache.iotdb.commons.pipe.plugin.builtin.connector.donothing.DoNothingConnector; -import org.apache.iotdb.confignode.manager.pipe.connector.protocol.IoTDBConfigRegionAirGapConnector; -import org.apache.iotdb.confignode.manager.pipe.connector.protocol.IoTDBConfigRegionConnector; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.sink.donothing.DoNothingSink; +import org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeSinkConstructor; +import org.apache.iotdb.confignode.manager.pipe.sink.protocol.IoTDBConfigRegionAirGapSink; +import org.apache.iotdb.confignode.manager.pipe.sink.protocol.IoTDBConfigRegionSink; import org.apache.iotdb.pipe.api.PipeConnector; -class PipeConfigRegionConnectorConstructor extends PipeConnectorConstructor { +class PipeConfigRegionSinkConstructor extends PipeSinkConstructor { @Override protected void initConstructors() { pluginConstructors.put( - BuiltinPipePlugin.IOTDB_THRIFT_CONNECTOR.getPipePluginName(), - IoTDBConfigRegionConnector::new); + BuiltinPipePlugin.IOTDB_THRIFT_CONNECTOR.getPipePluginName(), IoTDBConfigRegionSink::new); pluginConstructors.put( BuiltinPipePlugin.IOTDB_THRIFT_SSL_CONNECTOR.getPipePluginName(), - IoTDBConfigRegionConnector::new); + IoTDBConfigRegionSink::new); pluginConstructors.put( BuiltinPipePlugin.IOTDB_THRIFT_SYNC_CONNECTOR.getPipePluginName(), - IoTDBConfigRegionConnector::new); + IoTDBConfigRegionSink::new); pluginConstructors.put( BuiltinPipePlugin.IOTDB_THRIFT_ASYNC_CONNECTOR.getPipePluginName(), - IoTDBConfigRegionConnector::new); + IoTDBConfigRegionSink::new); pluginConstructors.put( BuiltinPipePlugin.IOTDB_AIR_GAP_CONNECTOR.getPipePluginName(), - IoTDBConfigRegionAirGapConnector::new); + IoTDBConfigRegionAirGapSink::new); pluginConstructors.put( - BuiltinPipePlugin.DO_NOTHING_CONNECTOR.getPipePluginName(), DoNothingConnector::new); + BuiltinPipePlugin.DO_NOTHING_CONNECTOR.getPipePluginName(), DoNothingSink::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_THRIFT_SINK.getPipePluginName(), IoTDBConfigRegionConnector::new); + BuiltinPipePlugin.IOTDB_THRIFT_SINK.getPipePluginName(), IoTDBConfigRegionSink::new); 
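// Aside (illustration, not part of this patch): the put(...) calls above and below
// populate a plain registry, a map from plugin name to a Supplier, and lookups fall
// back to a do-nothing implementation so an unknown key degrades gracefully. The
// registry's exact generic type here is an assumption:
//
//   final Map<String, Supplier<PipePlugin>> registry = new HashMap<>();
//   registry.put(BuiltinPipePlugin.IOTDB_THRIFT_SINK.getPipePluginName(), IoTDBConfigRegionSink::new);
//   final PipePlugin plugin = registry.getOrDefault(pluginKey, DoNothingSink::new).get();
//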
pluginConstructors.put( - BuiltinPipePlugin.IOTDB_THRIFT_SSL_SINK.getPipePluginName(), - IoTDBConfigRegionConnector::new); + BuiltinPipePlugin.IOTDB_THRIFT_SSL_SINK.getPipePluginName(), IoTDBConfigRegionSink::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_THRIFT_SYNC_SINK.getPipePluginName(), - IoTDBConfigRegionConnector::new); + BuiltinPipePlugin.IOTDB_THRIFT_SYNC_SINK.getPipePluginName(), IoTDBConfigRegionSink::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_THRIFT_ASYNC_SINK.getPipePluginName(), - IoTDBConfigRegionConnector::new); + BuiltinPipePlugin.IOTDB_THRIFT_ASYNC_SINK.getPipePluginName(), IoTDBConfigRegionSink::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_AIR_GAP_SINK.getPipePluginName(), - IoTDBConfigRegionAirGapConnector::new); + BuiltinPipePlugin.IOTDB_AIR_GAP_SINK.getPipePluginName(), IoTDBConfigRegionAirGapSink::new); pluginConstructors.put( - BuiltinPipePlugin.DO_NOTHING_SINK.getPipePluginName(), DoNothingConnector::new); + BuiltinPipePlugin.DO_NOTHING_SINK.getPipePluginName(), DoNothingSink::new); } @Override public PipeConnector reflectPluginByKey(String pluginKey) { // TODO: support constructing plugin by reflection - return (PipeConnector) - pluginConstructors.getOrDefault(pluginKey, DoNothingConnector::new).get(); + return (PipeConnector) pluginConstructors.getOrDefault(pluginKey, DoNothingSink::new).get(); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigRegionExtractorConstructor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigRegionSourceConstructor.java similarity index 70% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigRegionExtractorConstructor.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigRegionSourceConstructor.java index beae07642fd4a..79982ba28242d 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigRegionExtractorConstructor.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/plugin/PipeConfigRegionSourceConstructor.java @@ -19,31 +19,30 @@ package org.apache.iotdb.confignode.manager.pipe.agent.plugin; -import org.apache.iotdb.commons.pipe.agent.plugin.PipeExtractorConstructor; -import org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin; -import org.apache.iotdb.commons.pipe.plugin.builtin.extractor.donothing.DoNothingExtractor; -import org.apache.iotdb.confignode.manager.pipe.extractor.IoTDBConfigRegionExtractor; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.source.donothing.DoNothingSource; +import org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeSourceConstructor; +import org.apache.iotdb.confignode.manager.pipe.source.IoTDBConfigRegionSource; import org.apache.iotdb.pipe.api.PipeExtractor; -class PipeConfigRegionExtractorConstructor extends PipeExtractorConstructor { +class PipeConfigRegionSourceConstructor extends PipeSourceConstructor { @Override protected void initConstructors() { pluginConstructors.put( - BuiltinPipePlugin.DO_NOTHING_EXTRACTOR.getPipePluginName(), DoNothingExtractor::new); + BuiltinPipePlugin.DO_NOTHING_EXTRACTOR.getPipePluginName(), DoNothingSource::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_EXTRACTOR.getPipePluginName(), 
IoTDBConfigRegionExtractor::new); + BuiltinPipePlugin.IOTDB_EXTRACTOR.getPipePluginName(), IoTDBConfigRegionSource::new); pluginConstructors.put( - BuiltinPipePlugin.DO_NOTHING_SOURCE.getPipePluginName(), DoNothingExtractor::new); + BuiltinPipePlugin.DO_NOTHING_SOURCE.getPipePluginName(), DoNothingSource::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_SOURCE.getPipePluginName(), IoTDBConfigRegionExtractor::new); + BuiltinPipePlugin.IOTDB_SOURCE.getPipePluginName(), IoTDBConfigRegionSource::new); } @Override public PipeExtractor reflectPluginByKey(String pluginKey) { // TODO: support constructing plugin by reflection - return (PipeExtractor) - pluginConstructors.getOrDefault(pluginKey, DoNothingExtractor::new).get(); + return (PipeExtractor) pluginConstructors.getOrDefault(pluginKey, DoNothingSource::new).get(); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/receiver/IoTDBConfigNodeReceiverAgent.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/receiver/IoTDBConfigNodeReceiverAgent.java index fce5367398f59..5f6b0db8e50c0 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/receiver/IoTDBConfigNodeReceiverAgent.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/receiver/IoTDBConfigNodeReceiverAgent.java @@ -19,9 +19,9 @@ package org.apache.iotdb.confignode.manager.pipe.agent.receiver; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.IoTDBConnectorRequestVersion; import org.apache.iotdb.commons.pipe.receiver.IoTDBReceiver; import org.apache.iotdb.commons.pipe.receiver.IoTDBReceiverAgent; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.IoTDBSinkRequestVersion; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.iotdb.confignode.manager.pipe.receiver.protocol.IoTDBConfigNodeReceiver; @@ -37,7 +37,7 @@ public class IoTDBConfigNodeReceiverAgent extends IoTDBReceiverAgent { @Override protected void initConstructors() { RECEIVER_CONSTRUCTORS.put( - IoTDBConnectorRequestVersion.VERSION_1.getVersion(), IoTDBConfigNodeReceiver::new); + IoTDBSinkRequestVersion.VERSION_1.getVersion(), IoTDBConfigNodeReceiver::new); } @Override diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/runtime/PipeConfigNodeRuntimeAgent.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/runtime/PipeConfigNodeRuntimeAgent.java index 160c710ea4d2f..372b0334a24c3 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/runtime/PipeConfigNodeRuntimeAgent.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/runtime/PipeConfigNodeRuntimeAgent.java @@ -22,20 +22,21 @@ import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeCriticalException; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeException; +import org.apache.iotdb.commons.pipe.agent.runtime.PipePeriodicalJobExecutor; +import org.apache.iotdb.commons.pipe.agent.runtime.PipePeriodicalPhantomReferenceCleaner; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; import 
org.apache.iotdb.commons.service.IService; import org.apache.iotdb.commons.service.ServiceType; import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent; -import org.apache.iotdb.confignode.manager.pipe.extractor.ConfigRegionListeningQueue; import org.apache.iotdb.confignode.manager.pipe.resource.PipeConfigNodeCopiedFileDirStartupCleaner; +import org.apache.iotdb.confignode.manager.pipe.source.ConfigRegionListeningQueue; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; public class PipeConfigNodeRuntimeAgent implements IService { @@ -46,6 +47,12 @@ public class PipeConfigNodeRuntimeAgent implements IService { private final AtomicBoolean isShutdown = new AtomicBoolean(false); + private final PipePeriodicalJobExecutor pipePeriodicalJobExecutor = + new PipePeriodicalJobExecutor(); + + private final PipePeriodicalPhantomReferenceCleaner pipePeriodicalPhantomReferenceCleaner = + new PipePeriodicalPhantomReferenceCleaner(); + @Override public synchronized void start() { PipeConfig.getInstance().printAllConfigs(); @@ -59,6 +66,13 @@ public synchronized void start() { // Clean receiver file dir PipeConfigNodeAgent.receiver().cleanPipeReceiverDir(); + // Start periodical job executor + pipePeriodicalJobExecutor.start(); + + if (PipeConfig.getInstance().getPipeEventReferenceTrackingEnabled()) { + pipePeriodicalPhantomReferenceCleaner.start(); + } + isShutdown.set(false); LOGGER.info("PipeRuntimeConfigNodeAgent started"); } @@ -70,6 +84,9 @@ public synchronized void stop() { } isShutdown.set(true); + // Stop periodical job executor + pipePeriodicalJobExecutor.stop(); + PipeConfigNodeAgent.task().dropAllPipeTasks(); LOGGER.info("PipeRuntimeConfigNodeAgent stopped"); @@ -90,12 +107,13 @@ public ConfigRegionListeningQueue listener() { return regionListener.listener(); } - public void increaseListenerReference(PipeParameters parameters) throws IllegalPathException { + public void increaseListenerReference(final PipeParameters parameters) + throws IllegalPathException { regionListener.increaseReference(parameters); } - public void decreaseListenerReference(PipeParameters parameters) - throws IllegalPathException, IOException { + public void decreaseListenerReference(final PipeParameters parameters) + throws IllegalPathException { regionListener.decreaseReference(parameters); } @@ -120,7 +138,7 @@ public boolean isLeaderReady() { //////////////////////////// Runtime Exception Handlers //////////////////////////// - public void report(EnrichedEvent event, PipeRuntimeException pipeRuntimeException) { + public void report(final EnrichedEvent event, final PipeRuntimeException pipeRuntimeException) { if (event.getPipeTaskMeta() != null) { report(event.getPipeTaskMeta(), pipeRuntimeException); } else { @@ -128,7 +146,8 @@ public void report(EnrichedEvent event, PipeRuntimeException pipeRuntimeExceptio } } - private void report(PipeTaskMeta pipeTaskMeta, PipeRuntimeException pipeRuntimeException) { + private void report( + final PipeTaskMeta pipeTaskMeta, final PipeRuntimeException pipeRuntimeException) { LOGGER.warn( "Report PipeRuntimeException to local PipeTaskMeta({}), exception message: {}", pipeTaskMeta, @@ -142,4 +161,15 @@ private void report(PipeTaskMeta pipeTaskMeta, PipeRuntimeException pipeRuntimeE PipeConfigNodeAgent.task().stopAllPipesWithCriticalException(); } } + + /////////////////////////// Periodical Job Executor 
///////////////////////////
+
+  public void registerPeriodicalJob(String id, Runnable periodicalJob, long intervalInSeconds) {
+    pipePeriodicalJobExecutor.register(id, periodicalJob, intervalInSeconds);
+  }
+
+  public void registerPhantomReferenceCleanJob(
+      String id, Runnable periodicalJob, long intervalInSeconds) {
+    pipePeriodicalPhantomReferenceCleaner.register(id, periodicalJob, intervalInSeconds);
+  }
 }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/runtime/PipeConfigRegionListener.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/runtime/PipeConfigRegionListener.java
index 458ed1c57a828..278e133494b50 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/runtime/PipeConfigRegionListener.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/runtime/PipeConfigRegionListener.java
@@ -20,11 +20,10 @@
 package org.apache.iotdb.confignode.manager.pipe.agent.runtime;
 
 import org.apache.iotdb.commons.exception.IllegalPathException;
-import org.apache.iotdb.confignode.manager.pipe.extractor.ConfigRegionListeningFilter;
-import org.apache.iotdb.confignode.manager.pipe.extractor.ConfigRegionListeningQueue;
+import org.apache.iotdb.confignode.manager.pipe.source.ConfigRegionListeningFilter;
+import org.apache.iotdb.confignode.manager.pipe.source.ConfigRegionListeningQueue;
 import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
 
-import java.io.IOException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 public class PipeConfigRegionListener {
@@ -38,7 +37,7 @@ public synchronized ConfigRegionListeningQueue listener() {
     return listeningQueue;
   }
 
-  public synchronized void increaseReference(PipeParameters parameters)
+  public synchronized void increaseReference(final PipeParameters parameters)
       throws IllegalPathException {
     if (!ConfigRegionListeningFilter.parseListeningPlanTypeSet(parameters).isEmpty()) {
       listeningQueueReferenceCount++;
@@ -48,8 +47,8 @@ public synchronized void increaseReference(PipeParameters parameters)
     }
   }
 
-  public synchronized void decreaseReference(PipeParameters parameters)
-      throws IllegalPathException, IOException {
+  public synchronized void decreaseReference(final PipeParameters parameters)
+      throws IllegalPathException {
     if (!ConfigRegionListeningFilter.parseListeningPlanTypeSet(parameters).isEmpty()) {
       listeningQueueReferenceCount--;
       if (listeningQueueReferenceCount == 0) {
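The registerPeriodicalJob hook added to PipeConfigNodeRuntimeAgent above takes a job id, a Runnable, and an interval in seconds. A hypothetical registration, with the id and job body invented purely for illustration:

    PipeConfigNodeAgent.runtime()
        .registerPeriodicalJob(
            "example_cleanup_job",        // hypothetical job id
            () -> doPeriodicCleanup(),    // any Runnable supplied by the caller
            60);                          // run every 60 seconds

diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/execution/PipeConfigNodeSubtask.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeSubtask.java
similarity index 82%
rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/execution/PipeConfigNodeSubtask.java
rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeSubtask.java
index 02cb537335973..890749f416eed 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/execution/PipeConfigNodeSubtask.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeSubtask.java
@@ -17,21 +17,23 @@
  * under the License.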
*/ -package org.apache.iotdb.confignode.manager.pipe.execution; +package org.apache.iotdb.confignode.manager.pipe.agent.task; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeException; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.task.progress.PipeEventCommitManager; +import org.apache.iotdb.commons.pipe.agent.task.subtask.PipeAbstractSinkSubtask; import org.apache.iotdb.commons.pipe.config.constant.PipeProcessorConstant; import org.apache.iotdb.commons.pipe.config.plugin.configuraion.PipeTaskRuntimeConfiguration; -import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskExtractorRuntimeEnvironment; -import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskRuntimeEnvironment; +import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskProcessorRuntimeEnvironment; +import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskSinkRuntimeEnvironment; +import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskSourceRuntimeEnvironment; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin; -import org.apache.iotdb.commons.pipe.progress.PipeEventCommitManager; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; -import org.apache.iotdb.commons.pipe.task.subtask.PipeAbstractConnectorSubtask; +import org.apache.iotdb.commons.pipe.event.ProgressReportEvent; import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent; -import org.apache.iotdb.confignode.manager.pipe.extractor.IoTDBConfigRegionExtractor; -import org.apache.iotdb.confignode.manager.pipe.metric.PipeConfigRegionConnectorMetrics; +import org.apache.iotdb.confignode.manager.pipe.metric.sink.PipeConfigRegionSinkMetrics; +import org.apache.iotdb.confignode.manager.pipe.source.IoTDBConfigRegionSource; import org.apache.iotdb.pipe.api.PipeExtractor; import org.apache.iotdb.pipe.api.PipeProcessor; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator; @@ -46,7 +48,7 @@ import static org.apache.iotdb.db.protocol.client.ConfigNodeInfo.CONFIG_REGION_ID; -public class PipeConfigNodeSubtask extends PipeAbstractConnectorSubtask { +public class PipeConfigNodeSubtask extends PipeAbstractSinkSubtask { private static final Logger LOGGER = LoggerFactory.getLogger(PipeConfigNodeSubtask.class); @@ -77,7 +79,7 @@ public PipeConfigNodeSubtask( initProcessor(processorAttributes); initConnector(connectorAttributes); - PipeConfigRegionConnectorMetrics.getInstance().register(this); + PipeConfigRegionSinkMetrics.getInstance().register(this); PipeEventCommitManager.getInstance() .register(pipeName, creationTime, CONFIG_REGION_ID.getId(), pipeName + "_" + creationTime); } @@ -95,7 +97,7 @@ private void initExtractor(final Map extractorAttributes) throws // 3. 
Customize extractor
       final PipeTaskRuntimeConfiguration runtimeConfiguration =
           new PipeTaskRuntimeConfiguration(
-              new PipeTaskExtractorRuntimeEnvironment(
+              new PipeTaskSourceRuntimeEnvironment(
                   pipeName, creationTime, CONFIG_REGION_ID.getId(), pipeTaskMeta));
       extractor.customize(extractorParameters, runtimeConfiguration);
     } catch (final Exception e) {
@@ -116,7 +118,8 @@ private void initProcessor(final Map<String, String> processorAttributes) {
     final PipeTaskRuntimeConfiguration runtimeConfiguration =
         new PipeTaskRuntimeConfiguration(
-            new PipeTaskRuntimeEnvironment(pipeName, creationTime, CONFIG_REGION_ID.getId()));
+            new PipeTaskProcessorRuntimeEnvironment(
+                pipeName, creationTime, CONFIG_REGION_ID.getId(), null));
 
     processor =
         PipeConfigNodeAgent.plugin()
@@ -141,7 +144,7 @@ private void initConnector(final Map<String, String> connectorAttributes) throws
       // 3. Customize connector
       final PipeTaskRuntimeConfiguration runtimeConfiguration =
           new PipeTaskRuntimeConfiguration(
-              new PipeTaskRuntimeEnvironment(pipeName, creationTime, CONFIG_REGION_ID.getId()));
+              new PipeTaskSinkRuntimeEnvironment(pipeName, creationTime, CONFIG_REGION_ID.getId()));
       outputPipeConnector.customize(connectorParameters, runtimeConfiguration);
 
       // 4. Handshake
@@ -160,7 +163,7 @@ private void initConnector(final Map<String, String> connectorAttributes) throws
   }
 
   /**
-   * Try to consume an {@link Event} by the {@link IoTDBConfigRegionExtractor}.
+   * Try to consume an {@link Event} by the {@link IoTDBConfigRegionSource}.
    *
    * @return {@code true} if the {@link Event} is consumed successfully, {@code false} if no more
    *     {@link Event} can be consumed
@@ -181,10 +184,11 @@ protected boolean executeOnce() throws Exception {
         return false;
       }
 
-      outputPipeConnector.transfer(event);
-      decreaseReferenceCountAndReleaseLastEvent(true);
-
-      PipeConfigRegionConnectorMetrics.getInstance().markConfigEvent(taskID);
+      if (!(event instanceof ProgressReportEvent)) {
+        outputPipeConnector.transfer(event);
+        PipeConfigRegionSinkMetrics.getInstance().markConfigEvent(taskID);
+      }
+      decreaseReferenceCountAndReleaseLastEvent(event, true);
     } catch (final PipeException e) {
       setLastExceptionEvent(event);
       if (!isClosed.get()) {
@@ -194,7 +198,7 @@ protected boolean executeOnce() throws Exception {
             "{} in pipe transfer, ignored because pipe is dropped.",
             e.getClass().getSimpleName(),
             e);
-        clearReferenceCountAndReleaseLastEvent();
+        clearReferenceCountAndReleaseLastEvent(event);
       }
     } catch (final Exception e) {
       setLastExceptionEvent(event);
@@ -205,7 +209,7 @@ protected boolean executeOnce() throws Exception {
             e);
       } else {
         LOGGER.info("Exception in pipe transfer, ignored because pipe is dropped.", e);
-        clearReferenceCountAndReleaseLastEvent();
+        clearReferenceCountAndReleaseLastEvent(event);
       }
     }
 
@@ -218,7 +222,7 @@ public void close() {
     PipeEventCommitManager.getInstance()
         .deregister(pipeName, creationTime, CONFIG_REGION_ID.getId());
-    PipeConfigRegionConnectorMetrics.getInstance().deregister(taskID);
+    PipeConfigRegionSinkMetrics.getInstance().deregister(taskID);
 
     try {
       extractor.close();
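The executeOnce() change above narrows what is transferred: progress-report events now skip the sink, but every consumed event still has its reference released. A condensed sketch of the new control flow (names follow the patch; surrounding plumbing is omitted):

    // inside executeOnce(), after an event has been obtained from the extractor
    if (!(event instanceof ProgressReportEvent)) {
      outputPipeConnector.transfer(event); // ship real events only
      PipeConfigRegionSinkMetrics.getInstance().markConfigEvent(taskID);
    }
    // released in every case, so progress reports cannot leak references
    decreaseReferenceCountAndReleaseLastEvent(event, true);

diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/execution/PipeConfigNodeSubtaskExecutor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeSubtaskExecutor.java
similarity index 84%
rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/execution/PipeConfigNodeSubtaskExecutor.java
rename to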
iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeSubtaskExecutor.java index 6041c5bf82da8..1ac64eb246f06 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/execution/PipeConfigNodeSubtaskExecutor.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeSubtaskExecutor.java @@ -17,10 +17,10 @@ * under the License. */ -package org.apache.iotdb.confignode.manager.pipe.execution; +package org.apache.iotdb.confignode.manager.pipe.agent.task; import org.apache.iotdb.commons.concurrent.ThreadName; -import org.apache.iotdb.commons.pipe.execution.executor.PipeSubtaskExecutor; +import org.apache.iotdb.commons.pipe.agent.task.execution.PipeSubtaskExecutor; import org.apache.iotdb.commons.utils.TestOnly; public class PipeConfigNodeSubtaskExecutor extends PipeSubtaskExecutor { @@ -28,7 +28,7 @@ public class PipeConfigNodeSubtaskExecutor extends PipeSubtaskExecutor { private static final int THREAD_NUM = 1; private PipeConfigNodeSubtaskExecutor() { - super(THREAD_NUM, ThreadName.PIPE_CONFIGNODE_EXECUTOR_POOL, true); + super(THREAD_NUM, ThreadName.PIPE_CONFIGNODE_EXECUTOR_POOL.getName(), true); } /** @@ -36,7 +36,7 @@ private PipeConfigNodeSubtaskExecutor() { */ @TestOnly public PipeConfigNodeSubtaskExecutor(final Object ignored) { - super(THREAD_NUM, ThreadName.PIPE_CONFIGNODE_EXECUTOR_POOL, true); + super(THREAD_NUM, ThreadName.PIPE_CONFIGNODE_EXECUTOR_POOL.getName(), true); } private static class PipeSchemaSubtaskExecutorHolder { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/task/PipeConfigNodeTask.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTask.java similarity index 91% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/task/PipeConfigNodeTask.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTask.java index 7eea3b8482e2b..10e2c79df09f4 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/task/PipeConfigNodeTask.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTask.java @@ -17,9 +17,9 @@ * under the License. 
*/ -package org.apache.iotdb.confignode.manager.pipe.task; +package org.apache.iotdb.confignode.manager.pipe.agent.task; -import org.apache.iotdb.commons.pipe.task.PipeTask; +import org.apache.iotdb.commons.pipe.agent.task.PipeTask; public class PipeConfigNodeTask implements PipeTask { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskAgent.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskAgent.java index 4536789daf617..3f66d8aa7c057 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskAgent.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskAgent.java @@ -19,26 +19,23 @@ package org.apache.iotdb.confignode.manager.pipe.agent.task; +import org.apache.iotdb.common.rpc.thrift.TPipeHeartbeatResp; import org.apache.iotdb.commons.consensus.index.ProgressIndex; import org.apache.iotdb.commons.consensus.index.impl.MetaProgressIndex; import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.commons.pipe.agent.task.PipeTask; import org.apache.iotdb.commons.pipe.agent.task.PipeTaskAgent; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.task.PipeTask; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent; -import org.apache.iotdb.confignode.manager.pipe.extractor.ConfigRegionListeningFilter; -import org.apache.iotdb.confignode.manager.pipe.metric.PipeConfigNodeRemainingTimeMetrics; -import org.apache.iotdb.confignode.manager.pipe.metric.PipeConfigRegionExtractorMetrics; +import org.apache.iotdb.confignode.manager.pipe.metric.overview.PipeConfigNodeRemainingTimeMetrics; +import org.apache.iotdb.confignode.manager.pipe.metric.source.PipeConfigRegionSourceMetrics; import org.apache.iotdb.confignode.manager.pipe.resource.PipeConfigNodeResourceManager; -import org.apache.iotdb.confignode.manager.pipe.task.PipeConfigNodeTask; -import org.apache.iotdb.confignode.manager.pipe.task.PipeConfigNodeTaskBuilder; -import org.apache.iotdb.confignode.manager.pipe.task.PipeConfigNodeTaskStage; +import org.apache.iotdb.confignode.manager.pipe.source.ConfigRegionListeningFilter; import org.apache.iotdb.mpp.rpc.thrift.TPipeHeartbeatReq; -import org.apache.iotdb.mpp.rpc.thrift.TPipeHeartbeatResp; import org.apache.iotdb.mpp.rpc.thrift.TPushPipeMetaRespExceptionMessage; import org.apache.iotdb.pipe.api.exception.PipeException; @@ -49,11 +46,13 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Consumer; import java.util.stream.Collectors; public class PipeConfigNodeTaskAgent extends PipeTaskAgent { @@ -124,7 +123,7 @@ protected TPushPipeMetaRespExceptionMessage 
handleSinglePipeMetaChangesInternal( final PipeMeta pipeMetaFromCoordinator) { try { return PipeConfigNodeAgent.runtime().isLeaderReady() - ? super.handleSinglePipeMetaChangesInternal(pipeMetaFromCoordinator.deepCopy()) + ? super.handleSinglePipeMetaChangesInternal(pipeMetaFromCoordinator.deepCopy4TaskAgent()) : null; } catch (final Exception e) { return new TPushPipeMetaRespExceptionMessage( @@ -155,7 +154,7 @@ protected List handlePipeMetaChangesInternal( .map( pipeMeta -> { try { - return pipeMeta.deepCopy(); + return pipeMeta.deepCopy4TaskAgent(); } catch (Exception e) { throw new PipeException("failed to deep copy pipeMeta", e); } @@ -208,27 +207,25 @@ protected void collectPipeMetaListInternal( if (isShutdown() || !PipeConfigNodeAgent.runtime().isLeaderReady()) { return; } - - LOGGER.info("Received pipe heartbeat request {} from config coordinator.", req.heartbeatId); + final Optional logger = + PipeConfigNodeResourceManager.log() + .schedule( + PipeConfigNodeTaskAgent.class, + PipeConfig.getInstance().getPipeMetaReportMaxLogNumPerRound(), + PipeConfig.getInstance().getPipeMetaReportMaxLogIntervalRounds(), + pipeMetaKeeper.getPipeMetaCount()); + LOGGER.debug("Received pipe heartbeat request {} from config coordinator.", req.heartbeatId); final List pipeMetaBinaryList = new ArrayList<>(); final List pipeRemainingEventCountList = new ArrayList<>(); final List pipeRemainingTimeList = new ArrayList<>(); try { - final Optional logger = - PipeConfigNodeResourceManager.log() - .schedule( - PipeConfigNodeTaskAgent.class, - PipeConfig.getInstance().getPipeMetaReportMaxLogNumPerRound(), - PipeConfig.getInstance().getPipeMetaReportMaxLogIntervalRounds(), - pipeMetaKeeper.getPipeMetaCount()); - for (final PipeMeta pipeMeta : pipeMetaKeeper.getPipeMetaList()) { pipeMetaBinaryList.add(pipeMeta.serialize()); final PipeStaticMeta staticMeta = pipeMeta.getStaticMeta(); final long remainingEventCount = - PipeConfigRegionExtractorMetrics.getInstance() + PipeConfigRegionSourceMetrics.getInstance() .getRemainingEventCount(staticMeta.getPipeName(), staticMeta.getCreationTime()); final double estimatedRemainingTime = PipeConfigNodeRemainingTimeMetrics.getInstance() @@ -245,7 +242,7 @@ protected void collectPipeMetaListInternal( remainingEventCount, estimatedRemainingTime)); } - LOGGER.info("Reported {} pipe metas.", pipeMetaBinaryList.size()); + logger.ifPresent(l -> l.info("Reported {} pipe metas.", pipeMetaBinaryList.size())); } catch (final IOException e) { throw new TException(e); } @@ -253,4 +250,10 @@ protected void collectPipeMetaListInternal( resp.setPipeRemainingEventCountList(pipeRemainingEventCountList); resp.setPipeRemainingTimeList(pipeRemainingTimeList); } + + @Override + public void runPipeTasks( + final Collection pipeTasks, final Consumer runSingle) { + pipeTasks.forEach(runSingle); + } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/task/PipeConfigNodeTaskBuilder.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskBuilder.java similarity index 84% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/task/PipeConfigNodeTaskBuilder.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskBuilder.java index 31f5aa1ea9b52..bab8badcb59c9 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/task/PipeConfigNodeTaskBuilder.java +++ 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskBuilder.java @@ -17,16 +17,16 @@ * under the License. */ -package org.apache.iotdb.confignode.manager.pipe.task; +package org.apache.iotdb.confignode.manager.pipe.agent.task; import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.pipe.task.PipeTask; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeRuntimeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.task.PipeTask; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeRuntimeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; -import org.apache.iotdb.confignode.manager.pipe.extractor.ConfigRegionListeningFilter; +import org.apache.iotdb.confignode.manager.pipe.source.ConfigRegionListeningFilter; import java.util.HashMap; import java.util.Map; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/task/PipeConfigNodeTaskStage.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskStage.java similarity index 86% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/task/PipeConfigNodeTaskStage.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskStage.java index 7e676903857ea..670062be6ba61 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/task/PipeConfigNodeTaskStage.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskStage.java @@ -17,12 +17,10 @@ * under the License. 
*/ -package org.apache.iotdb.confignode.manager.pipe.task; +package org.apache.iotdb.confignode.manager.pipe.agent.task; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; -import org.apache.iotdb.commons.pipe.task.stage.PipeTaskStage; -import org.apache.iotdb.confignode.manager.pipe.execution.PipeConfigNodeSubtask; -import org.apache.iotdb.confignode.manager.pipe.execution.PipeConfigNodeSubtaskExecutor; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.task.stage.PipeTaskStage; import org.apache.iotdb.pipe.api.exception.PipeException; import java.util.Map; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/plugin/PipePluginCoordinator.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/plugin/PipePluginCoordinator.java index 07259f842fbe8..b26e8ab3ce14c 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/plugin/PipePluginCoordinator.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/plugin/PipePluginCoordinator.java @@ -20,7 +20,7 @@ package org.apache.iotdb.confignode.manager.pipe.coordinator.plugin; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.pipe.plugin.meta.PipePluginMeta; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.PipePluginMeta; import org.apache.iotdb.confignode.consensus.request.read.pipe.plugin.GetPipePluginJarPlan; import org.apache.iotdb.confignode.consensus.request.read.pipe.plugin.GetPipePluginTablePlan; import org.apache.iotdb.confignode.consensus.response.JarResp; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/PipeLeaderChangeHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/PipeLeaderChangeHandler.java index 284779fcb0395..9121ba3caa444 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/PipeLeaderChangeHandler.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/PipeLeaderChangeHandler.java @@ -83,7 +83,7 @@ public void onConsensusGroupStatisticsChanged(ConsensusGroupStatisticsChangeEven .forEach( (regionGroupId, pair) -> { final String databaseName = - configManager.getPartitionManager().getRegionStorageGroup(regionGroupId); + configManager.getPartitionManager().getRegionDatabase(regionGroupId); // Pipe only collect user's data, filter metric database here. 
// DatabaseName may be null for config region group if (Objects.isNull(databaseName) diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/PipeRuntimeCoordinator.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/PipeRuntimeCoordinator.java index d960c3fe88a44..5b6369a2c3c90 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/PipeRuntimeCoordinator.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/PipeRuntimeCoordinator.java @@ -29,8 +29,6 @@ import org.apache.iotdb.confignode.manager.pipe.coordinator.runtime.heartbeat.PipeHeartbeat; import org.apache.iotdb.confignode.manager.pipe.coordinator.runtime.heartbeat.PipeHeartbeatScheduler; -import javax.validation.constraints.NotNull; - import java.nio.ByteBuffer; import java.util.List; import java.util.concurrent.ExecutorService; @@ -106,7 +104,7 @@ public void stopPipeHeartbeat() { public void parseHeartbeat( final int dataNodeId, - @NotNull final List pipeMetaByteBufferListFromDataNode, + /* @Nullable */ final List pipeMetaByteBufferListFromDataNode, /* @Nullable */ final List pipeCompletedListFromAgent, /* @Nullable */ final List pipeRemainingEventCountListFromAgent, /* @Nullable */ final List pipeRemainingTimeListFromAgent) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeat.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeat.java index 1f54b8745d53e..547310ce49ca2 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeat.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeat.java @@ -19,10 +19,8 @@ package org.apache.iotdb.confignode.manager.pipe.coordinator.runtime.heartbeat; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; - -import javax.validation.constraints.NotNull; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; import java.nio.ByteBuffer; import java.util.HashMap; @@ -37,12 +35,17 @@ public class PipeHeartbeat { private final Map remainingTimeMap = new HashMap<>(); public PipeHeartbeat( - @NotNull final List pipeMetaByteBufferListFromAgent, + /* @Nullable */ final List pipeMetaByteBufferListFromAgent, /* @Nullable */ final List pipeCompletedListFromAgent, /* @Nullable */ final List pipeRemainingEventCountListFromAgent, /* @Nullable */ final List pipeRemainingTimeListFromAgent) { + // Pipe meta may be null for nodes shutting down, return empty heartbeat + if (Objects.isNull(pipeMetaByteBufferListFromAgent)) { + return; + } for (int i = 0; i < pipeMetaByteBufferListFromAgent.size(); ++i) { - final PipeMeta pipeMeta = PipeMeta.deserialize(pipeMetaByteBufferListFromAgent.get(i)); + final PipeMeta pipeMeta = + PipeMeta.deserialize4TaskAgent(pipeMetaByteBufferListFromAgent.get(i)); pipeMetaMap.put(pipeMeta.getStaticMeta(), pipeMeta); isCompletedMap.put( pipeMeta.getStaticMeta(), diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeatParser.java 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeatParser.java index 18c964f06c10a..56f1a0492def5 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeatParser.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeatParser.java @@ -20,16 +20,16 @@ package org.apache.iotdb.confignode.manager.pipe.coordinator.runtime.heartbeat; import org.apache.iotdb.commons.consensus.index.ProgressIndex; -import org.apache.iotdb.commons.exception.pipe.PipeRuntimeConnectorCriticalException; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeCriticalException; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeException; +import org.apache.iotdb.commons.exception.pipe.PipeRuntimeSinkCriticalException; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeRuntimeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTemporaryMetaInCoordinator; import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeRuntimeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStatus; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeTemporaryMeta; import org.apache.iotdb.confignode.consensus.response.pipe.task.PipeTableResp; import org.apache.iotdb.confignode.manager.ConfigManager; import org.apache.iotdb.confignode.manager.pipe.resource.PipeConfigNodeResourceManager; @@ -144,7 +144,8 @@ private void parseHeartbeatAndSaveMetaChangeLocally( continue; } - final PipeTemporaryMeta temporaryMeta = pipeMetaFromCoordinator.getTemporaryMeta(); + final PipeTemporaryMetaInCoordinator temporaryMeta = + (PipeTemporaryMetaInCoordinator) pipeMetaFromCoordinator.getTemporaryMeta(); // Remove completed pipes final Boolean isPipeCompletedFromAgent = pipeHeartbeat.isCompleted(staticMeta); @@ -184,9 +185,8 @@ private void parseHeartbeatAndSaveMetaChangeLocally( final PipeTaskMeta runtimeMetaFromAgent = pipeTaskMetaMapFromAgent.get(runtimeMetaFromCoordinator.getKey()); if (runtimeMetaFromAgent == null) { - LOGGER.warn( - "PipeRuntimeCoordinator meets error in updating pipeMetaKeeper, " - + "runtimeMetaFromAgent is null, runtimeMetaFromCoordinator: {}", + LOGGER.info( + "No corresponding Pipe is running in the reported DataRegion. 
runtimeMetaFromAgent is null, runtimeMetaFromCoordinator: {}", runtimeMetaFromCoordinator); continue; } @@ -254,7 +254,7 @@ private void parseHeartbeatAndSaveMetaChangeLocally( pipeName); } - if (exception instanceof PipeRuntimeConnectorCriticalException) { + if (exception instanceof PipeRuntimeSinkCriticalException) { ((PipeTableResp) pipeTaskInfo.get().showPipes()) .filter(true, pipeName).getAllPipeMeta().stream() .filter(pipeMeta -> !pipeMeta.getStaticMeta().getPipeName().equals(pipeName)) diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeatScheduler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeatScheduler.java index b21fbc815f98c..120ddb65f8621 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeatScheduler.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeatScheduler.java @@ -20,18 +20,18 @@ package org.apache.iotdb.confignode.manager.pipe.coordinator.runtime.heartbeat; import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TPipeHeartbeatResp; import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; import org.apache.iotdb.commons.concurrent.ThreadName; import org.apache.iotdb.commons.concurrent.threadpool.ScheduledExecutorUtil; import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.iotdb.confignode.manager.ConfigManager; import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent; import org.apache.iotdb.mpp.rpc.thrift.TPipeHeartbeatReq; -import org.apache.iotdb.mpp.rpc.thrift.TPipeHeartbeatResp; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -96,7 +96,7 @@ private synchronized void heartbeat() { final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.PIPE_HEARTBEAT, request, dataNodeLocationMap); + CnToDnAsyncRequestType.PIPE_HEARTBEAT, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance() .sendAsyncRequestToNodeWithRetryAndTimeoutInMs( clientHandler, diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/event/PipeConfigRegionSnapshotEvent.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/event/PipeConfigRegionSnapshotEvent.java index 40918e1b6a7a6..1f6f2ff0f9462 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/event/PipeConfigRegionSnapshotEvent.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/event/PipeConfigRegionSnapshotEvent.java @@ -19,13 +19,16 @@ package org.apache.iotdb.confignode.manager.pipe.event; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; import org.apache.iotdb.commons.pipe.event.PipeSnapshotEvent; -import 
org.apache.iotdb.commons.pipe.pattern.PipePattern;
-import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta;
+import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager.PipeEventResource;
+import org.apache.iotdb.commons.pipe.resource.snapshot.PipeSnapshotResourceManager;
 import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
 import org.apache.iotdb.confignode.manager.pipe.resource.PipeConfigNodeResourceManager;
 import org.apache.iotdb.confignode.persistence.schema.CNSnapshotFileType;
+import org.apache.iotdb.db.pipe.event.ReferenceTrackableEvent;
 
 import org.apache.tsfile.utils.ReadWriteIOUtils;
 import org.slf4j.Logger;
@@ -41,9 +44,12 @@
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
 
-public class PipeConfigRegionSnapshotEvent extends PipeSnapshotEvent {
+public class PipeConfigRegionSnapshotEvent extends PipeSnapshotEvent
+    implements ReferenceTrackableEvent {
 
   private static final Logger LOGGER =
       LoggerFactory.getLogger(PipeConfigRegionSnapshotEvent.class);
   private String snapshotPath;
@@ -243,4 +249,52 @@ public String coreReportMessage() {
         + " - "
         + super.coreReportMessage();
   }
+
+  /////////////////////////// ReferenceTrackableEvent ///////////////////////////
+
+  @Override
+  protected void trackResource() {
+    PipeConfigNodeResourceManager.ref().trackPipeEventResource(this, eventResourceBuilder());
+  }
+
+  @Override
+  public PipeEventResource eventResourceBuilder() {
+    return new PipeConfigRegionSnapshotEventResource(
+        this.isReleased,
+        this.referenceCount,
+        this.resourceManager,
+        this.snapshotPath,
+        this.templateFilePath);
+  }
+
+  private static class PipeConfigRegionSnapshotEventResource extends PipeEventResource {
+
+    private final PipeSnapshotResourceManager resourceManager;
+    private final String snapshotPath;
+    private final String templateFilePath;
+
+    private PipeConfigRegionSnapshotEventResource(
+        final AtomicBoolean isReleased,
+        final AtomicInteger referenceCount,
+        final PipeSnapshotResourceManager resourceManager,
+        final String snapshotPath,
+        final String templateFilePath) {
+      super(isReleased, referenceCount);
+      this.resourceManager = resourceManager;
+      this.snapshotPath = snapshotPath;
+      this.templateFilePath = templateFilePath;
+    }
+
+    @Override
+    protected void finalizeResource() {
+      try {
+        resourceManager.decreaseSnapshotReference(snapshotPath);
+        if (!templateFilePath.isEmpty()) {
+          resourceManager.decreaseSnapshotReference(templateFilePath);
+        }
+      } catch (final Exception e) {
+        LOGGER.warn("Decrease reference count for snapshot {} error.", snapshotPath, e);
+      }
+    }
+  }
 }
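The ReferenceTrackableEvent wiring above lets a phantom-reference cleaner release an event's snapshot files even if the event object is garbage-collected without an explicit release. A generic, self-contained sketch of that mechanism using plain java.lang.ref, not the patch's PipePhantomReferenceManager API:

    import java.lang.ref.PhantomReference;
    import java.lang.ref.Reference;
    import java.lang.ref.ReferenceQueue;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    final class ResourceTracker {
      private final ReferenceQueue<Object> queue = new ReferenceQueue<>();
      private final Map<Reference<?>, Runnable> cleanups = new ConcurrentHashMap<>();

      // Pair an event with the cleanup that frees its external resources.
      void track(final Object event, final Runnable finalizeResource) {
        cleanups.put(new PhantomReference<>(event, queue), finalizeResource);
      }

      // Invoked periodically, cf. PipePeriodicalPhantomReferenceCleaner above.
      void pollAndClean() {
        Reference<?> ref;
        while ((ref = queue.poll()) != null) {
          final Runnable cleanup = cleanups.remove(ref);
          if (cleanup != null) {
            cleanup.run(); // e.g. decreaseSnapshotReference(snapshotPath)
          }
        }
      }
    }

diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/event/PipeConfigRegionWritePlanEvent.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/event/PipeConfigRegionWritePlanEvent.java
index 0d780bafca1e5..1515724c1e572 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/event/PipeConfigRegionWritePlanEvent.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/event/PipeConfigRegionWritePlanEvent.java
@@ -19,10 +19,10 @@
 
 package org.apache.iotdb.confignode.manager.pipe.event;
 
+import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
+import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
 import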
org.apache.iotdb.commons.pipe.event.EnrichedEvent; import org.apache.iotdb.commons.pipe.event.PipeWritePlanEvent; -import org.apache.iotdb.commons.pipe.pattern.PipePattern; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.tsfile.utils.ReadWriteIOUtils; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigNodeMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigNodeMetrics.java index a7a41f4f3593e..912e6ae25aa73 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigNodeMetrics.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigNodeMetrics.java @@ -20,6 +20,15 @@ package org.apache.iotdb.confignode.manager.pipe.metric; import org.apache.iotdb.confignode.manager.pipe.coordinator.PipeManager; +import org.apache.iotdb.confignode.manager.pipe.metric.overview.PipeConfigNodeRemainingTimeMetrics; +import org.apache.iotdb.confignode.manager.pipe.metric.overview.PipeConfigNodeResourceMetrics; +import org.apache.iotdb.confignode.manager.pipe.metric.overview.PipeProcedureMetrics; +import org.apache.iotdb.confignode.manager.pipe.metric.overview.PipeTaskInfoMetrics; +import org.apache.iotdb.confignode.manager.pipe.metric.overview.PipeTemporaryMetaInCoordinatorMetrics; +import org.apache.iotdb.confignode.manager.pipe.metric.receiver.PipeConfigNodeReceiverMetrics; +import org.apache.iotdb.confignode.manager.pipe.metric.sink.PipeConfigRegionSinkMetrics; +import org.apache.iotdb.confignode.manager.pipe.metric.source.PipeConfigNodeListenerMetrics; +import org.apache.iotdb.confignode.manager.pipe.metric.source.PipeConfigRegionSourceMetrics; import org.apache.iotdb.metrics.AbstractMetricService; import org.apache.iotdb.metrics.metricsets.IMetricSet; @@ -38,11 +47,12 @@ public void bindTo(final AbstractMetricService metricService) { PipeProcedureMetrics.getInstance().bindTo(metricService); pipeTaskInfoMetrics.bindTo(metricService); PipeConfigNodeListenerMetrics.getInstance().bindTo(metricService); - PipeConfigRegionExtractorMetrics.getInstance().bindTo(metricService); - PipeConfigRegionConnectorMetrics.getInstance().bindTo(metricService); + PipeConfigRegionSourceMetrics.getInstance().bindTo(metricService); + PipeConfigRegionSinkMetrics.getInstance().bindTo(metricService); PipeConfigNodeRemainingTimeMetrics.getInstance().bindTo(metricService); - PipeTemporaryMetaMetrics.getInstance().bindTo(metricService); + PipeTemporaryMetaInCoordinatorMetrics.getInstance().bindTo(metricService); PipeConfigNodeReceiverMetrics.getInstance().bindTo(metricService); + PipeConfigNodeResourceMetrics.getInstance().bindTo(metricService); } @Override @@ -50,10 +60,11 @@ public void unbindFrom(final AbstractMetricService metricService) { PipeProcedureMetrics.getInstance().unbindFrom(metricService); pipeTaskInfoMetrics.unbindFrom(metricService); PipeConfigNodeListenerMetrics.getInstance().unbindFrom(metricService); - PipeConfigRegionExtractorMetrics.getInstance().unbindFrom(metricService); - PipeConfigRegionConnectorMetrics.getInstance().unbindFrom(metricService); + PipeConfigRegionSourceMetrics.getInstance().unbindFrom(metricService); + PipeConfigRegionSinkMetrics.getInstance().unbindFrom(metricService); PipeConfigNodeRemainingTimeMetrics.getInstance().unbindFrom(metricService); - 
PipeTemporaryMetaMetrics.getInstance().unbindFrom(metricService); + PipeTemporaryMetaInCoordinatorMetrics.getInstance().unbindFrom(metricService); PipeConfigNodeReceiverMetrics.getInstance().unbindFrom(metricService); + PipeConfigNodeResourceMetrics.getInstance().unbindFrom(metricService); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigNodeRemainingTimeMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeConfigNodeRemainingTimeMetrics.java similarity index 90% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigNodeRemainingTimeMetrics.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeConfigNodeRemainingTimeMetrics.java index e3dbbed5d6c61..bd4f42042a1e4 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigNodeRemainingTimeMetrics.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeConfigNodeRemainingTimeMetrics.java @@ -17,12 +17,12 @@ * under the License. */ -package org.apache.iotdb.confignode.manager.pipe.metric; +package org.apache.iotdb.confignode.manager.pipe.metric.overview; -import org.apache.iotdb.commons.pipe.progress.PipeEventCommitManager; +import org.apache.iotdb.commons.pipe.agent.task.progress.PipeEventCommitManager; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; -import org.apache.iotdb.confignode.manager.pipe.extractor.IoTDBConfigRegionExtractor; +import org.apache.iotdb.confignode.manager.pipe.source.IoTDBConfigRegionSource; import org.apache.iotdb.metrics.AbstractMetricService; import org.apache.iotdb.metrics.metricsets.IMetricSet; import org.apache.iotdb.metrics.utils.MetricLevel; @@ -99,11 +99,15 @@ private void removeAutoGauge(final String pipeID) { //////////////////////////// register & deregister (pipe integration) //////////////////////////// - public void register(final IoTDBConfigRegionExtractor extractor) { + public void register(final IoTDBConfigRegionSource extractor) { // The metric is global thus the regionId is omitted final String pipeID = extractor.getPipeName() + "_" + extractor.getCreationTime(); remainingTimeOperatorMap - .computeIfAbsent(pipeID, k -> new PipeConfigNodeRemainingTimeOperator()) + .computeIfAbsent( + pipeID, + k -> + new PipeConfigNodeRemainingTimeOperator( + extractor.getPipeName(), extractor.getCreationTime())) .register(extractor); if (Objects.nonNull(metricService)) { createMetrics(pipeID); @@ -157,7 +161,8 @@ public void markRegionCommit(final String pipeID, final boolean isDataRegion) { public double getRemainingTime(final String pipeName, final long creationTime) { return remainingTimeOperatorMap .computeIfAbsent( - pipeName + "_" + creationTime, k -> new PipeConfigNodeRemainingTimeOperator()) + pipeName + "_" + creationTime, + k -> new PipeConfigNodeRemainingTimeOperator(pipeName, creationTime)) .getRemainingTime(); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigNodeRemainingTimeOperator.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeConfigNodeRemainingTimeOperator.java similarity index 85% rename from 
iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigNodeRemainingTimeOperator.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeConfigNodeRemainingTimeOperator.java index 7a5044b0c6162..c27e4cd898b06 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigNodeRemainingTimeOperator.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeConfigNodeRemainingTimeOperator.java @@ -17,13 +17,13 @@ * under the License. */ -package org.apache.iotdb.confignode.manager.pipe.metric; +package org.apache.iotdb.confignode.manager.pipe.metric.overview; -import org.apache.iotdb.commons.enums.PipeRemainingTimeRateAverageTime; +import org.apache.iotdb.commons.enums.PipeRateAverage; import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.commons.pipe.metric.PipeRemainingOperator; -import org.apache.iotdb.confignode.manager.pipe.execution.PipeConfigNodeSubtask; -import org.apache.iotdb.confignode.manager.pipe.extractor.IoTDBConfigRegionExtractor; +import org.apache.iotdb.confignode.manager.pipe.agent.task.PipeConfigNodeSubtask; +import org.apache.iotdb.confignode.manager.pipe.source.IoTDBConfigRegionSource; import com.codahale.metrics.Clock; import com.codahale.metrics.ExponentialMovingAverages; @@ -37,12 +37,16 @@ class PipeConfigNodeRemainingTimeOperator extends PipeRemainingOperator { - private final Set<IoTDBConfigRegionExtractor> configRegionExtractors = + private final Set<IoTDBConfigRegionSource> configRegionExtractors = Collections.newSetFromMap(new ConcurrentHashMap<>()); private final AtomicReference<Meter> configRegionCommitMeter = new AtomicReference<>(null); private double lastConfigRegionCommitSmoothingValue = Long.MAX_VALUE; + PipeConfigNodeRemainingTimeOperator(String pipeName, long creationTime) { + super(pipeName, creationTime); + } + //////////////////////////// Remaining time calculation //////////////////////////// /** @@ -52,13 +56,13 @@ class PipeConfigNodeRemainingTimeOperator extends PipeRemainingOperator { * @return The estimated remaining time */ double getRemainingTime() { - final PipeRemainingTimeRateAverageTime pipeRemainingTimeCommitRateAverageTime = + final PipeRateAverage pipeRemainingTimeCommitRateAverageTime = PipeConfig.getInstance().getPipeRemainingTimeCommitRateAverageTime(); // Do not calculate heartbeat event final long totalConfigRegionWriteEventCount = configRegionExtractors.stream() - .map(IoTDBConfigRegionExtractor::getUnTransferredEventCount) + .map(IoTDBConfigRegionSource::getUnTransferredEventCount) .reduce(Long::sum) .orElse(0L); @@ -90,8 +94,7 @@ class PipeConfigNodeRemainingTimeOperator extends PipeRemainingOperator { //////////////////////////// Register & deregister (pipe integration) //////////////////////////// - void register(final IoTDBConfigRegionExtractor extractor) { - setNameAndCreationTime(extractor.getPipeName(), extractor.getCreationTime()); + void register(final IoTDBConfigRegionSource extractor) { configRegionExtractors.add(extractor); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeConfigNodeResourceMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeConfigNodeResourceMetrics.java new file mode 100644 index 0000000000000..77fdcd9fbcd78 --- /dev/null +++ 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeConfigNodeResourceMetrics.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.manager.pipe.metric.overview; + +import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager; +import org.apache.iotdb.commons.service.metric.enums.Metric; +import org.apache.iotdb.confignode.manager.pipe.resource.PipeConfigNodeResourceManager; +import org.apache.iotdb.metrics.AbstractMetricService; +import org.apache.iotdb.metrics.metricsets.IMetricSet; +import org.apache.iotdb.metrics.utils.MetricLevel; +import org.apache.iotdb.metrics.utils.MetricType; + +public class PipeConfigNodeResourceMetrics implements IMetricSet { + + //////////////////////////// bindTo & unbindFrom (metric framework) //////////////////////////// + + @Override + public void bindTo(final AbstractMetricService metricService) { + // phantom reference count + metricService.createAutoGauge( + Metric.PIPE_PHANTOM_REFERENCE_COUNT.toString(), + MetricLevel.IMPORTANT, + PipeConfigNodeResourceManager.ref(), + PipePhantomReferenceManager::getPhantomReferenceCount); + } + + @Override + public void unbindFrom(final AbstractMetricService metricService) { + // phantom reference count + metricService.remove(MetricType.AUTO_GAUGE, Metric.PIPE_PHANTOM_REFERENCE_COUNT.toString()); + } + + //////////////////////////// singleton //////////////////////////// + + private static class PipeConfigNodeResourceMetricsHolder { + + private static final PipeConfigNodeResourceMetrics INSTANCE = + new PipeConfigNodeResourceMetrics(); + + private PipeConfigNodeResourceMetricsHolder() { + // empty constructor + } + } + + public static PipeConfigNodeResourceMetrics getInstance() { + return PipeConfigNodeResourceMetrics.PipeConfigNodeResourceMetricsHolder.INSTANCE; + } + + private PipeConfigNodeResourceMetrics() { + // empty constructor + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeProcedureMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeProcedureMetrics.java similarity index 98% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeProcedureMetrics.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeProcedureMetrics.java index 021c261eac288..102c050c8f252 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeProcedureMetrics.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeProcedureMetrics.java @@ -17,7 +17,7 @@ 
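// [Editor's note] The new PipeConfigNodeResourceMetrics file above, together with the
// PipeEventResource/eventResourceBuilder() change to PipeConfigRegionSnapshotEvent, wires pipe
// events into a phantom-reference leak guard: each tracked event registers a snapshot of its
// cleanup state, and a periodic job releases resources for events the GC proved unreachable.
// A minimal, self-contained sketch of that pattern using only java.lang.ref — every class and
// method name below is hypothetical, not IoTDB's actual API:
import java.lang.ref.PhantomReference;
import java.lang.ref.ReferenceQueue;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;

final class PhantomCleanupSketch {
  private static final ReferenceQueue<Object> QUEUE = new ReferenceQueue<>();
  // References must stay strongly reachable until polled, or they are collected themselves.
  private static final Set<EventRef> TRACKED = ConcurrentHashMap.newKeySet();

  private static final class EventRef extends PhantomReference<Object> {
    final AtomicBoolean isReleased; // shared with the event, like eventResourceBuilder()
    final Runnable finalizeResource; // e.g. decreaseSnapshotReference(snapshotPath)

    EventRef(final Object event, final AtomicBoolean isReleased, final Runnable finalizeResource) {
      super(event, QUEUE);
      this.isReleased = isReleased;
      this.finalizeResource = finalizeResource;
    }
  }

  static void track(final Object event, final AtomicBoolean isReleased, final Runnable cleanup) {
    TRACKED.add(new EventRef(event, isReleased, cleanup));
  }

  // The periodic "gcHook" job: drain references enqueued by the GC and finalize leaked events.
  static void gcHook() {
    EventRef ref;
    while ((ref = (EventRef) QUEUE.poll()) != null) {
      TRACKED.remove(ref);
      if (ref.isReleased.compareAndSet(false, true)) {
        ref.finalizeResource.run();
      }
      ref.clear();
    }
  }

  // What a PIPE_PHANTOM_REFERENCE_COUNT-style auto-gauge would report.
  static int getPhantomReferenceCount() {
    return TRACKED.size();
  }
}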
* under the License. */ -package org.apache.iotdb.confignode.manager.pipe.metric; +package org.apache.iotdb.confignode.manager.pipe.metric.overview; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeTaskInfoMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeTaskInfoMetrics.java similarity index 98% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeTaskInfoMetrics.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeTaskInfoMetrics.java index c6fa20eb71bf9..aa35ba89494b6 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeTaskInfoMetrics.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeTaskInfoMetrics.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.confignode.manager.pipe.metric; +package org.apache.iotdb.confignode.manager.pipe.metric.overview; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeTemporaryMetaMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeTemporaryMetaInCoordinatorMetrics.java similarity index 79% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeTemporaryMetaMetrics.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeTemporaryMetaInCoordinatorMetrics.java index 4732ec2365806..8ebc9d22b3c69 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeTemporaryMetaMetrics.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeTemporaryMetaInCoordinatorMetrics.java @@ -17,10 +17,11 @@ * under the License. */ -package org.apache.iotdb.confignode.manager.pipe.metric; +package org.apache.iotdb.confignode.manager.pipe.metric.overview; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeTemporaryMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTemporaryMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTemporaryMetaInCoordinator; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; import org.apache.iotdb.metrics.AbstractMetricService; @@ -39,17 +40,19 @@ import java.util.concurrent.ConcurrentHashMap; /** - * The {@link PipeTemporaryMetaMetrics} is to calculate the pipe-statistics from the {@link - * PipeTemporaryMeta}. The class is lock-free and can only read from the thread-safe variables from - * the {@link PipeTemporaryMeta}. + * The {@link PipeTemporaryMetaInCoordinatorMetrics} is to calculate the pipe-statistics from the + * {@link PipeTemporaryMeta}. The class is lock-free and can only read from the thread-safe + * variables from the {@link PipeTemporaryMeta}. 
*/ -public class PipeTemporaryMetaMetrics implements IMetricSet { - private static final Logger LOGGER = LoggerFactory.getLogger(PipeTemporaryMetaMetrics.class); +public class PipeTemporaryMetaInCoordinatorMetrics implements IMetricSet { + private static final Logger LOGGER = + LoggerFactory.getLogger(PipeTemporaryMetaInCoordinatorMetrics.class); @SuppressWarnings("java:S3077") private volatile AbstractMetricService metricService; - private final Map<String, PipeTemporaryMeta> pipeTemporaryMetaMap = new ConcurrentHashMap<>(); + private final Map<String, PipeTemporaryMetaInCoordinator> pipeTemporaryMetaMap = + new ConcurrentHashMap<>(); //////////////////////////// bindTo & unbindFrom (metric framework) //////////////////////////// @@ -64,13 +67,13 @@ private void createMetrics(final String pipeID) { } private void createAutoGauge(final String pipeID) { - final PipeTemporaryMeta pipeTemporaryMeta = pipeTemporaryMetaMap.get(pipeID); + final PipeTemporaryMetaInCoordinator pipeTemporaryMeta = pipeTemporaryMetaMap.get(pipeID); final String[] pipeNameAndCreationTime = pipeID.split("_"); metricService.createAutoGauge( Metric.PIPE_GLOBAL_REMAINING_EVENT_COUNT.toString(), MetricLevel.IMPORTANT, pipeTemporaryMeta, - PipeTemporaryMeta::getGlobalRemainingEvents, + PipeTemporaryMetaInCoordinator::getGlobalRemainingEvents, Tag.NAME.toString(), pipeNameAndCreationTime[0], Tag.CREATION_TIME.toString(), @@ -79,7 +82,7 @@ private void createAutoGauge(final String pipeID) { Metric.PIPE_GLOBAL_REMAINING_TIME.toString(), MetricLevel.IMPORTANT, pipeTemporaryMeta, - PipeTemporaryMeta::getGlobalRemainingTime, + PipeTemporaryMetaInCoordinator::getGlobalRemainingTime, Tag.NAME.toString(), pipeNameAndCreationTime[0], Tag.CREATION_TIME.toString(), @@ -123,7 +126,8 @@ private void removeAutoGauge(final String pipeID) { public void register(final PipeMeta pipeMeta) { final String taskID = pipeMeta.getStaticMeta().getPipeName() + "_" + pipeMeta.getStaticMeta().getCreationTime(); - pipeTemporaryMetaMap.putIfAbsent(taskID, pipeMeta.getTemporaryMeta()); + pipeTemporaryMetaMap.putIfAbsent( + taskID, (PipeTemporaryMetaInCoordinator) pipeMeta.getTemporaryMeta()); if (Objects.nonNull(metricService)) { createMetrics(taskID); } @@ -163,14 +167,15 @@ public void handleTemporaryMetaChanges(final Iterable<PipeMeta> pipeMetaList) { private static class PipeTemporaryMetaMetricsHolder { - private static final PipeTemporaryMetaMetrics INSTANCE = new PipeTemporaryMetaMetrics(); + private static final PipeTemporaryMetaInCoordinatorMetrics INSTANCE = + new PipeTemporaryMetaInCoordinatorMetrics(); private PipeTemporaryMetaMetricsHolder() { // Empty constructor } } - public static PipeTemporaryMetaMetrics getInstance() { + public static PipeTemporaryMetaInCoordinatorMetrics getInstance() { return PipeTemporaryMetaMetricsHolder.INSTANCE; } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigNodeReceiverMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/receiver/PipeConfigNodeReceiverMetrics.java similarity index 98% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigNodeReceiverMetrics.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/receiver/PipeConfigNodeReceiverMetrics.java index e0a30f42c2e4a..d264abe3c31bc 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigNodeReceiverMetrics.java +++ 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/receiver/PipeConfigNodeReceiverMetrics.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.confignode.manager.pipe.metric; +package org.apache.iotdb.confignode.manager.pipe.metric.receiver; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigRegionConnectorMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/sink/PipeConfigRegionSinkMetrics.java similarity index 89% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigRegionConnectorMetrics.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/sink/PipeConfigRegionSinkMetrics.java index 8fcb6fd902967..aa3661440fa1a 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigRegionConnectorMetrics.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/sink/PipeConfigRegionSinkMetrics.java @@ -17,11 +17,11 @@ * under the License. */ -package org.apache.iotdb.confignode.manager.pipe.metric; +package org.apache.iotdb.confignode.manager.pipe.metric.sink; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; -import org.apache.iotdb.confignode.manager.pipe.execution.PipeConfigNodeSubtask; +import org.apache.iotdb.confignode.manager.pipe.agent.task.PipeConfigNodeSubtask; import org.apache.iotdb.metrics.AbstractMetricService; import org.apache.iotdb.metrics.metricsets.IMetricSet; import org.apache.iotdb.metrics.type.Rate; @@ -36,10 +36,9 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -public class PipeConfigRegionConnectorMetrics implements IMetricSet { +public class PipeConfigRegionSinkMetrics implements IMetricSet { - private static final Logger LOGGER = - LoggerFactory.getLogger(PipeConfigRegionConnectorMetrics.class); + private static final Logger LOGGER = LoggerFactory.getLogger(PipeConfigRegionSinkMetrics.class); @SuppressWarnings("java:S3077") private volatile AbstractMetricService metricService; @@ -140,19 +139,18 @@ public void markConfigEvent(final String taskID) { private static class PipeConfigNodeSubtaskMetricsHolder { - private static final PipeConfigRegionConnectorMetrics INSTANCE = - new PipeConfigRegionConnectorMetrics(); + private static final PipeConfigRegionSinkMetrics INSTANCE = new PipeConfigRegionSinkMetrics(); private PipeConfigNodeSubtaskMetricsHolder() { // Empty constructor } } - public static PipeConfigRegionConnectorMetrics getInstance() { + public static PipeConfigRegionSinkMetrics getInstance() { return PipeConfigNodeSubtaskMetricsHolder.INSTANCE; } - private PipeConfigRegionConnectorMetrics() { + private PipeConfigRegionSinkMetrics() { // Empty constructor } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigNodeListenerMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/source/PipeConfigNodeListenerMetrics.java similarity index 94% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigNodeListenerMetrics.java rename to 
iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/source/PipeConfigNodeListenerMetrics.java index 554c89326ee9e..5cfc1a0f62254 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigNodeListenerMetrics.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/source/PipeConfigNodeListenerMetrics.java @@ -17,11 +17,11 @@ * under the License. */ -package org.apache.iotdb.confignode.manager.pipe.metric; +package org.apache.iotdb.confignode.manager.pipe.metric.source; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent; -import org.apache.iotdb.confignode.manager.pipe.extractor.ConfigRegionListeningQueue; +import org.apache.iotdb.confignode.manager.pipe.source.ConfigRegionListeningQueue; import org.apache.iotdb.metrics.AbstractMetricService; import org.apache.iotdb.metrics.metricsets.IMetricSet; import org.apache.iotdb.metrics.utils.MetricLevel; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigRegionExtractorMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/source/PipeConfigRegionSourceMetrics.java similarity index 77% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigRegionExtractorMetrics.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/source/PipeConfigRegionSourceMetrics.java index b736b2a82e87f..59e204fd27634 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigRegionExtractorMetrics.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/source/PipeConfigRegionSourceMetrics.java @@ -17,11 +17,11 @@ * under the License. 
*/ -package org.apache.iotdb.confignode.manager.pipe.metric; +package org.apache.iotdb.confignode.manager.pipe.metric.source; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; -import org.apache.iotdb.confignode.manager.pipe.extractor.IoTDBConfigRegionExtractor; +import org.apache.iotdb.confignode.manager.pipe.source.IoTDBConfigRegionSource; import org.apache.iotdb.metrics.AbstractMetricService; import org.apache.iotdb.metrics.metricsets.IMetricSet; import org.apache.iotdb.metrics.utils.MetricLevel; @@ -35,14 +35,13 @@ import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; -public class PipeConfigRegionExtractorMetrics implements IMetricSet { +public class PipeConfigRegionSourceMetrics implements IMetricSet { - private static final Logger LOGGER = - LoggerFactory.getLogger(PipeConfigRegionExtractorMetrics.class); + private static final Logger LOGGER = LoggerFactory.getLogger(PipeConfigRegionSourceMetrics.class); private volatile AbstractMetricService metricService; - private final Map<String, IoTDBConfigRegionExtractor> extractorMap = new ConcurrentHashMap<>(); + private final Map<String, IoTDBConfigRegionSource> extractorMap = new ConcurrentHashMap<>(); //////////////////////////// bindTo & unbindFrom (metric framework) //////////////////////////// @@ -57,12 +56,12 @@ private void createMetrics(final String taskID) { } private void createAutoGauge(final String taskID) { - final IoTDBConfigRegionExtractor extractor = extractorMap.get(taskID); + final IoTDBConfigRegionSource extractor = extractorMap.get(taskID); metricService.createAutoGauge( Metric.UNTRANSFERRED_CONFIG_COUNT.toString(), MetricLevel.IMPORTANT, extractorMap.get(taskID), - IoTDBConfigRegionExtractor::getUnTransferredEventCount, + IoTDBConfigRegionSource::getUnTransferredEventCount, Tag.NAME.toString(), extractor.getPipeName(), Tag.CREATION_TIME.toString(), @@ -83,7 +82,7 @@ private void removeMetrics(final String taskID) { } private void removeAutoGauge(final String taskID) { - final IoTDBConfigRegionExtractor extractor = extractorMap.get(taskID); + final IoTDBConfigRegionSource extractor = extractorMap.get(taskID); // Pending event count metricService.remove( MetricType.AUTO_GAUGE, @@ -96,7 +95,7 @@ private void removeAutoGauge(final String taskID) { //////////////////////////// pipe integration //////////////////////////// - public void register(final IoTDBConfigRegionExtractor extractor) { + public void register(final IoTDBConfigRegionSource extractor) { final String taskID = extractor.getTaskID(); extractorMap.putIfAbsent(taskID, extractor); if (Objects.nonNull(metricService)) { @@ -121,7 +120,7 @@ public void deregister(final String taskID) { public long getRemainingEventCount(final String pipeName, final long creationTime) { final String taskID = pipeName + "_" + creationTime; - final IoTDBConfigRegionExtractor extractor = extractorMap.get(taskID); + final IoTDBConfigRegionSource extractor = extractorMap.get(taskID); // Do not print log to allow collection when config region extractor does not exists if (Objects.isNull(extractor)) { return 0; @@ -131,21 +130,21 @@ public long getRemainingEventCount(final String pipeName, final long creationTim //////////////////////////// singleton //////////////////////////// - private static class PipeConfigRegionExtractorMetricsHolder { + private static class PipeConfigRegionSourceMetricsHolder { - private static final PipeConfigRegionExtractorMetrics INSTANCE = - new PipeConfigRegionExtractorMetrics(); + private static final PipeConfigRegionSourceMetrics INSTANCE = 
+ new PipeConfigRegionSourceMetrics(); - private PipeConfigRegionExtractorMetricsHolder() { + private PipeConfigRegionSourceMetricsHolder() { // Empty constructor } } - public static PipeConfigRegionExtractorMetrics getInstance() { - return PipeConfigRegionExtractorMetricsHolder.INSTANCE; + public static PipeConfigRegionSourceMetrics getInstance() { + return PipeConfigRegionSourceMetricsHolder.INSTANCE; } - private PipeConfigRegionExtractorMetrics() { + private PipeConfigRegionSourceMetrics() { // Empty constructor } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/receiver/protocol/IoTDBConfigNodeReceiver.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/receiver/protocol/IoTDBConfigNodeReceiver.java index 711d7bdf26c65..d06762d7f88bb 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/receiver/protocol/IoTDBConfigNodeReceiver.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/receiver/protocol/IoTDBConfigNodeReceiver.java @@ -20,20 +20,25 @@ package org.apache.iotdb.confignode.manager.pipe.receiver.protocol; import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.auth.entity.PrivilegeType; import org.apache.iotdb.commons.conf.CommonDescriptor; -import org.apache.iotdb.commons.pipe.connector.PipeReceiverStatusHandler; -import org.apache.iotdb.commons.pipe.connector.payload.airgap.AirGapPseudoTPipeTransferRequest; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferCompressedReq; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferFileSealReqV1; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferFileSealReqV2; -import org.apache.iotdb.commons.pipe.pattern.IoTDBPipePattern; +import org.apache.iotdb.commons.conf.IoTDBConstant; +import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.commons.path.PathPatternTree; +import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBPipePattern; import org.apache.iotdb.commons.pipe.receiver.IoTDBFileReceiver; +import org.apache.iotdb.commons.pipe.receiver.PipeReceiverStatusHandler; +import org.apache.iotdb.commons.pipe.sink.payload.airgap.AirGapPseudoTPipeTransferRequest; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferCompressedReq; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFileSealReqV1; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFileSealReqV2; import org.apache.iotdb.commons.schema.ttl.TTLCache; +import org.apache.iotdb.commons.utils.StatusUtils; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; -import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan; +import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.consensus.request.write.database.DatabaseSchemaPlan; import org.apache.iotdb.confignode.consensus.request.write.database.DeleteDatabasePlan; import org.apache.iotdb.confignode.consensus.request.write.database.SetTTLPlan; @@ -46,16 +51,16 @@ import 
org.apache.iotdb.confignode.consensus.request.write.template.ExtendSchemaTemplatePlan; import org.apache.iotdb.confignode.consensus.request.write.trigger.DeleteTriggerInTablePlan; import org.apache.iotdb.confignode.manager.ConfigManager; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigNodeHandshakeV1Req; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigNodeHandshakeV2Req; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigPlanReq; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigSnapshotPieceReq; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigSnapshotSealReq; import org.apache.iotdb.confignode.manager.pipe.event.PipeConfigRegionSnapshotEvent; -import org.apache.iotdb.confignode.manager.pipe.extractor.IoTDBConfigRegionExtractor; -import org.apache.iotdb.confignode.manager.pipe.metric.PipeConfigNodeReceiverMetrics; +import org.apache.iotdb.confignode.manager.pipe.metric.receiver.PipeConfigNodeReceiverMetrics; import org.apache.iotdb.confignode.manager.pipe.receiver.visitor.PipeConfigPhysicalPlanExceptionVisitor; import org.apache.iotdb.confignode.manager.pipe.receiver.visitor.PipeConfigPhysicalPlanTSStatusVisitor; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigNodeHandshakeV1Req; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigNodeHandshakeV2Req; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigPlanReq; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigSnapshotPieceReq; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigSnapshotSealReq; +import org.apache.iotdb.confignode.manager.pipe.source.IoTDBConfigRegionSource; import org.apache.iotdb.confignode.persistence.schema.CNPhysicalPlanGenerator; import org.apache.iotdb.confignode.persistence.schema.CNSnapshotFileType; import org.apache.iotdb.confignode.persistence.schema.ConfignodeSnapshotParser; @@ -68,6 +73,8 @@ import org.apache.iotdb.confignode.rpc.thrift.TUnsetSchemaTemplateReq; import org.apache.iotdb.confignode.service.ConfigNode; import org.apache.iotdb.consensus.exception.ConsensusException; +import org.apache.iotdb.db.protocol.session.IClientSession; +import org.apache.iotdb.db.protocol.session.SessionManager; import org.apache.iotdb.db.queryengine.common.header.ColumnHeaderConstant; import org.apache.iotdb.rpc.RpcUtils; import org.apache.iotdb.rpc.TSStatusCode; @@ -91,6 +98,8 @@ public class IoTDBConfigNodeReceiver extends IoTDBFileReceiver { private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBConfigNodeReceiver.class); + private static final SessionManager SESSION_MANAGER = SessionManager.getInstance(); + private static final AtomicInteger QUERY_ID_GENERATOR = new AtomicInteger(0); private static final PipeConfigPhysicalPlanTSStatusVisitor STATUS_VISITOR = @@ -197,6 +206,15 @@ private TPipeTransferResp handleTransferConfigPlan(final PipeTransferConfigPlanR private TSStatus executePlanAndClassifyExceptions(final ConfigPhysicalPlan plan) { TSStatus result; try { + result = checkPermission(plan); + if (result.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + LOGGER.warn( + "Receiver id = {}: Permission check failed while executing plan {}: {}", + receiverId.get(), + plan, + result); + return result; + } result = executePlan(plan); if (result.getCode() != 
TSStatusCode.SUCCESS_STATUS.getStatusCode()) { LOGGER.warn( @@ -217,6 +235,128 @@ private TSStatus executePlanAndClassifyExceptions(final ConfigPhysicalPlan plan) return result; } + private TSStatus checkPermission(final ConfigPhysicalPlan plan) { + switch (plan.getType()) { + case CreateDatabase: + case AlterDatabase: + case DeleteDatabase: + return configManager + .checkUserPrivileges( + username, Collections.emptyList(), PrivilegeType.MANAGE_DATABASE.ordinal()) + .getStatus(); + case ExtendSchemaTemplate: + return configManager + .checkUserPrivileges( + username, Collections.emptyList(), PrivilegeType.EXTEND_TEMPLATE.ordinal()) + .getStatus(); + case CreateSchemaTemplate: + case CommitSetSchemaTemplate: + case PipeUnsetTemplate: + return CommonDescriptor.getInstance().getConfig().getAdminName().equals(username) + ? StatusUtils.OK + : new TSStatus(TSStatusCode.NO_PERMISSION.getStatusCode()) + .setMessage("Only the admin user can perform this operation"); + case PipeDeleteTimeSeries: + return configManager + .checkUserPrivileges( + username, + new ArrayList<>( + PathPatternTree.deserialize( + ((PipeDeleteTimeSeriesPlan) plan).getPatternTreeBytes()) + .getAllPathPatterns()), + PrivilegeType.WRITE_SCHEMA.ordinal()) + .getStatus(); + case PipeDeleteLogicalView: + return configManager + .checkUserPrivileges( + username, + new ArrayList<>( + PathPatternTree.deserialize( + ((PipeDeleteLogicalViewPlan) plan).getPatternTreeBytes()) + .getAllPathPatterns()), + PrivilegeType.WRITE_SCHEMA.ordinal()) + .getStatus(); + case PipeDeactivateTemplate: + return configManager + .checkUserPrivileges( + username, + new ArrayList<>(((PipeDeactivateTemplatePlan) plan).getTemplateSetInfo().keySet()), + PrivilegeType.WRITE_SCHEMA.ordinal()) + .getStatus(); + case SetTTL: + return Objects.equals( + configManager + .getTTLManager() + .getAllTTL() + .get( + String.join( + String.valueOf(IoTDBConstant.PATH_SEPARATOR), + ((SetTTLPlan) plan).getPathPattern())), + ((SetTTLPlan) plan).getTTL()) + ? StatusUtils.OK + : configManager + .checkUserPrivileges( + username, + ((SetTTLPlan) plan).isDataBase() + ? Collections.emptyList() + : Collections.singletonList( + new PartialPath(((SetTTLPlan) plan).getPathPattern())), + ((SetTTLPlan) plan).isDataBase() + ? PrivilegeType.MANAGE_DATABASE.ordinal() + : PrivilegeType.WRITE_SCHEMA.ordinal()) + .getStatus(); + case UpdateTriggerStateInTable: + case DeleteTriggerInTable: + return configManager + .checkUserPrivileges( + username, Collections.emptyList(), PrivilegeType.USE_TRIGGER.ordinal()) + .getStatus(); + case GrantRole: + case GrantUser: + case RevokeUser: + case RevokeRole: + for (final int permission : ((AuthorPlan) plan).getPermissions()) { + final TSStatus status = + configManager + .checkUserPrivilegeGrantOpt( + username, + PrivilegeType.isPathRelevant(permission) + ? ((AuthorPlan) plan).getNodeNameList() + : Collections.emptyList(), + permission) + .getStatus(); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; + } + } + return StatusUtils.OK; + case UpdateUser: + return ((AuthorPlan) plan).getUserName().equals(username) + ? 
StatusUtils.OK + : configManager + .checkUserPrivileges( + username, Collections.emptyList(), PrivilegeType.MANAGE_USER.ordinal()) + .getStatus(); + case CreateUser: + case CreateUserWithRawPassword: + case DropUser: + return configManager + .checkUserPrivileges( + username, Collections.emptyList(), PrivilegeType.MANAGE_USER.ordinal()) + .getStatus(); + case CreateRole: + case DropRole: + case GrantRoleToUser: + case RevokeRoleFromUser: + return configManager + .checkUserPrivileges( + username, Collections.emptyList(), PrivilegeType.MANAGE_ROLE.ordinal()) + .getStatus(); + default: + return StatusUtils.OK; + } + } + private TSStatus executePlan(final ConfigPhysicalPlan plan) throws ConsensusException { switch (plan.getType()) { case CreateDatabase: @@ -321,7 +461,17 @@ private String generatePseudoQueryId() { @Override protected String getClusterId() { - return ConfigNode.getInstance().getConfigManager().getClusterManager().getClusterId(); + return configManager.getClusterManager().getClusterId(); + } + + @Override + protected boolean shouldLogin() { + return lastSuccessfulLoginTime == Long.MIN_VALUE || super.shouldLogin(); + } + + @Override + protected TSStatus login() { + return configManager.login(username, password).getStatus(); } @Override @@ -329,6 +479,18 @@ protected String getReceiverFileBaseDir() { return ConfigNodeDescriptor.getInstance().getConf().getPipeReceiverFileDir(); } + @Override + protected String getSenderHost() { + final IClientSession session = SESSION_MANAGER.getCurrSession(); + return session != null ? session.getClientAddress() : "unknown"; + } + + @Override + protected String getSenderPort() { + final IClientSession session = SESSION_MANAGER.getCurrSession(); + return session != null ? String.valueOf(session.getClientPort()) : "unknown"; + } + @Override protected TSStatus loadFileV1( final PipeTransferFileSealReqV1 req, final String fileAbsolutePath) { @@ -358,7 +520,7 @@ protected TSStatus loadFileV2( new IoTDBPipePattern(parameters.get(ColumnHeaderConstant.PATH_PATTERN)); final List<TSStatus> results = new ArrayList<>(); while (generator.hasNext()) { - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .process(generator.next(), pattern) .filter(configPhysicalPlan -> executionTypes.contains(configPhysicalPlan.getType())) .ifPresent( @@ -367,4 +529,9 @@ protected TSStatus loadFileV2( } return PipeReceiverStatusHandler.getPriorStatus(results); } + + @Override + protected void closeSession() { + // Do nothing. The session will be closed in the data node receiver. 
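// [Editor's note] checkPermission() above gates every replicated config plan on the privileges
// of the authenticated pipe user before executePlan() runs it. A minimal table-driven sketch of
// the same idea, with invented enum values and a stand-in for configManager.checkUserPrivileges():
import java.util.EnumMap;
import java.util.Map;

final class ReceiverPermissionSketch {
  enum PlanType { CREATE_DATABASE, DELETE_DATABASE, CREATE_USER, DROP_USER, CREATE_ROLE }
  enum Privilege { MANAGE_DATABASE, MANAGE_USER, MANAGE_ROLE }

  // Plan types absent from the table need no privilege, mirroring the "default: return OK" arm.
  private static final Map<PlanType, Privilege> REQUIRED = new EnumMap<>(PlanType.class);

  static {
    REQUIRED.put(PlanType.CREATE_DATABASE, Privilege.MANAGE_DATABASE);
    REQUIRED.put(PlanType.DELETE_DATABASE, Privilege.MANAGE_DATABASE);
    REQUIRED.put(PlanType.CREATE_USER, Privilege.MANAGE_USER);
    REQUIRED.put(PlanType.DROP_USER, Privilege.MANAGE_USER);
    REQUIRED.put(PlanType.CREATE_ROLE, Privilege.MANAGE_ROLE);
  }

  interface PrivilegeChecker {
    boolean userHas(String username, Privilege privilege);
  }

  // Returns true when the plan may execute; a rejection short-circuits before execution, just
  // as the receiver returns a non-success TSStatus before calling executePlan(plan).
  static boolean mayExecute(
      final PrivilegeChecker checker, final String username, final PlanType plan) {
    final Privilege required = REQUIRED.get(plan);
    return required == null || checker.userHas(username, required);
  }
}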
+ } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/receiver/visitor/PipeConfigPhysicalPlanTSStatusVisitor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/receiver/visitor/PipeConfigPhysicalPlanTSStatusVisitor.java index f2db9d8c3ee23..a772ab774578c 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/receiver/visitor/PipeConfigPhysicalPlanTSStatusVisitor.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/receiver/visitor/PipeConfigPhysicalPlanTSStatusVisitor.java @@ -22,7 +22,7 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanVisitor; -import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan; +import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.consensus.request.write.database.DatabaseSchemaPlan; import org.apache.iotdb.confignode.consensus.request.write.database.DeleteDatabasePlan; import org.apache.iotdb.confignode.consensus.request.write.database.SetTTLPlan; @@ -52,20 +52,10 @@ public TSStatus visitPlan(final ConfigPhysicalPlan plan, final TSStatus context) @Override public TSStatus visitCreateDatabase(final DatabaseSchemaPlan plan, final TSStatus context) { if (context.getCode() == TSStatusCode.DATABASE_ALREADY_EXISTS.getStatusCode()) { - if (context - .getMessage() - .contains( - String.format( - "%s has already been created as database", plan.getSchema().getName()))) { - // The same database has been created - return new TSStatus( - TSStatusCode.PIPE_RECEIVER_IDEMPOTENT_CONFLICT_EXCEPTION.getStatusCode()) - .setMessage(context.getMessage()); - } - // Lower or higher level database has been created - return new TSStatus(TSStatusCode.PIPE_RECEIVER_USER_CONFLICT_EXCEPTION.getStatusCode()) + return new TSStatus(TSStatusCode.PIPE_RECEIVER_IDEMPOTENT_CONFLICT_EXCEPTION.getStatusCode()) .setMessage(context.getMessage()); - } else if (context.getCode() == TSStatusCode.SCHEMA_QUOTA_EXCEEDED.getStatusCode()) { + } else if (context.getCode() == TSStatusCode.SCHEMA_QUOTA_EXCEEDED.getStatusCode() + || context.getCode() == TSStatusCode.DATABASE_CONFLICT.getStatusCode()) { return new TSStatus(TSStatusCode.PIPE_RECEIVER_USER_CONFLICT_EXCEPTION.getStatusCode()) .setMessage(context.getMessage()); } else if (context.getCode() == TSStatusCode.METADATA_ERROR.getStatusCode()) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/PipeConfigNodeCopiedFileDirStartupCleaner.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/PipeConfigNodeCopiedFileDirStartupCleaner.java index 1c4e2288c4e95..a60d3a7fa659d 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/PipeConfigNodeCopiedFileDirStartupCleaner.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/PipeConfigNodeCopiedFileDirStartupCleaner.java @@ -19,7 +19,7 @@ package org.apache.iotdb.confignode.manager.pipe.resource; -import org.apache.iotdb.commons.pipe.resource.PipeSnapshotResourceManager; +import org.apache.iotdb.commons.pipe.resource.snapshot.PipeSnapshotResourceManager; import org.apache.iotdb.commons.utils.FileUtils; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; diff --git 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/PipeConfigNodeResourceManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/PipeConfigNodeResourceManager.java index 3500d26e3c215..33b68f63821aa 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/PipeConfigNodeResourceManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/PipeConfigNodeResourceManager.java @@ -19,14 +19,17 @@ package org.apache.iotdb.confignode.manager.pipe.resource; -import org.apache.iotdb.commons.pipe.resource.PipeSnapshotResourceManager; import org.apache.iotdb.commons.pipe.resource.log.PipeLogManager; +import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager; +import org.apache.iotdb.commons.pipe.resource.snapshot.PipeSnapshotResourceManager; +import org.apache.iotdb.confignode.manager.pipe.resource.ref.PipeConfigNodePhantomReferenceManager; import org.apache.iotdb.confignode.manager.pipe.resource.snapshot.PipeConfigNodeSnapshotResourceManager; public class PipeConfigNodeResourceManager { private final PipeSnapshotResourceManager pipeSnapshotResourceManager; private final PipeLogManager pipeLogManager; + private final PipePhantomReferenceManager pipePhantomReferenceManager; public static PipeSnapshotResourceManager snapshot() { return PipeConfigNodeResourceManager.PipeResourceManagerHolder.INSTANCE @@ -37,11 +40,16 @@ public static PipeLogManager log() { return PipeConfigNodeResourceManager.PipeResourceManagerHolder.INSTANCE.pipeLogManager; } + public static PipePhantomReferenceManager ref() { + return PipeResourceManagerHolder.INSTANCE.pipePhantomReferenceManager; + } + ///////////////////////////// SINGLETON ///////////////////////////// private PipeConfigNodeResourceManager() { pipeSnapshotResourceManager = new PipeConfigNodeSnapshotResourceManager(); pipeLogManager = new PipeLogManager(); + pipePhantomReferenceManager = new PipeConfigNodePhantomReferenceManager(); } private static class PipeResourceManagerHolder { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/ref/PipeConfigNodePhantomReferenceManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/ref/PipeConfigNodePhantomReferenceManager.java new file mode 100644 index 0000000000000..b867163e47edc --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/ref/PipeConfigNodePhantomReferenceManager.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.manager.pipe.resource.ref; + +import org.apache.iotdb.commons.pipe.config.PipeConfig; +import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager; +import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent; + +public class PipeConfigNodePhantomReferenceManager extends PipePhantomReferenceManager { + + public PipeConfigNodePhantomReferenceManager() { + super(); + + PipeConfigNodeAgent.runtime() + .registerPhantomReferenceCleanJob( + "PipePhantomReferenceManager#gcHook()", + // NOTE: lambda CAN NOT be replaced with method reference + () -> super.gcHook(), + PipeConfig.getInstance().getPipeEventReferenceEliminateIntervalSeconds()); + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/snapshot/PipeConfigNodeSnapshotResourceManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/snapshot/PipeConfigNodeSnapshotResourceManager.java index 1776965456092..df8a05cf56849 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/snapshot/PipeConfigNodeSnapshotResourceManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/snapshot/PipeConfigNodeSnapshotResourceManager.java @@ -20,7 +20,7 @@ package org.apache.iotdb.confignode.manager.pipe.resource.snapshot; import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.pipe.resource.PipeSnapshotResourceManager; +import org.apache.iotdb.commons.pipe.resource.snapshot.PipeSnapshotResourceManager; import java.util.Collections; import java.util.HashSet; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/client/IoTDBConfigNodeSyncClientManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/client/IoTDBConfigNodeSyncClientManager.java similarity index 66% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/client/IoTDBConfigNodeSyncClientManager.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/client/IoTDBConfigNodeSyncClientManager.java index e420a6b5c6da2..dfe4844d09e0a 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/client/IoTDBConfigNodeSyncClientManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/client/IoTDBConfigNodeSyncClientManager.java @@ -17,14 +17,14 @@ * under the License. 
*/ -package org.apache.iotdb.confignode.manager.pipe.connector.client; +package org.apache.iotdb.confignode.manager.pipe.sink.client; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.commons.conf.CommonDescriptor; -import org.apache.iotdb.commons.pipe.connector.client.IoTDBSyncClientManager; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferHandshakeV2Req; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigNodeHandshakeV1Req; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigNodeHandshakeV2Req; +import org.apache.iotdb.commons.pipe.sink.client.IoTDBSyncClientManager; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferHandshakeV2Req; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigNodeHandshakeV1Req; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigNodeHandshakeV2Req; import org.apache.iotdb.confignode.service.ConfigNode; import java.io.IOException; @@ -35,11 +35,29 @@ public class IoTDBConfigNodeSyncClientManager extends IoTDBSyncClientManager { public IoTDBConfigNodeSyncClientManager( List<TEndPoint> endPoints, + String username, + String password, boolean useSSL, String trustStorePath, String trustStorePwd, - String loadBalanceStrategy) { - super(endPoints, useSSL, trustStorePath, trustStorePwd, false, loadBalanceStrategy); + String loadBalanceStrategy, + boolean shouldReceiverConvertOnTypeMismatch, + String loadTsFileStrategy, + boolean validateTsFile, + boolean shouldMarkAsPipeRequest) { + super( + endPoints, + username, + password, + useSSL, + trustStorePath, + trustStorePwd, + false, + loadBalanceStrategy, + shouldReceiverConvertOnTypeMismatch, + loadTsFileStrategy, + validateTsFile, + shouldMarkAsPipeRequest); } @Override diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/payload/PipeTransferConfigNodeHandshakeV1Req.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/payload/PipeTransferConfigNodeHandshakeV1Req.java similarity index 90% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/payload/PipeTransferConfigNodeHandshakeV1Req.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/payload/PipeTransferConfigNodeHandshakeV1Req.java index c161081782cb2..bc45cd8c5d1da 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/payload/PipeTransferConfigNodeHandshakeV1Req.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/payload/PipeTransferConfigNodeHandshakeV1Req.java @@ -17,10 +17,10 @@ * under the License. 
*/ -package org.apache.iotdb.confignode.manager.pipe.connector.payload; +package org.apache.iotdb.confignode.manager.pipe.sink.payload; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferHandshakeV1Req; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferHandshakeV1Req; import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq; import java.io.IOException; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/payload/PipeTransferConfigNodeHandshakeV2Req.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/payload/PipeTransferConfigNodeHandshakeV2Req.java similarity index 90% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/payload/PipeTransferConfigNodeHandshakeV2Req.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/payload/PipeTransferConfigNodeHandshakeV2Req.java index f19b94ffed2be..57c572dbf1747 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/payload/PipeTransferConfigNodeHandshakeV2Req.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/payload/PipeTransferConfigNodeHandshakeV2Req.java @@ -17,10 +17,10 @@ * under the License. */ -package org.apache.iotdb.confignode.manager.pipe.connector.payload; +package org.apache.iotdb.confignode.manager.pipe.sink.payload; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferHandshakeV2Req; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferHandshakeV2Req; import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq; import java.io.IOException; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/payload/PipeTransferConfigPlanReq.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/payload/PipeTransferConfigPlanReq.java similarity index 86% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/payload/PipeTransferConfigPlanReq.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/payload/PipeTransferConfigPlanReq.java index ee9140bfb4f6d..4bbaf77da23c7 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/payload/PipeTransferConfigPlanReq.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/payload/PipeTransferConfigPlanReq.java @@ -17,10 +17,10 @@ * under the License. 
*/ -package org.apache.iotdb.confignode.manager.pipe.connector.payload; +package org.apache.iotdb.confignode.manager.pipe.sink.payload; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.IoTDBConnectorRequestVersion; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.IoTDBSinkRequestVersion; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType; import org.apache.iotdb.consensus.common.request.IConsensusRequest; import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq; @@ -42,7 +42,7 @@ private PipeTransferConfigPlanReq() { public static PipeTransferConfigPlanReq toTPipeTransferReq(IConsensusRequest consensusRequest) { final PipeTransferConfigPlanReq req = new PipeTransferConfigPlanReq(); - req.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion(); + req.version = IoTDBSinkRequestVersion.VERSION_1.getVersion(); req.type = PipeRequestType.TRANSFER_CONFIG_PLAN.getType(); req.body = consensusRequest.serializeToByteBuffer(); @@ -64,7 +64,7 @@ public static PipeTransferConfigPlanReq fromTPipeTransferReq(TPipeTransferReq tr public static byte[] toTPipeTransferBytes(IConsensusRequest consensusRequest) throws IOException { try (final PublicBAOS byteArrayOutputStream = new PublicBAOS(); final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) { - ReadWriteIOUtils.write(IoTDBConnectorRequestVersion.VERSION_1.getVersion(), outputStream); + ReadWriteIOUtils.write(IoTDBSinkRequestVersion.VERSION_1.getVersion(), outputStream); ReadWriteIOUtils.write(PipeRequestType.TRANSFER_CONFIG_PLAN.getType(), outputStream); return BytesUtils.concatByteArray( byteArrayOutputStream.toByteArray(), consensusRequest.serializeToByteBuffer().array()); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/payload/PipeTransferConfigSnapshotPieceReq.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/payload/PipeTransferConfigSnapshotPieceReq.java similarity index 90% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/payload/PipeTransferConfigSnapshotPieceReq.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/payload/PipeTransferConfigSnapshotPieceReq.java index 1be784d1a5e24..b05f98518cb8a 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/payload/PipeTransferConfigSnapshotPieceReq.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/payload/PipeTransferConfigSnapshotPieceReq.java @@ -17,10 +17,10 @@ * under the License. 
*/ -package org.apache.iotdb.confignode.manager.pipe.connector.payload; +package org.apache.iotdb.confignode.manager.pipe.sink.payload; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferFilePieceReq; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFilePieceReq; import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq; import java.io.IOException; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/payload/PipeTransferConfigSnapshotSealReq.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/payload/PipeTransferConfigSnapshotSealReq.java similarity index 94% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/payload/PipeTransferConfigSnapshotSealReq.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/payload/PipeTransferConfigSnapshotSealReq.java index 07664f6523ccd..caab663c77dd9 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/payload/PipeTransferConfigSnapshotSealReq.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/payload/PipeTransferConfigSnapshotSealReq.java @@ -17,10 +17,10 @@ * under the License. */ -package org.apache.iotdb.confignode.manager.pipe.connector.payload; +package org.apache.iotdb.confignode.manager.pipe.sink.payload; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferFileSealReqV2; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFileSealReqV2; import org.apache.iotdb.confignode.persistence.schema.CNSnapshotFileType; import org.apache.iotdb.db.queryengine.common.header.ColumnHeaderConstant; import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/protocol/IoTDBConfigRegionAirGapConnector.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/protocol/IoTDBConfigRegionAirGapSink.java similarity index 78% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/protocol/IoTDBConfigRegionAirGapConnector.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/protocol/IoTDBConfigRegionAirGapSink.java index 1c5febcb4bf51..97413d68bebf2 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/protocol/IoTDBConfigRegionAirGapConnector.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/protocol/IoTDBConfigRegionAirGapSink.java @@ -17,20 +17,20 @@ * under the License. 
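The V2 handshake payload constructed below is a string-to-string map rather than fixed thrift fields, which is why this change can add keys (type-mismatch conversion, load-TsFile strategy, username, password, TsFile validation, mark-as-pipe-request) without touching the wire schema. A minimal sketch of a defensive receiver-side read, under the assumption that absent keys fall back to defaults so older senders keep working; the helper is illustrative, not IoTDB's receiver code.

import java.util.Map;

final class HandshakeParams {
  // Read an optional boolean handshake key; senders that predate the key
  // simply omit it and the receiver falls back to its default.
  static boolean getBoolean(Map<String, String> params, String key, boolean defaultValue) {
    final String value = params.get(key);
    return value == null ? defaultValue : Boolean.parseBoolean(value);
  }
}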
*/ -package org.apache.iotdb.confignode.manager.pipe.connector.protocol; +package org.apache.iotdb.confignode.manager.pipe.sink.protocol; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.conf.CommonDescriptor; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.common.PipeTransferHandshakeConstant; -import org.apache.iotdb.commons.pipe.connector.protocol.IoTDBAirGapConnector; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigNodeHandshakeV1Req; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigNodeHandshakeV2Req; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigPlanReq; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigSnapshotPieceReq; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigSnapshotSealReq; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.common.PipeTransferHandshakeConstant; +import org.apache.iotdb.commons.pipe.sink.protocol.IoTDBAirGapSink; import org.apache.iotdb.confignode.manager.pipe.event.PipeConfigRegionSnapshotEvent; import org.apache.iotdb.confignode.manager.pipe.event.PipeConfigRegionWritePlanEvent; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigNodeHandshakeV1Req; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigNodeHandshakeV2Req; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigPlanReq; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigSnapshotPieceReq; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigSnapshotSealReq; import org.apache.iotdb.confignode.service.ConfigNode; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; import org.apache.iotdb.pipe.api.event.Event; @@ -48,10 +48,9 @@ import java.util.HashMap; import java.util.Objects; -public class IoTDBConfigRegionAirGapConnector extends IoTDBAirGapConnector { +public class IoTDBConfigRegionAirGapSink extends IoTDBAirGapSink { - private static final Logger LOGGER = - LoggerFactory.getLogger(IoTDBConfigRegionAirGapConnector.class); + private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBConfigRegionAirGapSink.class); @Override protected byte[] generateHandShakeV1Payload() throws IOException { @@ -68,10 +67,28 @@ protected byte[] generateHandShakeV2Payload() throws IOException { params.put( PipeTransferHandshakeConstant.HANDSHAKE_KEY_TIME_PRECISION, CommonDescriptor.getInstance().getConfig().getTimestampPrecision()); + params.put( + PipeTransferHandshakeConstant.HANDSHAKE_KEY_CONVERT_ON_TYPE_MISMATCH, + Boolean.toString(shouldReceiverConvertOnTypeMismatch)); + params.put( + PipeTransferHandshakeConstant.HANDSHAKE_KEY_LOAD_TSFILE_STRATEGY, loadTsFileStrategy); + params.put(PipeTransferHandshakeConstant.HANDSHAKE_KEY_USERNAME, username); + params.put(PipeTransferHandshakeConstant.HANDSHAKE_KEY_PASSWORD, password); + params.put( + PipeTransferHandshakeConstant.HANDSHAKE_KEY_VALIDATE_TSFILE, + Boolean.toString(loadTsFileValidation)); + params.put( + PipeTransferHandshakeConstant.HANDSHAKE_KEY_MARK_AS_PIPE_REQUEST, + Boolean.toString(shouldMarkAsPipeRequest)); return PipeTransferConfigNodeHandshakeV2Req.toTPipeTransferBytes(params); } + @Override + protected void mayLimitRateAndRecordIO(final long requiredBytes) { + // Do nothing + } + @Override 
protected boolean mayNeedHandshakeWhenFail() { return true; @@ -132,16 +149,16 @@ private void doTransferWrapper( final AirGapSocket socket, final PipeConfigRegionWritePlanEvent pipeConfigRegionWritePlanEvent) throws PipeException, IOException { + // We increase the reference count for this event to determine if the event may be released. + if (!pipeConfigRegionWritePlanEvent.increaseReferenceCount( + IoTDBConfigRegionAirGapSink.class.getName())) { + return; + } try { - // We increase the reference count for this event to determine if the event may be released. - if (!pipeConfigRegionWritePlanEvent.increaseReferenceCount( - IoTDBConfigRegionAirGapConnector.class.getName())) { - return; - } doTransfer(socket, pipeConfigRegionWritePlanEvent); } finally { pipeConfigRegionWritePlanEvent.decreaseReferenceCount( - IoTDBConfigRegionAirGapConnector.class.getName(), false); + IoTDBConfigRegionAirGapSink.class.getName(), false); } } @@ -173,16 +190,16 @@ private void doTransfer( private void doTransferWrapper( final AirGapSocket socket, final PipeConfigRegionSnapshotEvent pipeConfigRegionSnapshotEvent) throws PipeException, IOException { + // We increase the reference count for this event to determine if the event may be released. + if (!pipeConfigRegionSnapshotEvent.increaseReferenceCount( + IoTDBConfigRegionAirGapSink.class.getName())) { + return; + } try { - // We increase the reference count for this event to determine if the event may be released. - if (!pipeConfigRegionSnapshotEvent.increaseReferenceCount( - IoTDBConfigRegionAirGapConnector.class.getName())) { - return; - } doTransfer(socket, pipeConfigRegionSnapshotEvent); } finally { pipeConfigRegionSnapshotEvent.decreaseReferenceCount( - IoTDBConfigRegionAirGapConnector.class.getName(), false); + IoTDBConfigRegionAirGapSink.class.getName(), false); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/protocol/IoTDBConfigRegionConnector.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/protocol/IoTDBConfigRegionSink.java similarity index 78% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/protocol/IoTDBConfigRegionConnector.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/protocol/IoTDBConfigRegionSink.java index 9ba081018b9e6..d3555e62dfa99 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/connector/protocol/IoTDBConfigRegionConnector.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/protocol/IoTDBConfigRegionSink.java @@ -17,20 +17,21 @@ * under the License. 
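Note the shape of the doTransferWrapper changes above: increaseReferenceCount now runs before the try block. In the old layout a failed increase returned from inside the try, so the finally still called decreaseReferenceCount and the event's reference count went unbalanced. A condensed sketch of the corrected pattern; RefCounted and TransferGuard are stand-ins for EnrichedEvent and the concrete sinks.

final class TransferGuard {
  interface RefCounted {
    boolean increaseReferenceCount(String holderName);

    void decreaseReferenceCount(String holderName, boolean shouldReport);
  }

  static void transferGuarded(RefCounted event, Runnable doTransfer) {
    // Take the reference before entering the try: a failed take returns early
    // and never reaches the finally, so no unmatched decrease can occur.
    if (!event.increaseReferenceCount(TransferGuard.class.getName())) {
      return;
    }
    try {
      doTransfer.run();
    } finally {
      event.decreaseReferenceCount(TransferGuard.class.getName(), false);
    }
  }
}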
*/ -package org.apache.iotdb.confignode.manager.pipe.connector.protocol; +package org.apache.iotdb.confignode.manager.pipe.sink.protocol; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.pipe.connector.client.IoTDBSyncClient; -import org.apache.iotdb.commons.pipe.connector.client.IoTDBSyncClientManager; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferFilePieceReq; -import org.apache.iotdb.commons.pipe.connector.protocol.IoTDBSslSyncConnector; -import org.apache.iotdb.confignode.manager.pipe.connector.client.IoTDBConfigNodeSyncClientManager; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigPlanReq; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigSnapshotPieceReq; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigSnapshotSealReq; +import org.apache.iotdb.commons.pipe.sink.client.IoTDBSyncClient; +import org.apache.iotdb.commons.pipe.sink.client.IoTDBSyncClientManager; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFilePieceReq; +import org.apache.iotdb.commons.pipe.sink.protocol.IoTDBSslSyncSink; +import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.manager.pipe.event.PipeConfigRegionSnapshotEvent; import org.apache.iotdb.confignode.manager.pipe.event.PipeConfigRegionWritePlanEvent; +import org.apache.iotdb.confignode.manager.pipe.sink.client.IoTDBConfigNodeSyncClientManager; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigPlanReq; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigSnapshotPieceReq; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigSnapshotSealReq; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; import org.apache.iotdb.pipe.api.event.Event; import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; @@ -51,20 +52,36 @@ import java.util.List; import java.util.Objects; -public class IoTDBConfigRegionConnector extends IoTDBSslSyncConnector { +public class IoTDBConfigRegionSink extends IoTDBSslSyncSink { - private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBConfigRegionConnector.class); + private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBConfigRegionSink.class); @Override protected IoTDBSyncClientManager constructClient( final List nodeUrls, + final String username, + final String password, final boolean useSSL, final String trustStorePath, final String trustStorePwd, final boolean useLeaderCache, - final String loadBalanceStrategy) { + final String loadBalanceStrategy, + final boolean shouldReceiverConvertOnTypeMismatch, + final String loadTsFileStrategy, + final boolean validateTsFile, + final boolean shouldMarkAsPipeRequest) { return new IoTDBConfigNodeSyncClientManager( - nodeUrls, useSSL, trustStorePath, trustStorePwd, loadBalanceStrategy); + nodeUrls, + username, + password, + useSSL, + Objects.nonNull(trustStorePath) ? 
ConfigNodeConfig.addHomeDir(trustStorePath) : null, + trustStorePwd, + loadBalanceStrategy, + shouldReceiverConvertOnTypeMismatch, + loadTsFileStrategy, + validateTsFile, + shouldMarkAsPipeRequest); } @Override @@ -80,6 +97,11 @@ protected PipeTransferFilePieceReq getTransferMultiFilePieceReq( return PipeTransferConfigSnapshotPieceReq.toTPipeTransferReq(fileName, position, payLoad); } + @Override + protected void mayLimitRateAndRecordIO(final long requiredBytes) { + // Do nothing + } + @Override public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exception { throw new UnsupportedOperationException( @@ -106,22 +128,22 @@ public void transfer(final Event event) throws Exception { private void doTransferWrapper( final PipeConfigRegionWritePlanEvent pipeConfigRegionWritePlanEvent) throws PipeException { + // We increase the reference count for this event to determine if the event may be released. + if (!pipeConfigRegionWritePlanEvent.increaseReferenceCount( + IoTDBConfigRegionSink.class.getName())) { + return; + } try { - // We increase the reference count for this event to determine if the event may be released. - if (!pipeConfigRegionWritePlanEvent.increaseReferenceCount( - IoTDBConfigRegionConnector.class.getName())) { - return; - } doTransfer(pipeConfigRegionWritePlanEvent); } finally { pipeConfigRegionWritePlanEvent.decreaseReferenceCount( - IoTDBConfigRegionConnector.class.getName(), false); + IoTDBConfigRegionSink.class.getName(), false); } } private void doTransfer(final PipeConfigRegionWritePlanEvent pipeConfigRegionWritePlanEvent) throws PipeException { - final Pair clientAndStatus = clientManager.getClient(); + final Pair clientAndStatus = getClientManager().getClient(); final TPipeTransferResp resp; try { @@ -147,7 +169,7 @@ private void doTransfer(final PipeConfigRegionWritePlanEvent pipeConfigRegionWri final TSStatus status = resp.getStatus(); // Send handshake req and then re-transfer the event if (status.getCode() == TSStatusCode.PIPE_CONFIG_RECEIVER_HANDSHAKE_NEEDED.getStatusCode()) { - clientManager.sendHandshakeReq(clientAndStatus); + getClientManager().sendHandshakeReq(clientAndStatus); } // Only handle the failed statuses to avoid string format performance overhead if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() @@ -167,16 +189,16 @@ private void doTransfer(final PipeConfigRegionWritePlanEvent pipeConfigRegionWri private void doTransferWrapper(final PipeConfigRegionSnapshotEvent pipeConfigRegionSnapshotEvent) throws PipeException, IOException { + // We increase the reference count for this event to determine if the event may be released. + if (!pipeConfigRegionSnapshotEvent.increaseReferenceCount( + IoTDBConfigRegionSink.class.getName())) { + return; + } try { - // We increase the reference count for this event to determine if the event may be released. 
- if (!pipeConfigRegionSnapshotEvent.increaseReferenceCount( - IoTDBConfigRegionConnector.class.getName())) { - return; - } doTransfer(pipeConfigRegionSnapshotEvent); } finally { pipeConfigRegionSnapshotEvent.decreaseReferenceCount( - IoTDBConfigRegionConnector.class.getName(), false); + IoTDBConfigRegionSink.class.getName(), false); } } @@ -186,7 +208,7 @@ private void doTransfer(final PipeConfigRegionSnapshotEvent snapshotEvent) final long creationTime = snapshotEvent.getCreationTime(); final File snapshotFile = snapshotEvent.getSnapshotFile(); final File templateFile = snapshotEvent.getTemplateFile(); - final Pair clientAndStatus = clientManager.getClient(); + final Pair clientAndStatus = getClientManager().getClient(); // 1. Transfer snapshotFile, and template File if exists transferFilePieces( @@ -233,7 +255,7 @@ private void doTransfer(final PipeConfigRegionSnapshotEvent snapshotEvent) final TSStatus status = resp.getStatus(); // Send handshake req and then re-transfer the event if (status.getCode() == TSStatusCode.PIPE_CONFIG_RECEIVER_HANDSHAKE_NEEDED.getStatusCode()) { - clientManager.sendHandshakeReq(clientAndStatus); + getClientManager().sendHandshakeReq(clientAndStatus); } // Only handle the failed statuses to avoid string format performance overhead if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/extractor/ConfigRegionListeningFilter.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/ConfigRegionListeningFilter.java similarity index 96% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/extractor/ConfigRegionListeningFilter.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/ConfigRegionListeningFilter.java index 1718fcb68cfbd..fdcb7b03417f0 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/extractor/ConfigRegionListeningFilter.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/ConfigRegionListeningFilter.java @@ -17,7 +17,7 @@ * under the License. 
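The PIPE_CONFIG_RECEIVER_HANDSHAKE_NEEDED branch above deserves a gloss: when the receiver reports that it has lost the session (for example after a restart), the sink refreshes the handshake on the same client and lets the normal retry path re-deliver the event. A condensed sketch of that flow; the Client interface and the status constants are illustrative stand-ins, not IoTDB types.

final class StatusHandling {
  static final int SUCCESS = 0; // placeholder status codes
  static final int HANDSHAKE_NEEDED = 1;

  interface Client {
    int transferOnce() throws Exception;

    void sendHandshakeReq() throws Exception;
  }

  static void transfer(Client client) throws Exception {
    final int status = client.transferOnce();
    if (status == HANDSHAKE_NEEDED) {
      // Re-handshake on the same client, then surface the failure so the
      // pipe engine re-delivers the event on its retry path.
      client.sendHandshakeReq();
    }
    if (status != SUCCESS) {
      throw new Exception("transfer failed with status " + status);
    }
  }
}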
*/ -package org.apache.iotdb.confignode.manager.pipe.extractor; +package org.apache.iotdb.confignode.manager.pipe.source; import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.path.PartialPath; @@ -36,12 +36,12 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_EXCLUSION_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_EXCLUSION_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_INCLUSION_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_INCLUSION_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_EXCLUSION_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_INCLUSION_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_EXCLUSION_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_EXCLUSION_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_INCLUSION_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_INCLUSION_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_EXCLUSION_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_INCLUSION_KEY; import static org.apache.iotdb.commons.pipe.datastructure.options.PipeInclusionOptions.parseOptions; /** diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/extractor/ConfigRegionListeningQueue.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/ConfigRegionListeningQueue.java similarity index 93% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/extractor/ConfigRegionListeningQueue.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/ConfigRegionListeningQueue.java index 4c908667a9ffb..715734c10c40d 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/extractor/ConfigRegionListeningQueue.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/ConfigRegionListeningQueue.java @@ -17,7 +17,7 @@ * under the License. 
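Both the legacy EXTRACTOR_* constants and the SOURCE_* constants remain imported above, so pipes declared with either parameter prefix keep parsing after the extractor-to-source rename. The lookup below illustrates that dual-key fallback; the literal key strings and the default value are assumptions for illustration, not the actual constant values.

import java.util.Map;

final class SourceOptions {
  // Prefer the newer "source" key, fall back to the legacy "extractor" key,
  // and finally to a default when neither was configured.
  static String inclusion(Map<String, String> params) {
    return params.getOrDefault(
        "source.inclusion", params.getOrDefault("extractor.inclusion", "data.insert"));
  }
}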
*/ -package org.apache.iotdb.confignode.manager.pipe.extractor; +package org.apache.iotdb.confignode.manager.pipe.source; import org.apache.iotdb.commons.auth.user.LocalFileUserAccessor; import org.apache.iotdb.commons.conf.IoTDBConstant; @@ -155,14 +155,25 @@ protected Event deserializeFromByteBuffer(final ByteBuffer byteBuffer) { /////////////////////////////// Snapshot /////////////////////////////// @Override - public synchronized boolean processTakeSnapshot(final File snapshotDir) - throws TException, IOException { - return super.serializeToFile(new File(snapshotDir, SNAPSHOT_FILE_NAME)); + public synchronized boolean processTakeSnapshot(final File snapshotDir) throws IOException { + try { + return super.serializeToFile(new File(snapshotDir, SNAPSHOT_FILE_NAME)); + } catch (final IOException e) { + throw e; + } catch (final Exception e) { + throw new IOException(e); + } } @Override public synchronized void processLoadSnapshot(final File snapshotDir) throws TException, IOException { - super.deserializeFromFile(new File(snapshotDir, SNAPSHOT_FILE_NAME)); + try { + super.deserializeFromFile(new File(snapshotDir, SNAPSHOT_FILE_NAME)); + } catch (final IOException e) { + throw e; + } catch (final Exception e) { + throw new IOException(e); + } } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/extractor/IoTDBConfigRegionExtractor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/IoTDBConfigRegionSource.java similarity index 90% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/extractor/IoTDBConfigRegionExtractor.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/IoTDBConfigRegionSource.java index ea897465b8b7b..a89af14c8c063 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/extractor/IoTDBConfigRegionExtractor.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/IoTDBConfigRegionSource.java @@ -17,23 +17,23 @@ * under the License. 
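The rewritten snapshot methods above keep a narrow throws clause by letting IOExceptions pass through unchanged and wrapping every other failure from the superclass call into an IOException, preserving the original cause. The idiom in isolation, with Serializer standing in for the superclass call:

import java.io.File;
import java.io.IOException;

final class SnapshotHelper {
  interface Serializer {
    boolean serializeToFile(File file) throws Exception;
  }

  static boolean takeSnapshot(Serializer serializer, File file) throws IOException {
    try {
      return serializer.serializeToFile(file);
    } catch (final IOException e) {
      throw e; // already the declared type
    } catch (final Exception e) {
      throw new IOException(e); // wrap, keeping the cause
    }
  }
}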
*/ -package org.apache.iotdb.confignode.manager.pipe.extractor; +package org.apache.iotdb.confignode.manager.pipe.source; import org.apache.iotdb.commons.consensus.ConfigRegionId; +import org.apache.iotdb.commons.pipe.agent.task.progress.PipeEventCommitManager; import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.commons.pipe.datastructure.queue.listening.AbstractPipeListeningQueue; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; import org.apache.iotdb.commons.pipe.event.PipeSnapshotEvent; import org.apache.iotdb.commons.pipe.event.PipeWritePlanEvent; -import org.apache.iotdb.commons.pipe.extractor.IoTDBNonDataRegionExtractor; -import org.apache.iotdb.commons.pipe.progress.PipeEventCommitManager; +import org.apache.iotdb.commons.pipe.source.IoTDBNonDataRegionSource; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent; import org.apache.iotdb.confignode.manager.pipe.event.PipeConfigRegionSnapshotEvent; import org.apache.iotdb.confignode.manager.pipe.event.PipeConfigRegionWritePlanEvent; -import org.apache.iotdb.confignode.manager.pipe.metric.PipeConfigNodeRemainingTimeMetrics; -import org.apache.iotdb.confignode.manager.pipe.metric.PipeConfigRegionExtractorMetrics; +import org.apache.iotdb.confignode.manager.pipe.metric.overview.PipeConfigNodeRemainingTimeMetrics; +import org.apache.iotdb.confignode.manager.pipe.metric.source.PipeConfigRegionSourceMetrics; import org.apache.iotdb.confignode.service.ConfigNode; import org.apache.iotdb.consensus.ConsensusFactory; import org.apache.iotdb.consensus.exception.ConsensusException; @@ -46,7 +46,7 @@ import java.util.Optional; import java.util.Set; -public class IoTDBConfigRegionExtractor extends IoTDBNonDataRegionExtractor { +public class IoTDBConfigRegionSource extends IoTDBNonDataRegionSource { public static final PipeConfigPhysicalPlanPatternParseVisitor PATTERN_PARSE_VISITOR = new PipeConfigPhysicalPlanPatternParseVisitor(); @@ -69,7 +69,7 @@ public void customize( super.customize(parameters, configuration); listenedTypeSet = ConfigRegionListeningFilter.parseListeningPlanTypeSet(parameters); - PipeConfigRegionExtractorMetrics.getInstance().register(this); + PipeConfigRegionSourceMetrics.getInstance().register(this); PipeConfigNodeRemainingTimeMetrics.getInstance().register(this); } @@ -147,7 +147,7 @@ public synchronized void close() throws Exception { super.close(); if (Objects.nonNull(taskID)) { - PipeConfigRegionExtractorMetrics.getInstance().deregister(taskID); + PipeConfigRegionSourceMetrics.getInstance().deregister(taskID); PipeConfigNodeRemainingTimeMetrics.getInstance().deregister(pipeName + "_" + creationTime); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/extractor/PipeConfigPhysicalPlanPatternParseVisitor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigPhysicalPlanPatternParseVisitor.java similarity index 94% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/extractor/PipeConfigPhysicalPlanPatternParseVisitor.java rename to iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigPhysicalPlanPatternParseVisitor.java index 40d7141070d67..8ed9ffca21f86 100644 --- 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/extractor/PipeConfigPhysicalPlanPatternParseVisitor.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigPhysicalPlanPatternParseVisitor.java @@ -17,14 +17,15 @@ * under the License. */ -package org.apache.iotdb.confignode.manager.pipe.extractor; +package org.apache.iotdb.confignode.manager.pipe.source; +import org.apache.iotdb.commons.auth.entity.PrivilegeType; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathPatternTree; -import org.apache.iotdb.commons.pipe.pattern.IoTDBPipePattern; +import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBPipePattern; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanVisitor; -import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan; +import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.consensus.request.write.database.DatabaseSchemaPlan; import org.apache.iotdb.confignode.consensus.request.write.database.DeleteDatabasePlan; import org.apache.iotdb.confignode.consensus.request.write.database.SetTTLPlan; @@ -50,6 +51,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; @@ -192,7 +194,13 @@ private Optional visitPathRelatedAuthorPlan( .map(pattern::getIntersection) .flatMap(Collection::stream) .collect(Collectors.toList()); - return !intersectedPaths.isEmpty() + final Set permissions = + !intersectedPaths.isEmpty() + ? pathRelatedAuthorPlan.getPermissions() + : pathRelatedAuthorPlan.getPermissions().stream() + .filter(permission -> !PrivilegeType.values()[permission].isPathRelevant()) + .collect(Collectors.toSet()); + return !permissions.isEmpty() ? Optional.of( new AuthorPlan( pathRelatedAuthorPlan.getAuthorType(), @@ -200,7 +208,7 @@ private Optional visitPathRelatedAuthorPlan( pathRelatedAuthorPlan.getRoleName(), pathRelatedAuthorPlan.getPassword(), pathRelatedAuthorPlan.getNewPassword(), - pathRelatedAuthorPlan.getPermissions(), + permissions, pathRelatedAuthorPlan.getGrantOpt(), intersectedPaths)) : Optional.empty(); @@ -232,7 +240,7 @@ public Optional visitPipeDeleteLogicalView( pattern.getIntersection( PathPatternTree.deserialize(pipeDeleteLogicalViewPlan.getPatternTreeBytes())); return !intersectedTree.isEmpty() - ? Optional.of(new PipeDeleteTimeSeriesPlan(intersectedTree.serialize())) + ? 
Optional.of(new PipeDeleteLogicalViewPlan(intersectedTree.serialize())) : Optional.empty(); } catch (final IOException e) { LOGGER.warn( diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java index ebb7a264e0442..56fabd6aa5345 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java @@ -30,7 +30,7 @@ import org.apache.iotdb.commons.service.metric.MetricService; import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.commons.utils.StatusUtils; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; import org.apache.iotdb.confignode.conf.ConfigNodeConfig; @@ -107,8 +107,8 @@ public class ClusterSchemaManager { private static final Logger LOGGER = LoggerFactory.getLogger(ClusterSchemaManager.class); private static final ConfigNodeConfig CONF = ConfigNodeDescriptor.getInstance().getConf(); - private static final double SCHEMA_REGION_PER_DATA_NODE = CONF.getSchemaRegionPerDataNode(); - private static final double DATA_REGION_PER_DATA_NODE = CONF.getDataRegionPerDataNode(); + private static final int SCHEMA_REGION_PER_DATA_NODE = CONF.getSchemaRegionPerDataNode(); + private static final int DATA_REGION_PER_DATA_NODE = CONF.getDataRegionPerDataNode(); private final IManager configManager; private final ClusterSchemaInfo clusterSchemaInfo; @@ -462,6 +462,7 @@ public synchronized void adjustMaxRegionGroupNum() { } int dataNodeNum = getNodeManager().getRegisteredDataNodeCount(); + int totalCpuCoreNum = getNodeManager().getDataNodeCpuCoreCount(); int databaseNum = databaseSchemaMap.size(); for (TDatabaseSchema databaseSchema : databaseSchemaMap.values()) { @@ -479,57 +480,62 @@ public synchronized void adjustMaxRegionGroupNum() { continue; } + // Adjust maxSchemaRegionGroupNum for each Database. + // All Databases share the DataNodes equally. + // The allocated SchemaRegionGroups will not be shrunk. + final int allocatedSchemaRegionGroupCount; try { - // Adjust maxSchemaRegionGroupNum for each Database. - // All Databases share the DataNodes equally. - // The allocated SchemaRegionGroups will not be shrunk. 
- int allocatedSchemaRegionGroupCount; - try { - allocatedSchemaRegionGroupCount = - getPartitionManager() - .getRegionGroupCount(databaseSchema.getName(), TConsensusGroupType.SchemaRegion); - } catch (DatabaseNotExistsException e) { - // ignore the pre deleted database - continue; - } + allocatedSchemaRegionGroupCount = + getPartitionManager() + .getRegionGroupCount(databaseSchema.getName(), TConsensusGroupType.SchemaRegion); + } catch (final DatabaseNotExistsException e) { + // ignore the pre deleted database + continue; + } - int maxSchemaRegionGroupNum = - calcMaxRegionGroupNum( - databaseSchema.getMinSchemaRegionGroupNum(), - SCHEMA_REGION_PER_DATA_NODE, - dataNodeNum, - databaseNum, - databaseSchema.getSchemaReplicationFactor(), - allocatedSchemaRegionGroupCount); - LOGGER.info( - "[AdjustRegionGroupNum] The maximum number of SchemaRegionGroups for Database: {} is adjusted to: {}", - databaseSchema.getName(), - maxSchemaRegionGroupNum); - - // Adjust maxDataRegionGroupNum for each Database. - // All Databases share the DataNodes equally. - // The allocated DataRegionGroups will not be shrunk. - int allocatedDataRegionGroupCount = + final int maxSchemaRegionGroupNum = + calcMaxRegionGroupNum( + databaseSchema.getMinSchemaRegionGroupNum(), + SCHEMA_REGION_PER_DATA_NODE, + dataNodeNum, + databaseNum, + databaseSchema.getSchemaReplicationFactor(), + allocatedSchemaRegionGroupCount); + LOGGER.info( + "[AdjustRegionGroupNum] The maximum number of SchemaRegionGroups for Database: {} is adjusted to: {}", + databaseSchema.getName(), + maxSchemaRegionGroupNum); + + // Adjust maxDataRegionGroupNum for each Database. + // All Databases share the DataNodes equally. + // The allocated DataRegionGroups will not be shrunk. + final int allocatedDataRegionGroupCount; + try { + allocatedDataRegionGroupCount = getPartitionManager() .getRegionGroupCount(databaseSchema.getName(), TConsensusGroupType.DataRegion); - int maxDataRegionGroupNum = - calcMaxRegionGroupNum( - databaseSchema.getMinDataRegionGroupNum(), - DATA_REGION_PER_DATA_NODE, - dataNodeNum, - databaseNum, - databaseSchema.getDataReplicationFactor(), - allocatedDataRegionGroupCount); - LOGGER.info( - "[AdjustRegionGroupNum] The maximum number of DataRegionGroups for Database: {} is adjusted to: {}", - databaseSchema.getName(), - maxDataRegionGroupNum); - - adjustMaxRegionGroupNumPlan.putEntry( - databaseSchema.getName(), new Pair<>(maxSchemaRegionGroupNum, maxDataRegionGroupNum)); - } catch (DatabaseNotExistsException e) { - LOGGER.warn("Adjust maxRegionGroupNum failed because Database doesn't exist", e); + } catch (final DatabaseNotExistsException e) { + // ignore the pre deleted database + continue; } + + final int maxDataRegionGroupNum = + calcMaxRegionGroupNum( + databaseSchema.getMinDataRegionGroupNum(), + DATA_REGION_PER_DATA_NODE == 0 + ? CONF.getDataRegionPerDataNodeProportion() + : DATA_REGION_PER_DATA_NODE, + DATA_REGION_PER_DATA_NODE == 0 ? 
totalCpuCoreNum : dataNodeNum, + databaseNum, + databaseSchema.getDataReplicationFactor(), + allocatedDataRegionGroupCount); + LOGGER.info( + "[AdjustRegionGroupNum] The maximum number of DataRegionGroups for Database: {} is adjusted to: {}", + databaseSchema.getName(), + maxDataRegionGroupNum); + + adjustMaxRegionGroupNumPlan.putEntry( + databaseSchema.getName(), new Pair<>(maxSchemaRegionGroupNum, maxDataRegionGroupNum)); } try { getConsensusManager().write(adjustMaxRegionGroupNumPlan); @@ -1037,7 +1043,7 @@ public synchronized TSStatus extendSchemaTemplate( DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.UPDATE_TEMPLATE, updateTemplateReq, dataNodeLocationMap); + CnToDnAsyncRequestType.UPDATE_TEMPLATE, updateTemplateReq, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); Map statusMap = clientHandler.getResponseMap(); for (Map.Entry entry : statusMap.entrySet()) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/subscription/SubscriptionCoordinator.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/subscription/SubscriptionCoordinator.java index 20dfce44bf29c..28c596de7ba68 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/subscription/SubscriptionCoordinator.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/subscription/SubscriptionCoordinator.java @@ -30,6 +30,7 @@ import org.apache.iotdb.confignode.rpc.thrift.TCloseConsumerReq; import org.apache.iotdb.confignode.rpc.thrift.TCreateConsumerReq; import org.apache.iotdb.confignode.rpc.thrift.TCreateTopicReq; +import org.apache.iotdb.confignode.rpc.thrift.TDropSubscriptionReq; import org.apache.iotdb.confignode.rpc.thrift.TDropTopicReq; import org.apache.iotdb.confignode.rpc.thrift.TGetAllSubscriptionInfoResp; import org.apache.iotdb.confignode.rpc.thrift.TGetAllTopicInfoResp; @@ -42,10 +43,12 @@ import org.apache.iotdb.rpc.RpcUtils; import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.tsfile.utils.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Collections; +import java.util.Optional; import java.util.concurrent.atomic.AtomicReference; public class SubscriptionCoordinator { @@ -248,6 +251,30 @@ public TSStatus dropSubscription(TUnsubscribeReq req) { return status; } + public TSStatus dropSubscription(TDropSubscriptionReq req) { + final String subscriptionId = req.getSubsciptionId(); + final boolean isSetIfExistsCondition = + req.isSetIfExistsCondition() && req.isIfExistsCondition(); + final Optional> topicNameWithConsumerGroupName = + subscriptionInfo.parseSubscriptionId(subscriptionId); + if (!topicNameWithConsumerGroupName.isPresent()) { + return isSetIfExistsCondition + ? RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS) + : RpcUtils.getStatus( + TSStatusCode.TOPIC_NOT_EXIST_ERROR, + String.format( + "Failed to drop subscription %s. 
Failures: %s does not exist.", + subscriptionId, subscriptionId)); + } + return configManager + .getProcedureManager() + .dropSubscription( + new TUnsubscribeReq() + .setConsumerId(null) + .setConsumerGroupId(topicNameWithConsumerGroupName.get().right) + .setTopicNames(Collections.singleton(topicNameWithConsumerGroupName.get().left))); + } + public TShowSubscriptionResp showSubscription(TShowSubscriptionReq req) { try { return ((SubscriptionTableResp) diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/subscription/SubscriptionMetaSyncer.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/subscription/SubscriptionMetaSyncer.java index 50a4ebb22b25a..de49987e13fbe 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/subscription/SubscriptionMetaSyncer.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/subscription/SubscriptionMetaSyncer.java @@ -23,7 +23,7 @@ import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; import org.apache.iotdb.commons.concurrent.ThreadName; import org.apache.iotdb.commons.concurrent.threadpool.ScheduledExecutorUtil; -import org.apache.iotdb.commons.pipe.config.PipeConfig; +import org.apache.iotdb.commons.subscription.config.SubscriptionConfig; import org.apache.iotdb.confignode.manager.ConfigManager; import org.apache.iotdb.confignode.manager.ProcedureManager; import org.apache.iotdb.rpc.TSStatusCode; @@ -43,9 +43,9 @@ public class SubscriptionMetaSyncer { IoTDBThreadPoolFactory.newSingleThreadScheduledExecutor( ThreadName.SUBSCRIPTION_RUNTIME_META_SYNCER.getName()); private static final long INITIAL_SYNC_DELAY_MINUTES = - PipeConfig.getInstance().getPipeMetaSyncerInitialSyncDelayMinutes(); + SubscriptionConfig.getInstance().getSubscriptionMetaSyncerInitialSyncDelayMinutes(); private static final long SYNC_INTERVAL_MINUTES = - PipeConfig.getInstance().getPipeMetaSyncerSyncIntervalMinutes(); + SubscriptionConfig.getInstance().getSubscriptionMetaSyncerSyncIntervalMinutes(); private final ConfigManager configManager; @@ -89,22 +89,26 @@ private synchronized void sync() { } final ProcedureManager procedureManager = configManager.getProcedureManager(); - final TSStatus consumerGroupMetaSyncStatus = procedureManager.consumerGroupMetaSync(); + + // sync topic meta firstly + // TODO: consider drop the topic which is subscribed by consumers final TSStatus topicMetaSyncStatus = procedureManager.topicMetaSync(); - if (consumerGroupMetaSyncStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode() - && topicMetaSyncStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - LOGGER.info( - "After this successful sync, if SubscriptionInfo is empty during this sync and has not been modified afterwards, all subsequent syncs will be skipped"); - isLastSubscriptionSyncSuccessful = true; - } else { - if (consumerGroupMetaSyncStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - LOGGER.warn( - "Failed to sync consumer group meta. Result status: {}.", consumerGroupMetaSyncStatus); - } - if (topicMetaSyncStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - LOGGER.warn("Failed to sync topic meta. Result status: {}.", topicMetaSyncStatus); - } + if (topicMetaSyncStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + LOGGER.warn("Failed to sync topic meta. 
Result status: {}.", topicMetaSyncStatus); + return; } + + // sync consumer meta if syncing topic meta successfully + final TSStatus consumerGroupMetaSyncStatus = procedureManager.consumerGroupMetaSync(); + if (consumerGroupMetaSyncStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + LOGGER.warn( + "Failed to sync consumer group meta. Result status: {}.", consumerGroupMetaSyncStatus); + return; + } + + LOGGER.info( + "After this successful sync, if SubscriptionInfo is empty during this sync and has not been modified afterwards, all subsequent syncs will be skipped"); + isLastSubscriptionSyncSuccessful = true; } public synchronized void stop() { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/AuthorInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/AuthorInfo.java index e993e40fbc268..61100f199f0a0 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/AuthorInfo.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/AuthorInfo.java @@ -37,7 +37,8 @@ import org.apache.iotdb.commons.utils.FileUtils; import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; -import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan; +import org.apache.iotdb.confignode.consensus.request.read.auth.AuthorReadPlan; +import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.consensus.response.auth.PermissionInfoResp; import org.apache.iotdb.confignode.rpc.thrift.TAuthizedPatternTreeResp; import org.apache.iotdb.confignode.rpc.thrift.TPathPrivilege; @@ -318,18 +319,18 @@ public TSStatus authorNonQuery(AuthorPlan authorPlan) { return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); } - public PermissionInfoResp executeListUsers(AuthorPlan plan) throws AuthException { - PermissionInfoResp result = new PermissionInfoResp(); - List userList = authorizer.listAllUsers(); + public PermissionInfoResp executeListUsers(final AuthorReadPlan plan) throws AuthException { + final PermissionInfoResp result = new PermissionInfoResp(); + final List userList = authorizer.listAllUsers(); if (!plan.getRoleName().isEmpty()) { - Role role = authorizer.getRole(plan.getRoleName()); + final Role role = authorizer.getRole(plan.getRoleName()); if (role == null) { result.setStatus( RpcUtils.getStatus( TSStatusCode.ROLE_NOT_EXIST, "No such role : " + plan.getRoleName())); return result; } - Iterator itr = userList.iterator(); + final Iterator itr = userList.iterator(); while (itr.hasNext()) { User userObj = authorizer.getUser(itr.next()); if (userObj == null || !userObj.hasRole(plan.getRoleName())) { @@ -343,14 +344,14 @@ public PermissionInfoResp executeListUsers(AuthorPlan plan) throws AuthException return result; } - public PermissionInfoResp executeListRoles(AuthorPlan plan) throws AuthException { - PermissionInfoResp result = new PermissionInfoResp(); - List permissionInfo = new ArrayList<>(); - List roleList = new ArrayList<>(); + public PermissionInfoResp executeListRoles(final AuthorReadPlan plan) throws AuthException { + final PermissionInfoResp result = new PermissionInfoResp(); + final List permissionInfo = new ArrayList<>(); + final List roleList = new ArrayList<>(); if (plan.getUserName().isEmpty()) { roleList.addAll(authorizer.listAllRoles()); } else { - User user = authorizer.getUser(plan.getUserName()); + final User user = 
authorizer.getUser(plan.getUserName()); if (user == null) { result.setStatus( RpcUtils.getStatus(TSStatusCode.USER_NOT_EXIST, NO_USER_MSG + plan.getUserName())); @@ -365,22 +366,23 @@ public PermissionInfoResp executeListRoles(AuthorPlan plan) throws AuthException return result; } - public PermissionInfoResp executeListRolePrivileges(AuthorPlan plan) throws AuthException { - PermissionInfoResp result = new PermissionInfoResp(); - List permissionInfo = new ArrayList<>(); - Role role = authorizer.getRole(plan.getRoleName()); + public PermissionInfoResp executeListRolePrivileges(final AuthorReadPlan plan) + throws AuthException { + final PermissionInfoResp result = new PermissionInfoResp(); + final List permissionInfo = new ArrayList<>(); + final Role role = authorizer.getRole(plan.getRoleName()); if (role == null) { result.setStatus( RpcUtils.getStatus(TSStatusCode.ROLE_NOT_EXIST, "No such role : " + plan.getRoleName())); result.setMemberInfo(permissionInfo); return result; } - TPermissionInfoResp resp = new TPermissionInfoResp(); - TRoleResp roleResp = new TRoleResp(); + final TPermissionInfoResp resp = new TPermissionInfoResp(); + final TRoleResp roleResp = new TRoleResp(); roleResp.setRoleName(role.getName()); - List pathList = new ArrayList<>(); - for (PathPrivilege path : role.getPathPrivilegeList()) { - TPathPrivilege pathPri = new TPathPrivilege(); + final List pathList = new ArrayList<>(); + for (final PathPrivilege path : role.getPathPrivilegeList()) { + final TPathPrivilege pathPri = new TPathPrivilege(); pathPri.setPriGrantOpt(path.getGrantOpt()); pathPri.setPriSet(path.getPrivileges()); pathPri.setPath(path.getPath().toString()); @@ -389,7 +391,7 @@ public PermissionInfoResp executeListRolePrivileges(AuthorPlan plan) throws Auth roleResp.setPrivilegeList(pathList); roleResp.setSysPriSet(role.getSysPrivilege()); roleResp.setSysPriSetGrantOpt(role.getSysPriGrantOpt()); - Map roleInfo = new HashMap<>(); + final Map roleInfo = new HashMap<>(); roleInfo.put(role.getName(), roleResp); resp.setRoleInfo(roleInfo); resp.setStatus(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS)); @@ -400,15 +402,16 @@ public PermissionInfoResp executeListRolePrivileges(AuthorPlan plan) throws Auth return result; } - public PermissionInfoResp executeListUserPrivileges(AuthorPlan plan) throws AuthException { - PermissionInfoResp result = new PermissionInfoResp(); - User user = authorizer.getUser(plan.getUserName()); + public PermissionInfoResp executeListUserPrivileges(final AuthorReadPlan plan) + throws AuthException { + final PermissionInfoResp result = new PermissionInfoResp(); + final User user = authorizer.getUser(plan.getUserName()); if (user == null) { result.setStatus( RpcUtils.getStatus(TSStatusCode.USER_NOT_EXIST, NO_USER_MSG + plan.getUserName())); return result; } - TPermissionInfoResp resp = getUserPermissionInfo(plan.getUserName()); + final TPermissionInfoResp resp = getUserPermissionInfo(plan.getUserName()); resp.setStatus(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS)); result.setTag(ColumnHeaderConstant.PRIVILEGES); result.setPermissionInfoResp(resp); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ModelInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ModelInfo.java new file mode 100644 index 0000000000000..21b317d9854ec --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ModelInfo.java @@ -0,0 +1,382 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under 
one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.persistence;
+
+import org.apache.iotdb.common.rpc.thrift.TSStatus;
+import org.apache.iotdb.commons.model.ModelInformation;
+import org.apache.iotdb.commons.model.ModelStatus;
+import org.apache.iotdb.commons.model.ModelTable;
+import org.apache.iotdb.commons.model.ModelType;
+import org.apache.iotdb.commons.snapshot.SnapshotProcessor;
+import org.apache.iotdb.confignode.consensus.request.read.model.GetModelInfoPlan;
+import org.apache.iotdb.confignode.consensus.request.read.model.ShowModelPlan;
+import org.apache.iotdb.confignode.consensus.request.write.model.CreateModelPlan;
+import org.apache.iotdb.confignode.consensus.request.write.model.UpdateModelInfoPlan;
+import org.apache.iotdb.confignode.consensus.response.model.GetModelInfoResp;
+import org.apache.iotdb.confignode.consensus.response.model.ModelTableResp;
+import org.apache.iotdb.rpc.TSStatusCode;
+
+import org.apache.thrift.TException;
+import org.apache.tsfile.utils.PublicBAOS;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+@ThreadSafe
+public class ModelInfo implements SnapshotProcessor {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(ModelInfo.class);
+
+  private static final String SNAPSHOT_FILENAME = "model_info.snapshot";
+
+  private ModelTable modelTable;
+
+  private final Map<String, List<Integer>> modelNameToNodes;
+
+  private final ReadWriteLock modelTableLock = new ReentrantReadWriteLock();
+
+  private static final Set<String> builtInForecastModel = new HashSet<>();
+
+  private static final Set<String> builtInAnomalyDetectionModel = new HashSet<>();
+
+  static {
+    builtInForecastModel.add("_ARIMA");
+    builtInForecastModel.add("_NaiveForecaster");
+    builtInForecastModel.add("_STLForecaster");
+    builtInForecastModel.add("_ExponentialSmoothing");
+    builtInAnomalyDetectionModel.add("_GaussianHMM");
+    builtInAnomalyDetectionModel.add("_GMMHMM");
+    builtInAnomalyDetectionModel.add("_Stray");
+  }
+
+  public ModelInfo() {
+    this.modelTable = new ModelTable();
+    this.modelNameToNodes = new HashMap<>();
+  }
+
+  public boolean contain(String modelName) {
+    return modelTable.containsModel(modelName);
+  }
+
+  public void acquireModelTableReadLock() {
+    LOGGER.info("acquire ModelTableReadLock");
+    modelTableLock.readLock().lock();
+  }
+
+  public void releaseModelTableReadLock() {
+    LOGGER.info("release ModelTableReadLock");
+    modelTableLock.readLock().unlock();
+  }
+
+  public void acquireModelTableWriteLock() {
+    LOGGER.info("acquire ModelTableWriteLock");
+    modelTableLock.writeLock().lock();
+  }
+
+  public void releaseModelTableWriteLock() {
+    LOGGER.info("release ModelTableWriteLock");
+    modelTableLock.writeLock().unlock();
+  }
+
+  // Init the model in ModelInfo; this does not fill in the detailed information of the model
+  public TSStatus createModel(CreateModelPlan plan) {
+    acquireModelTableWriteLock();
+    try {
+      String modelName = plan.getModelName();
+      if (modelTable.containsModel(modelName)) {
+        return new TSStatus(TSStatusCode.MODEL_EXIST_ERROR.getStatusCode())
+            .setMessage(String.format("model [%s] has already been created.", modelName));
+      } else {
+        modelTable.addModel(new ModelInformation(modelName, ModelStatus.LOADING));
+        return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
+      }
+    } catch (Exception e) {
+      final String errorMessage =
+          String.format(
+              "Failed to add model [%s] in ModelTable on Config Nodes, because of %s",
+              plan.getModelName(), e);
+      LOGGER.warn(errorMessage, e);
+      return new TSStatus(TSStatusCode.CREATE_MODEL_ERROR.getStatusCode()).setMessage(errorMessage);
+    } finally {
+      releaseModelTableWriteLock();
+    }
+  }
+
+  public TSStatus dropModelInNode(int aiNodeId) {
+    acquireModelTableWriteLock();
+    try {
+      // Remove the node from every model's node list via removeIf so that empty
+      // entries can be dropped during iteration without a
+      // ConcurrentModificationException.
+      modelNameToNodes
+          .entrySet()
+          .removeIf(
+              entry -> {
+                entry.getValue().remove(Integer.valueOf(aiNodeId));
+                // if the node list is empty, remove this model entirely
+                if (entry.getValue().isEmpty()) {
+                  modelTable.removeModel(entry.getKey());
+                  return true;
+                }
+                return false;
+              });
+      // currently, we only have one AINode at a time, so we can just clear failed models.
+      modelTable.clearFailedModel();
+      return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
+    } finally {
+      releaseModelTableWriteLock();
+    }
+  }
+
+  public TSStatus dropModel(String modelName) {
+    acquireModelTableWriteLock();
+    try {
+      if (modelTable.containsModel(modelName)) {
+        modelTable.removeModel(modelName);
+        modelNameToNodes.remove(modelName);
+        return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
+      }
+      return new TSStatus(TSStatusCode.DROP_MODEL_ERROR.getStatusCode())
+          .setMessage(String.format("model [%s] has not been created.", modelName));
+    } finally {
+      releaseModelTableWriteLock();
+    }
+  }
+
+  public List<Integer> getNodeIds(String modelName) {
+    return modelNameToNodes.getOrDefault(modelName, Collections.emptyList());
+  }
+
+  private ModelInformation getModelByName(String modelName) {
+    ModelType modelType = checkModelType(modelName);
+    if (modelType != ModelType.USER_DEFINED) {
+      if (modelType == ModelType.BUILT_IN_FORECAST && builtInForecastModel.contains(modelName)) {
+        return new ModelInformation(ModelType.BUILT_IN_FORECAST, modelName);
+      } else if (modelType == ModelType.BUILT_IN_ANOMALY_DETECTION
+          && builtInAnomalyDetectionModel.contains(modelName)) {
+        return new ModelInformation(ModelType.BUILT_IN_ANOMALY_DETECTION, modelName);
+      }
+    } else {
+      return modelTable.getModelInformationById(modelName);
+    }
+    return null;
+  }
+
+  public ModelTableResp showModel(ShowModelPlan plan) {
+    acquireModelTableReadLock();
+    try {
+      ModelTableResp modelTableResp =
+          new ModelTableResp(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()));
+      if (plan.isSetModelName()) {
+        ModelInformation modelInformation = getModelByName(plan.getModelName());
+        if (modelInformation != null) {
+          modelTableResp.addModelInformation(modelInformation);
+        }
+      } else {
+        modelTableResp.addModelInformation(modelTable.getAllModelInformation());
+        for (String modelName : builtInForecastModel) {
+          modelTableResp.addModelInformation(
+              new ModelInformation(ModelType.BUILT_IN_FORECAST, modelName));
+        }
+        for (String modelName : builtInAnomalyDetectionModel) {
+          modelTableResp.addModelInformation(
+              new ModelInformation(ModelType.BUILT_IN_ANOMALY_DETECTION, modelName));
+        }
+      }
+      return modelTableResp;
+    } catch (IOException e) {
+      LOGGER.warn("Fail to get ModelTable", e);
+      return new ModelTableResp(
+          new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode())
+              .setMessage(e.getMessage()));
+    } finally {
+      releaseModelTableReadLock();
+    }
+  }
+
+  private boolean containsBuiltInModelName(Set<String> builtInModelSet, String modelName) {
+    // compare ignoring case
+    for (String builtInModelName : builtInModelSet) {
+      if (builtInModelName.equalsIgnoreCase(modelName)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  public ModelType checkModelType(String modelName) {
+    if (containsBuiltInModelName(builtInForecastModel, modelName)) {
+      return ModelType.BUILT_IN_FORECAST;
+    } else if (containsBuiltInModelName(builtInAnomalyDetectionModel, modelName)) {
+      return ModelType.BUILT_IN_ANOMALY_DETECTION;
+    } else {
+      return ModelType.USER_DEFINED;
+    }
+  }
+
+  private int getAvailableAINodeForModel(String modelName, ModelType modelType) {
+    if (modelType == ModelType.USER_DEFINED) {
+      List<Integer> aiNodeIds = modelNameToNodes.get(modelName);
+      if (aiNodeIds != null) {
+        return aiNodeIds.get(0);
+      }
+    } else {
+      // any AINode is fine for built-in models
+      // 0 is always the nodeId of the ConfigNode, so it's fine to use 0 as a special value
+      return 0;
+    }
+    return -1;
+  }
+
+  // This method will be used by DataNodes to get the schema of the model for inference
+  public GetModelInfoResp getModelInfo(GetModelInfoPlan plan) {
+    acquireModelTableReadLock();
+    try {
+      String modelName = plan.getModelId();
+      GetModelInfoResp getModelInfoResp;
+      ModelInformation modelInformation;
+      ModelType modelType;
+      // check if it's a built-in model
+      if ((modelType = checkModelType(modelName)) != ModelType.USER_DEFINED) {
+        modelInformation = new ModelInformation(modelType, modelName);
+      } else {
+        modelInformation = modelTable.getModelInformationById(modelName);
+      }
+
+      if (modelInformation != null) {
+        getModelInfoResp =
+            new GetModelInfoResp(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()));
+      } else {
+        TSStatus errorStatus = new TSStatus(TSStatusCode.GET_MODEL_INFO_ERROR.getStatusCode());
+        errorStatus.setMessage(String.format("model [%s] has not been created.", modelName));
+        getModelInfoResp = new GetModelInfoResp(errorStatus);
+        return getModelInfoResp;
+      }
+      PublicBAOS buffer = new PublicBAOS();
+      DataOutputStream stream = new DataOutputStream(buffer);
+      modelInformation.serialize(stream);
+      getModelInfoResp.setModelInfo(ByteBuffer.wrap(buffer.getBuf(), 0, buffer.size()));
+      // select the nodeId to process the task; currently we use the first one by default.
+      int aiNodeId = getAvailableAINodeForModel(modelName, modelType);
+      if (aiNodeId == -1) {
+        TSStatus errorStatus = new TSStatus(TSStatusCode.GET_MODEL_INFO_ERROR.getStatusCode());
+        errorStatus.setMessage(
+            String.format("There is no AINode available for model [%s]", modelName));
+        getModelInfoResp = new GetModelInfoResp(errorStatus);
+        return getModelInfoResp;
+      } else {
+        getModelInfoResp.setTargetAINodeId(aiNodeId);
+      }
+      return getModelInfoResp;
+    } catch (IOException e) {
+      LOGGER.warn("Fail to get model info", e);
+      return new GetModelInfoResp(
+          new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode())
+              .setMessage(e.getMessage()));
+    } finally {
+      releaseModelTableReadLock();
+    }
+  }
+
+  public TSStatus updateModelInfo(UpdateModelInfoPlan plan) {
+    acquireModelTableWriteLock();
+    try {
+      String modelName = plan.getModelName();
+      if (modelTable.containsModel(modelName)) {
+        modelTable.updateModel(modelName, plan.getModelInformation());
+      }
+      if (!plan.getNodeIds().isEmpty()) {
+        // only used in model registration, so we can just put the nodeIds in the map without
+        // checking
+        modelNameToNodes.put(modelName, plan.getNodeIds());
+      }
+      return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
+    } finally {
+      releaseModelTableWriteLock();
+    }
+  }
+
+  @Override
+  public boolean processTakeSnapshot(File snapshotDir) throws TException, IOException {
+    File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
+    if (snapshotFile.exists() && snapshotFile.isFile()) {
+      LOGGER.error(
+          "Failed to take snapshot of ModelInfo, because snapshot file [{}] already exists.",
+          snapshotFile.getAbsolutePath());
+      return false;
+    }
+
+    acquireModelTableReadLock();
+    try (FileOutputStream fileOutputStream = new FileOutputStream(snapshotFile)) {
+      modelTable.serialize(fileOutputStream);
+      ReadWriteIOUtils.write(modelNameToNodes.size(), fileOutputStream);
+      for (Map.Entry<String, List<Integer>> entry : modelNameToNodes.entrySet()) {
+        ReadWriteIOUtils.write(entry.getKey(), fileOutputStream);
+        ReadWriteIOUtils.write(entry.getValue().size(), fileOutputStream);
+        for (Integer nodeId : entry.getValue()) {
+          ReadWriteIOUtils.write(nodeId, fileOutputStream);
+        }
+      }
+      fileOutputStream.getFD().sync();
+      return true;
+    } finally {
+      releaseModelTableReadLock();
+ } } + + @Override + public void processLoadSnapshot(File snapshotDir) throws TException, IOException { + File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME); + if (!snapshotFile.exists() || !snapshotFile.isFile()) { + LOGGER.error( + "Failed to load snapshot of ModelInfo, snapshot file [{}] does not exist.", + snapshotFile.getAbsolutePath()); + return; + } + acquireModelTableWriteLock(); + try (FileInputStream fileInputStream = new FileInputStream(snapshotFile)) { + modelTable.clear(); + modelTable = ModelTable.deserialize(fileInputStream); + int size = ReadWriteIOUtils.readInt(fileInputStream); + for (int i = 0; i < size; i++) { + String modelName = ReadWriteIOUtils.readString(fileInputStream); + int nodeSize = ReadWriteIOUtils.readInt(fileInputStream); + List<Integer> nodes = new LinkedList<>(); + for (int j = 0; j < nodeSize; j++) { + nodes.add(ReadWriteIOUtils.readInt(fileInputStream)); + } + modelNameToNodes.put(modelName, nodes); + } + } finally { + releaseModelTableWriteLock(); + } + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ProcedureInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ProcedureInfo.java index b2abf1f95e8e1..5589fdd37994b 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ProcedureInfo.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ProcedureInfo.java @@ -171,7 +171,7 @@ private static Optional loadProcedure(Path procedureFilePath) { } return Optional.ofNullable(procedure); } - } catch (IOException e) { + } catch (Exception e) { LOGGER.error("Load {} failed, it will be deleted.", procedureFilePath, e); if (!procedureFilePath.toFile().delete()) { LOGGER.error("{} deleted failed; take appropriate action.", procedureFilePath, e); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/TTLInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/TTLInfo.java index 0848fcc4fbf54..9587730988c92 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/TTLInfo.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/TTLInfo.java @@ -157,6 +157,24 @@ public int getTTLCount() { } } + /** + * Get the maximum ttl of the subtree of the corresponding database. + * + * @param database the path of the database. + * @return the maximum ttl of the subtree of the corresponding database. Returns NULL_TTL if the + * TTL is not set or the database does not exist.
+ */ + public long getDatabaseMaxTTL(String database) { + lock.readLock().lock(); + try { + return ttlCache.getDatabaseMaxTTL(database); + } catch (IllegalPathException e) { + return TTLCache.NULL_TTL; + } finally { + lock.readLock().unlock(); + } + } + @Override public boolean processTakeSnapshot(File snapshotDir) throws TException, IOException { File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/TriggerInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/TriggerInfo.java index e324c6a7c4db7..065ae826fb804 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/TriggerInfo.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/TriggerInfo.java @@ -247,7 +247,6 @@ public boolean processTakeSnapshot(File snapshotDir) throws TException, IOExcept return false; } - acquireTriggerTableLock(); try (FileOutputStream fileOutputStream = new FileOutputStream(snapshotFile)) { serializeExistedJarToMD5(fileOutputStream); @@ -257,8 +256,6 @@ public boolean processTakeSnapshot(File snapshotDir) throws TException, IOExcept // fsync fileOutputStream.getFD().sync(); return true; - } finally { - releaseTriggerTableLock(); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/UDFInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/UDFInfo.java index 51f997192a915..1c7682dd35b7c 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/UDFInfo.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/UDFInfo.java @@ -194,7 +194,6 @@ public boolean processTakeSnapshot(File snapshotDir) throws IOException { return false; } - acquireUDFTableLock(); try (FileOutputStream fileOutputStream = new FileOutputStream(snapshotFile)) { serializeExistedJarToMD5(fileOutputStream); @@ -205,8 +204,6 @@ public boolean processTakeSnapshot(File snapshotDir) throws IOException { fileOutputStream.getFD().sync(); return true; - } finally { - releaseUDFTableLock(); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java index dc1ffbde72034..4facb308d8a2c 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java @@ -27,11 +27,15 @@ import org.apache.iotdb.commons.schema.ttl.TTLCache; import org.apache.iotdb.commons.snapshot.SnapshotProcessor; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; -import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan; +import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan; +import org.apache.iotdb.confignode.consensus.request.read.ainode.GetAINodeConfigurationPlan; +import org.apache.iotdb.confignode.consensus.request.read.auth.AuthorReadPlan; import org.apache.iotdb.confignode.consensus.request.read.database.CountDatabasePlan; import org.apache.iotdb.confignode.consensus.request.read.database.GetDatabasePlan; import org.apache.iotdb.confignode.consensus.request.read.datanode.GetDataNodeConfigurationPlan; import 
org.apache.iotdb.confignode.consensus.request.read.function.GetUDFJarPlan; +import org.apache.iotdb.confignode.consensus.request.read.model.GetModelInfoPlan; +import org.apache.iotdb.confignode.consensus.request.read.model.ShowModelPlan; import org.apache.iotdb.confignode.consensus.request.read.partition.CountTimeSlotListPlan; import org.apache.iotdb.confignode.consensus.request.read.partition.GetDataPartitionPlan; import org.apache.iotdb.confignode.consensus.request.read.partition.GetNodePathsPartitionPlan; @@ -49,6 +53,10 @@ import org.apache.iotdb.confignode.consensus.request.read.trigger.GetTriggerLocationPlan; import org.apache.iotdb.confignode.consensus.request.read.trigger.GetTriggerTablePlan; import org.apache.iotdb.confignode.consensus.request.read.ttl.ShowTTLPlan; +import org.apache.iotdb.confignode.consensus.request.write.ainode.RegisterAINodePlan; +import org.apache.iotdb.confignode.consensus.request.write.ainode.RemoveAINodePlan; +import org.apache.iotdb.confignode.consensus.request.write.ainode.UpdateAINodePlan; +import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.consensus.request.write.confignode.ApplyConfigNodePlan; import org.apache.iotdb.confignode.consensus.request.write.confignode.RemoveConfigNodePlan; import org.apache.iotdb.confignode.consensus.request.write.confignode.UpdateClusterIdPlan; @@ -70,7 +78,12 @@ import org.apache.iotdb.confignode.consensus.request.write.datanode.UpdateDataNodePlan; import org.apache.iotdb.confignode.consensus.request.write.function.CreateFunctionPlan; import org.apache.iotdb.confignode.consensus.request.write.function.DropFunctionPlan; +import org.apache.iotdb.confignode.consensus.request.write.model.CreateModelPlan; +import org.apache.iotdb.confignode.consensus.request.write.model.DropModelInNodePlan; +import org.apache.iotdb.confignode.consensus.request.write.model.DropModelPlan; +import org.apache.iotdb.confignode.consensus.request.write.model.UpdateModelInfoPlan; import org.apache.iotdb.confignode.consensus.request.write.partition.AddRegionLocationPlan; +import org.apache.iotdb.confignode.consensus.request.write.partition.AutoCleanPartitionTablePlan; import org.apache.iotdb.confignode.consensus.request.write.partition.CreateDataPartitionPlan; import org.apache.iotdb.confignode.consensus.request.write.partition.CreateSchemaPartitionPlan; import org.apache.iotdb.confignode.consensus.request.write.partition.RemoveRegionLocationPlan; @@ -118,6 +131,7 @@ import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent; import org.apache.iotdb.confignode.persistence.AuthorInfo; import org.apache.iotdb.confignode.persistence.ClusterInfo; +import org.apache.iotdb.confignode.persistence.ModelInfo; import org.apache.iotdb.confignode.persistence.ProcedureInfo; import org.apache.iotdb.confignode.persistence.TTLInfo; import org.apache.iotdb.confignode.persistence.TriggerInfo; @@ -176,6 +190,8 @@ public class ConfigPlanExecutor { private final CQInfo cqInfo; + private final ModelInfo modelInfo; + private final PipeInfo pipeInfo; private final SubscriptionInfo subscriptionInfo; @@ -194,6 +210,7 @@ public ConfigPlanExecutor( UDFInfo udfInfo, TriggerInfo triggerInfo, CQInfo cqInfo, + ModelInfo modelInfo, PipeInfo pipeInfo, SubscriptionInfo subscriptionInfo, QuotaInfo quotaInfo, @@ -225,6 +242,9 @@ public ConfigPlanExecutor( this.cqInfo = cqInfo; this.snapshotProcessorList.add(cqInfo); + this.modelInfo = modelInfo; + this.snapshotProcessorList.add(modelInfo); + this.pipeInfo = 
pipeInfo; this.snapshotProcessorList.add(pipeInfo); @@ -243,11 +263,13 @@ public ConfigPlanExecutor( this.snapshotProcessorList.add(PipeConfigNodeAgent.runtime().listener()); } - public DataSet executeQueryPlan(ConfigPhysicalPlan req) + public DataSet executeQueryPlan(final ConfigPhysicalReadPlan req) throws UnknownPhysicalPlanTypeException, AuthException { switch (req.getType()) { case GetDataNodeConfiguration: return nodeInfo.getDataNodeConfiguration((GetDataNodeConfigurationPlan) req); + case GetAINodeConfiguration: + return nodeInfo.getAINodeConfiguration((GetAINodeConfigurationPlan) req); case CountDatabase: return clusterSchemaInfo.countMatchedDatabases((CountDatabasePlan) req); case GetDatabase: @@ -259,13 +281,13 @@ public DataSet executeQueryPlan(ConfigPhysicalPlan req) case GetOrCreateSchemaPartition: return partitionInfo.getSchemaPartition((GetSchemaPartitionPlan) req); case ListUser: - return authorInfo.executeListUsers((AuthorPlan) req); + return authorInfo.executeListUsers((AuthorReadPlan) req); case ListRole: - return authorInfo.executeListRoles((AuthorPlan) req); + return authorInfo.executeListRoles((AuthorReadPlan) req); case ListUserPrivilege: - return authorInfo.executeListUserPrivileges((AuthorPlan) req); + return authorInfo.executeListUserPrivileges((AuthorReadPlan) req); case ListRolePrivilege: - return authorInfo.executeListRolePrivileges((AuthorPlan) req); + return authorInfo.executeListRolePrivileges((AuthorReadPlan) req); case GetNodePathsPartition: return getSchemaNodeManagementPartition(req); case GetRegionInfoList: @@ -304,6 +326,10 @@ public DataSet executeQueryPlan(ConfigPhysicalPlan req) return udfInfo.getUDFTable(); case GetFunctionJar: return udfInfo.getUDFJar((GetUDFJarPlan) req); + case ShowModel: + return modelInfo.showModel((ShowModelPlan) req); + case GetModelInfo: + return modelInfo.getModelInfo((GetModelInfoPlan) req); case GetPipePluginTable: return pipeInfo.getPipePluginInfo().showPipePlugins(); case GetPipePluginJar: @@ -335,6 +361,12 @@ public TSStatus executeNonQueryPlan(ConfigPhysicalPlan physicalPlan) return status; } return partitionInfo.updateDataNode((UpdateDataNodePlan) physicalPlan); + case RegisterAINode: + return nodeInfo.registerAINode((RegisterAINodePlan) physicalPlan); + case UpdateAINodeConfiguration: + return nodeInfo.updateAINode((UpdateAINodePlan) physicalPlan); + case RemoveAINode: + return nodeInfo.removeAINode((RemoveAINodePlan) physicalPlan); case CreateDatabase: status = clusterSchemaInfo.createDatabase((DatabaseSchemaPlan) physicalPlan); if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { @@ -382,6 +414,8 @@ public TSStatus executeNonQueryPlan(ConfigPhysicalPlan physicalPlan) return partitionInfo.createSchemaPartition((CreateSchemaPartitionPlan) physicalPlan); case CreateDataPartition: return partitionInfo.createDataPartition((CreateDataPartitionPlan) physicalPlan); + case AutoCleanPartitionTable: + return partitionInfo.autoCleanPartitionTable((AutoCleanPartitionTablePlan) physicalPlan); case UpdateProcedure: return procedureInfo.updateProcedure((UpdateProcedurePlan) physicalPlan); case DeleteProcedure: @@ -496,10 +530,22 @@ public TSStatus executeNonQueryPlan(ConfigPhysicalPlan physicalPlan) return cqInfo.activeCQ((ActiveCQPlan) physicalPlan); case UPDATE_CQ_LAST_EXEC_TIME: return cqInfo.updateCQLastExecutionTime((UpdateCQLastExecTimePlan) physicalPlan); + case CreateModel: + return modelInfo.createModel((CreateModelPlan) physicalPlan); + case UpdateModelInfo: + return 
modelInfo.updateModelInfo((UpdateModelInfoPlan) physicalPlan); + case DropModel: + return modelInfo.dropModel(((DropModelPlan) physicalPlan).getModelName()); + case DropModelInNode: + return modelInfo.dropModelInNode(((DropModelInNodePlan) physicalPlan).getNodeId()); case CreatePipePlugin: return pipeInfo.getPipePluginInfo().createPipePlugin((CreatePipePluginPlan) physicalPlan); case DropPipePlugin: return pipeInfo.getPipePluginInfo().dropPipePlugin((DropPipePluginPlan) physicalPlan); + case setSpaceQuota: + return quotaInfo.setSpaceQuota((SetSpaceQuotaPlan) physicalPlan); + case setThrottleQuota: + return quotaInfo.setThrottleQuota((SetThrottleQuotaPlan) physicalPlan); case CreatePipeSinkV1: case DropPipeV1: case DropPipeSinkV1: @@ -509,10 +555,6 @@ public TSStatus executeNonQueryPlan(ConfigPhysicalPlan physicalPlan) case SetPipeStatusV1: case ShowPipeV1: return new TSStatus(TSStatusCode.INCOMPATIBLE_VERSION.getStatusCode()); - case setSpaceQuota: - return quotaInfo.setSpaceQuota((SetSpaceQuotaPlan) physicalPlan); - case setThrottleQuota: - return quotaInfo.setThrottleQuota((SetThrottleQuotaPlan) physicalPlan); case PipeEnriched: return executeNonQueryPlan(((PipeEnrichedPlan) physicalPlan).getInnerPlan()); case PipeDeleteTimeSeries: @@ -571,7 +613,7 @@ public boolean takeSnapshot(File snapshotDir) { x.getClass().getName(), System.currentTimeMillis() - startTime); } catch (TException | IOException e) { - LOGGER.error("Take snapshot error: {}", e.getMessage()); + LOGGER.error("Take snapshot error", e); takeSnapshotResult = false; } finally { // If any snapshot fails, the whole fails @@ -612,7 +654,7 @@ public void loadSnapshot(File latestSnapshotRootDir) { System.currentTimeMillis() - startTime); } catch (TException | IOException e) { result.set(false); - LOGGER.error("Load snapshot error: {}", e.getMessage()); + LOGGER.error("Load snapshot error", e); } }); if (result.get()) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/node/NodeInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/node/NodeInfo.java index 4f1c8819d8fed..dd9ea5e9e920c 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/node/NodeInfo.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/node/NodeInfo.java @@ -19,19 +19,28 @@ package org.apache.iotdb.confignode.persistence.node; +import org.apache.iotdb.common.rpc.thrift.TAINodeConfiguration; +import org.apache.iotdb.common.rpc.thrift.TAINodeLocation; import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; import org.apache.iotdb.common.rpc.thrift.TDataNodeConfiguration; import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.cluster.NodeStatus; +import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.snapshot.SnapshotProcessor; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.iotdb.confignode.conf.SystemPropertiesUtils; +import org.apache.iotdb.confignode.consensus.request.read.ainode.GetAINodeConfigurationPlan; import org.apache.iotdb.confignode.consensus.request.read.datanode.GetDataNodeConfigurationPlan; +import org.apache.iotdb.confignode.consensus.request.write.ainode.RegisterAINodePlan; +import org.apache.iotdb.confignode.consensus.request.write.ainode.RemoveAINodePlan; +import org.apache.iotdb.confignode.consensus.request.write.ainode.UpdateAINodePlan; import 
org.apache.iotdb.confignode.consensus.request.write.confignode.ApplyConfigNodePlan; import org.apache.iotdb.confignode.consensus.request.write.confignode.RemoveConfigNodePlan; import org.apache.iotdb.confignode.consensus.request.write.confignode.UpdateVersionInfoPlan; import org.apache.iotdb.confignode.consensus.request.write.datanode.RegisterDataNodePlan; import org.apache.iotdb.confignode.consensus.request.write.datanode.RemoveDataNodePlan; import org.apache.iotdb.confignode.consensus.request.write.datanode.UpdateDataNodePlan; +import org.apache.iotdb.confignode.consensus.response.ainode.AINodeConfigurationResp; import org.apache.iotdb.confignode.consensus.response.datanode.DataNodeConfigurationResp; import org.apache.iotdb.confignode.rpc.thrift.TNodeVersionInfo; import org.apache.iotdb.rpc.TSStatusCode; @@ -44,12 +53,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.ByteArrayInputStream; import java.io.File; -import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.nio.file.Files; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -62,6 +72,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.ReentrantReadWriteLock; +import static org.apache.iotdb.confignode.conf.ConfigNodeConstant.REMOVE_AINODE_PROCESS; import static org.apache.iotdb.confignode.conf.ConfigNodeConstant.REMOVE_DATANODE_PROCESS; /** @@ -91,6 +102,9 @@ public class NodeInfo implements SnapshotProcessor { private final Map<Integer, TDataNodeConfiguration> registeredDataNodes; private final ReentrantReadWriteLock dataNodeInfoReadWriteLock; + private final Map<Integer, TAINodeConfiguration> registeredAINodes; + private final ReentrantReadWriteLock aiNodeInfoReadWriteLock; + private final Map<Integer, TNodeVersionInfo> nodeVersionInfo; private final ReentrantReadWriteLock versionInfoReadWriteLock; @@ -103,6 +117,9 @@ public NodeInfo() { this.dataNodeInfoReadWriteLock = new ReentrantReadWriteLock(); this.registeredDataNodes = new ConcurrentHashMap<>(); + this.aiNodeInfoReadWriteLock = new ReentrantReadWriteLock(); + this.registeredAINodes = new ConcurrentHashMap<>(); + this.nodeVersionInfo = new ConcurrentHashMap<>(); this.versionInfoReadWriteLock = new ReentrantReadWriteLock(); } @@ -223,6 +240,28 @@ public DataNodeConfigurationResp getDataNodeConfiguration( return result; } + public AINodeConfigurationResp getAINodeConfiguration( + GetAINodeConfigurationPlan getAINodeConfigurationPlan) { + AINodeConfigurationResp result = new AINodeConfigurationResp(); + result.setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); + + int aiNodeId = getAINodeConfigurationPlan.getAiNodeId(); + aiNodeInfoReadWriteLock.readLock().lock(); + try { + if (aiNodeId == -1) { + result.setAiNodeConfigurationMap(new HashMap<>(registeredAINodes)); + } else { + result.setAiNodeConfigurationMap( + registeredAINodes.get(aiNodeId) == null + ? new HashMap<>(0) + : Collections.singletonMap(aiNodeId, registeredAINodes.get(aiNodeId))); + } + } finally { + aiNodeInfoReadWriteLock.readLock().unlock(); + } + return result; + } + /** Return the number of registered Nodes.
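getAINodeConfiguration above follows the pattern NodeInfo uses throughout: take the read lock, build a defensive copy of exactly what the caller asked for (everything when the id is -1, otherwise a singleton or empty map), and release the lock in finally. A generic sketch of that pattern, with illustrative names rather than IoTDB types:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class GuardedRegistrySketch<V> {
  private final Map<Integer, V> registry = new HashMap<>();
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  // id == -1 means "all"; otherwise a singleton (or empty) view.
  // Callers always receive a copy, never the guarded map itself.
  Map<Integer, V> lookup(int id) {
    lock.readLock().lock();
    try {
      if (id == -1) {
        return new HashMap<>(registry);
      }
      V value = registry.get(id);
      return value == null ? new HashMap<>(0) : Collections.singletonMap(id, value);
    } finally {
      lock.readLock().unlock();
    }
  }
}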
*/ public int getRegisteredNodeCount() { int result; @@ -253,6 +292,7 @@ public int getRegisteredDataNodeCount() { return result; } + // Please do not delete this method even if it is not used for now public int getDataNodeCpuCoreCount(int dataNodeId) { try { return registeredDataNodes.get(dataNodeId).getResource().getCpuCoreNum(); @@ -321,6 +361,47 @@ public List getRegisteredDataNodes(List dataNod return result; } + public List<TAINodeConfiguration> getRegisteredAINodes() { + List<TAINodeConfiguration> result; + aiNodeInfoReadWriteLock.readLock().lock(); + try { + result = new ArrayList<>(registeredAINodes.values()); + } finally { + aiNodeInfoReadWriteLock.readLock().unlock(); + } + return result; + } + + public TAINodeConfiguration getRegisteredAINode(int aiNodeId) { + aiNodeInfoReadWriteLock.readLock().lock(); + try { + return registeredAINodes.getOrDefault(aiNodeId, new TAINodeConfiguration()).deepCopy(); + } finally { + aiNodeInfoReadWriteLock.readLock().unlock(); + } + } + + /** Return the number of registered AINodes. */ + public int getRegisteredAINodeCount() { + int result; + aiNodeInfoReadWriteLock.readLock().lock(); + try { + result = registeredAINodes.size(); + } finally { + aiNodeInfoReadWriteLock.readLock().unlock(); + } + return result; + } + + public boolean containsAINode(int aiNodeId) { + aiNodeInfoReadWriteLock.readLock().lock(); + try { + return registeredAINodes.containsKey(aiNodeId); + } finally { + aiNodeInfoReadWriteLock.readLock().unlock(); + } + } + /** * Update ConfigNodeList both in memory and confignode-system{@literal .}properties file. * @@ -367,6 +448,11 @@ public TSStatus applyConfigNode(ApplyConfigNodePlan applyConfigNodePlan) { * @return {@link TSStatusCode#REMOVE_CONFIGNODE_ERROR} if remove online ConfigNode failed. */ public TSStatus removeConfigNode(RemoveConfigNodePlan removeConfigNodePlan) { + if (removeConfigNodePlan.getConfigNodeLocation().getConfigNodeId() + == ConfigNodeDescriptor.getInstance().getConf().getConfigNodeId()) { + // set myself to Removing status + CommonDescriptor.getInstance().getConfig().setNodeStatus(NodeStatus.Removing); + } TSStatus status = new TSStatus(); configNodeInfoReadWriteLock.writeLock().lock(); versionInfoReadWriteLock.writeLock().lock(); @@ -391,6 +477,77 @@ public TSStatus removeConfigNode(RemoveConfigNodePlan removeConfigNodePlan) { return status; } + /** + * Persist AINode info. + * + * @param registerAINodePlan RegisterAINodePlan + * @return {@link TSStatusCode#SUCCESS_STATUS} + */ + public TSStatus registerAINode(RegisterAINodePlan registerAINodePlan) { + TSStatus result; + TAINodeConfiguration info = registerAINodePlan.getAINodeConfiguration(); + aiNodeInfoReadWriteLock.writeLock().lock(); + try { + synchronized (nextNodeId) { + if (nextNodeId.get() < info.getLocation().getAiNodeId()) { + nextNodeId.set(info.getLocation().getAiNodeId()); + } + } + registeredAINodes.put(info.getLocation().getAiNodeId(), info); + result = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } finally { + aiNodeInfoReadWriteLock.writeLock().unlock(); + } + return result; + } + + /** + * Update the specified AINode's location. + * + * @param updateAINodePlan UpdateAINodePlan + * @return {@link TSStatusCode#SUCCESS_STATUS} if the AINode info is updated successfully.
+ */ + public TSStatus updateAINode(UpdateAINodePlan updateAINodePlan) { + aiNodeInfoReadWriteLock.writeLock().lock(); + try { + TAINodeConfiguration newConfiguration = updateAINodePlan.getAINodeConfiguration(); + registeredAINodes.replace(newConfiguration.getLocation().getAiNodeId(), newConfiguration); + } finally { + aiNodeInfoReadWriteLock.writeLock().unlock(); + } + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + + /** + * Persist information about removing an AINode. + * + * @param req RemoveAINodePlan + * @return {@link TSStatus} + */ + public TSStatus removeAINode(RemoveAINodePlan req) { + LOGGER.info( + "{}, There are {} AI nodes in cluster before executing RemoveAINodePlan", + REMOVE_AINODE_PROCESS, + registeredAINodes.size()); + + aiNodeInfoReadWriteLock.writeLock().lock(); + versionInfoReadWriteLock.writeLock().lock(); + TAINodeLocation removedAINode = req.getAINodeLocation(); + try { + registeredAINodes.remove(removedAINode.getAiNodeId()); + nodeVersionInfo.remove(removedAINode.getAiNodeId()); + LOGGER.info("Removed the AINode {} from cluster", removedAINode); + } finally { + versionInfoReadWriteLock.writeLock().unlock(); + aiNodeInfoReadWriteLock.writeLock().unlock(); + } + LOGGER.info( + "{}, There are {} AI nodes in cluster after executing RemoveAINodePlan", + REMOVE_AINODE_PROCESS, + registeredAINodes.size()); + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + /** * Update the specified Node's versionInfo. * @@ -482,6 +639,7 @@ public boolean processTakeSnapshot(File snapshotDir) throws IOException, TExcept File tmpFile = new File(snapshotFile.getAbsolutePath() + "-" + UUID.randomUUID()); configNodeInfoReadWriteLock.readLock().lock(); dataNodeInfoReadWriteLock.readLock().lock(); + aiNodeInfoReadWriteLock.readLock().lock(); versionInfoReadWriteLock.readLock().lock(); try (FileOutputStream fileOutputStream = new FileOutputStream(tmpFile); TIOStreamTransport tioStreamTransport = new TIOStreamTransport(fileOutputStream)) { @@ -494,6 +652,8 @@ public boolean processTakeSnapshot(File snapshotDir) throws IOException, TExcept serializeRegisteredDataNode(fileOutputStream, protocol); + serializeRegisteredAINode(fileOutputStream, protocol); + serializeVersionInfo(fileOutputStream); tioStreamTransport.flush(); @@ -505,6 +665,7 @@ public boolean processTakeSnapshot(File snapshotDir) throws IOException, TExcept return tmpFile.renameTo(snapshotFile); } finally { versionInfoReadWriteLock.readLock().unlock(); + aiNodeInfoReadWriteLock.readLock().unlock(); dataNodeInfoReadWriteLock.readLock().unlock(); configNodeInfoReadWriteLock.readLock().unlock(); for (int retry = 0; retry < 5; retry++) { @@ -536,6 +697,15 @@ private void serializeRegisteredDataNode(OutputStream outputStream, TProtocol pr } } + private void serializeRegisteredAINode(OutputStream outputStream, TProtocol protocol) + throws IOException, TException { + ReadWriteIOUtils.write(registeredAINodes.size(), outputStream); + for (Entry<Integer, TAINodeConfiguration> entry : registeredAINodes.entrySet()) { + ReadWriteIOUtils.write(entry.getKey(), outputStream); + entry.getValue().write(protocol); + } + } + private void serializeVersionInfo(OutputStream outputStream) throws IOException { ReadWriteIOUtils.write(nodeVersionInfo.size(), outputStream); for (Entry<Integer, TNodeVersionInfo> entry : nodeVersionInfo.entrySet()) { @@ -558,24 +728,31 @@ public void processLoadSnapshot(File snapshotDir) throws IOException, TException configNodeInfoReadWriteLock.writeLock().lock(); dataNodeInfoReadWriteLock.writeLock().lock(); +
aiNodeInfoReadWriteLock.writeLock().lock(); versionInfoReadWriteLock.writeLock().lock(); - try (FileInputStream fileInputStream = new FileInputStream(snapshotFile); - TIOStreamTransport tioStreamTransport = new TIOStreamTransport(fileInputStream)) { + try (ByteArrayInputStream inputStream = + new ByteArrayInputStream(Files.readAllBytes(snapshotFile.toPath())); + TIOStreamTransport tioStreamTransport = new TIOStreamTransport(inputStream)) { TProtocol protocol = new TBinaryProtocol(tioStreamTransport); clear(); - nextNodeId.set(ReadWriteIOUtils.readInt(fileInputStream)); + nextNodeId.set(ReadWriteIOUtils.readInt(inputStream)); - deserializeRegisteredConfigNode(fileInputStream, protocol); + deserializeRegisteredConfigNode(inputStream, protocol); - deserializeRegisteredDataNode(fileInputStream, protocol); + deserializeRegisteredDataNode(inputStream, protocol); - deserializeBuildInfo(fileInputStream); + // TODO: Compatibility design. Should replace this function with the actual deserialization + // method in IoTDB 2.2 / 1.5 + tryDeserializeRegisteredAINode(inputStream, protocol); + + deserializeBuildInfo(inputStream); } finally { versionInfoReadWriteLock.writeLock().unlock(); + aiNodeInfoReadWriteLock.writeLock().unlock(); dataNodeInfoReadWriteLock.writeLock().unlock(); configNodeInfoReadWriteLock.writeLock().unlock(); } @@ -605,6 +782,30 @@ private void deserializeRegisteredDataNode(InputStream inputStream, TProtocol pr } } + private void tryDeserializeRegisteredAINode(ByteArrayInputStream inputStream, TProtocol protocol) + throws IOException { + try { + // the readlimit argument has no effect on ByteArrayInputStream + inputStream.mark(0); + deserializeRegisteredAINode(inputStream, protocol); + } catch (IOException | TException ignore) { + // An exception here means the snapshot was written by an older version without AINode info + inputStream.reset(); + } + } + + private void deserializeRegisteredAINode(InputStream inputStream, TProtocol protocol) + throws IOException, TException { + int size = ReadWriteIOUtils.readInt(inputStream); + while (size > 0) { + int aiNodeId = ReadWriteIOUtils.readInt(inputStream); + TAINodeConfiguration aiNodeInfo = new TAINodeConfiguration(); + aiNodeInfo.read(protocol); + registeredAINodes.put(aiNodeId, aiNodeInfo); + size--; + } + } + private void deserializeBuildInfo(InputStream inputStream) throws IOException { // old version may not have build info, // thus we need to check inputStream before deserialize.
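tryDeserializeRegisteredAINode relies on the snapshot now being fully buffered in a ByteArrayInputStream: mark/reset is always supported there, so an optional trailing section can be probed and rolled back when the snapshot predates it. A compact sketch of the same probe-and-rewind pattern (names are illustrative):

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

final class OptionalSectionReaderSketch {
  // Attempt to read an optional trailing int; on failure (older snapshot
  // without the section), rewind so later reads are unaffected.
  static int readOptionalInt(ByteArrayInputStream in, int fallback) {
    in.mark(0); // readlimit is ignored by ByteArrayInputStream
    try {
      return new DataInputStream(in).readInt();
    } catch (IOException olderSnapshot) {
      in.reset();
      return fallback;
    }
  }
}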
@@ -628,6 +829,7 @@ public void clear() { nextNodeId.set(-1); registeredDataNodes.clear(); registeredConfigNodes.clear(); + registeredAINodes.clear(); nodeVersionInfo.clear(); } @@ -643,6 +845,7 @@ public boolean equals(Object o) { return registeredConfigNodes.equals(nodeInfo.registeredConfigNodes) && nextNodeId.get() == nodeInfo.nextNodeId.get() && registeredDataNodes.equals(nodeInfo.registeredDataNodes) + && registeredAINodes.equals(nodeInfo.registeredAINodes) && nodeVersionInfo.equals(nodeInfo.nodeVersionInfo); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/DatabasePartitionTable.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/DatabasePartitionTable.java index 1ff56a89bb0fb..b301b13b56ef8 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/DatabasePartitionTable.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/DatabasePartitionTable.java @@ -538,7 +538,7 @@ void addRegionNewLocation(TConsensusGroupId regionId, TDataNodeLocation node) { regionGroup.addRegionLocation(node); } - void removeRegionLocation(TConsensusGroupId regionId, TDataNodeLocation node) { + void removeRegionLocation(TConsensusGroupId regionId, int nodeId) { RegionGroup regionGroup = regionGroupMap.get(regionId); if (regionGroup == null) { LOGGER.warn( @@ -547,16 +547,18 @@ void removeRegionLocation(TConsensusGroupId regionId, TDataNodeLocation node) { databaseName); return; } - if (!regionGroup.getReplicaSet().getDataNodeLocations().contains(node)) { + if (regionGroup.getReplicaSet().getDataNodeLocations().stream() + .map(TDataNodeLocation::getDataNodeId) + .noneMatch(id -> id == nodeId)) { LOGGER.info( "Node is not in region locations when removeRegionOldLocation in {}, " + "no need to remove it, node: {}, region: {}", databaseName, - node, + nodeId, regionId); return; } - regionGroup.removeRegionLocation(node); + regionGroup.removeRegionLocation(nodeId); } /** @@ -609,6 +611,28 @@ public Map getLastDataAllotTable() { return dataPartitionTable.getLastDataAllotTable(); } + /** + * Remove PartitionTable where the TimeSlot is expired. 
+ * + * @param TTL The Time To Live + * @param currentTimeSlot The current TimeSlot + */ + public void autoCleanPartitionTable(long TTL, TTimePartitionSlot currentTimeSlot) { + long[] removedTimePartitionSlots = + dataPartitionTable.autoCleanPartitionTable(TTL, currentTimeSlot).stream() + .mapToLong(TTimePartitionSlot::getStartTime) + .toArray(); + if (removedTimePartitionSlots.length > 0) { + LOGGER.info( + "[PartitionTableCleaner] The TimePartitions: {} are removed from Database: {}", + removedTimePartitionSlots, + databaseName); + } + } + @Override public boolean equals(Object o) { if (this == o) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java index fe48dd5079776..8c91c99859ed0 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java @@ -41,6 +41,7 @@ import org.apache.iotdb.confignode.consensus.request.write.database.PreDeleteDatabasePlan; import org.apache.iotdb.confignode.consensus.request.write.datanode.UpdateDataNodePlan; import org.apache.iotdb.confignode.consensus.request.write.partition.AddRegionLocationPlan; +import org.apache.iotdb.confignode.consensus.request.write.partition.AutoCleanPartitionTablePlan; import org.apache.iotdb.confignode.consensus.request.write.partition.CreateDataPartitionPlan; import org.apache.iotdb.confignode.consensus.request.write.partition.CreateSchemaPartitionPlan; import org.apache.iotdb.confignode.consensus.request.write.partition.RemoveRegionLocationPlan; @@ -273,7 +274,7 @@ public List getRegionMaintainEntryList() { } /** - * Thread-safely pre-delete the specific StorageGroup. + * Thread-safely pre-delete the specific database. * * @param preDeleteDatabasePlan PreDeleteStorageGroupPlan * @return {@link TSStatusCode#SUCCESS_STATUS} @@ -281,8 +282,8 @@ public List getRegionMaintainEntryList() { public TSStatus preDeleteDatabase(PreDeleteDatabasePlan preDeleteDatabasePlan) { final PreDeleteDatabasePlan.PreDeleteType preDeleteType = preDeleteDatabasePlan.getPreDeleteType(); - final String storageGroup = preDeleteDatabasePlan.getStorageGroup(); - DatabasePartitionTable databasePartitionTable = databasePartitionTables.get(storageGroup); + final String database = preDeleteDatabasePlan.getStorageGroup(); + DatabasePartitionTable databasePartitionTable = databasePartitionTables.get(database); if (databasePartitionTable == null) { return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } @@ -305,12 +306,12 @@ public boolean isDatabasePreDeleted(String database) { } /** - * Thread-safely delete StorageGroup. + * Thread-safely delete database.
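autoCleanPartitionTable above delegates slot selection to dataPartitionTable.autoCleanPartitionTable and only logs what was removed. The diff does not show the expiry comparison itself, so the sketch below is an assumption about its shape: a slot is removable only when a finite TTL is set and every timestamp the slot can contain is older than the current slot's start minus the TTL.

final class PartitionExpirySketch {
  // Matches the guard used in PartitionInfo.autoCleanPartitionTable below:
  // unset (<= 0) or "infinite" (Long.MAX_VALUE) TTLs are skipped.
  static boolean isCleanable(long ttlMs) {
    return 0 < ttlMs && ttlMs < Long.MAX_VALUE;
  }

  // Assumed expiry test: the newest timestamp a slot can contain must be
  // older than the retention boundary derived from the current slot.
  static boolean isExpired(
      long slotStartMs, long slotDurationMs, long currentSlotStartMs, long ttlMs) {
    long newestPossibleTimestamp = slotStartMs + slotDurationMs - 1;
    return newestPossibleTimestamp < currentSlotStartMs - ttlMs;
  }
}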
* - * @param plan DeleteStorageGroupPlan + * @param plan DeleteDatabasePlan */ public void deleteDatabase(DeleteDatabasePlan plan) { - // Clean the StorageGroupTable cache + // Clean the databaseTable cache databasePartitionTables.remove(plan.getName()); } @@ -325,24 +326,24 @@ public DataSet getSchemaPartition(GetSchemaPartitionPlan plan) { // TODO: Replace this map with new SchemaPartition Map<String, SchemaPartitionTable> schemaPartition = new ConcurrentHashMap<>(); - if (plan.getPartitionSlotsMap().size() == 0) { + if (plan.getPartitionSlotsMap().isEmpty()) { // Return all SchemaPartitions when the queried PartitionSlots are empty databasePartitionTables.forEach( - (storageGroup, databasePartitionTable) -> { + (database, databasePartitionTable) -> { if (databasePartitionTable.isNotPreDeleted()) { - schemaPartition.put(storageGroup, new SchemaPartitionTable()); + schemaPartition.put(database, new SchemaPartitionTable()); databasePartitionTable.getSchemaPartition( - new ArrayList<>(), schemaPartition.get(storageGroup)); + new ArrayList<>(), schemaPartition.get(database)); - if (schemaPartition.get(storageGroup).getSchemaPartitionMap().isEmpty()) { + if (schemaPartition.get(database).getSchemaPartitionMap().isEmpty()) { // Remove empty Map - schemaPartition.remove(storageGroup); + schemaPartition.remove(database); } } }); } else { - // Return the SchemaPartition for each StorageGroup + // Return the SchemaPartition for each database plan.getPartitionSlotsMap() .forEach( (database, partitionSlots) -> { @@ -498,6 +499,24 @@ public TSStatus createDataPartition(CreateDataPartitionPlan plan) { return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } + /** + * Remove PartitionTable where the TimeSlot is expired. + * + * @param plan Including TTL and current TimeSlot + */ + public TSStatus autoCleanPartitionTable(AutoCleanPartitionTablePlan plan) { + plan.getDatabaseTTLMap() + .forEach( + (database, ttl) -> { + if (isDatabaseExisted(database) && 0 < ttl && ttl < Long.MAX_VALUE) { + databasePartitionTables + .get(database) + .autoCleanPartitionTable(ttl, plan.getCurrentTimeSlot()); + } + }); + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + /** Get SchemaNodeManagementPartition through matched Database. */ public DataSet getSchemaNodeManagementPartition(List<String> matchedDatabases) { SchemaNodeManagementResp schemaNodeManagementResp = new SchemaNodeManagementResp(); @@ -506,16 +525,16 @@ public DataSet getSchemaNodeManagementPartition(List<String> matchedDatabases) { matchedDatabases.stream() .filter(this::isDatabaseExisted) .forEach( - storageGroup -> { - schemaPartitionMap.put(storageGroup, new SchemaPartitionTable()); + database -> { + schemaPartitionMap.put(database, new SchemaPartitionTable()); databasePartitionTables - .get(storageGroup) - .getSchemaPartition(new ArrayList<>(), schemaPartitionMap.get(storageGroup)); + .get(database) + .getSchemaPartition(new ArrayList<>(), schemaPartitionMap.get(database)); - if (schemaPartitionMap.get(storageGroup).getSchemaPartitionMap().isEmpty()) { + if (schemaPartitionMap.get(database).getSchemaPartitionMap().isEmpty()) { // Remove empty Map - schemaPartitionMap.remove(storageGroup); + schemaPartitionMap.remove(database); } }); @@ -534,10 +553,10 @@ public DataSet getRegionInfoList(GetRegionInfoListPlan regionsInfoPlan) { return regionResp; } TShowRegionReq showRegionReq = regionsInfoPlan.getShowRegionReq(); - final List<String> storageGroups = showRegionReq != null ? showRegionReq.getDatabases() : null; + final List<String> databases = showRegionReq != null ?
showRegionReq.getDatabases() : null; databasePartitionTables.forEach( - (storageGroup, databasePartitionTable) -> { - if (storageGroups != null && !storageGroups.contains(storageGroup)) { + (database, databasePartitionTable) -> { + if (databases != null && !databases.contains(database)) { return; } regionInfoList.addAll(databasePartitionTable.getRegionInfoList(regionsInfoPlan)); @@ -592,7 +611,7 @@ public TSStatus removeRegionLocation(RemoveRegionLocationPlan req) { .forEach( databasePartitionTable -> databasePartitionTable.removeRegionLocation( - req.getRegionId(), req.getDeprecatedLocation())); + req.getRegionId(), req.getDeprecatedLocation().getDataNodeId())); return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } @@ -602,7 +621,7 @@ public TSStatus removeRegionLocation(RemoveRegionLocationPlan req) { * @param regionId regionId * @return database name */ - public String getRegionStorageGroup(TConsensusGroupId regionId) { + public String getRegionDatabase(TConsensusGroupId regionId) { Optional sgPartitionTableOptional = databasePartitionTables.values().stream() .filter(s -> s.containRegionGroup(regionId)) @@ -617,9 +636,9 @@ public String getRegionStorageGroup(TConsensusGroupId regionId) { /** * Only Leader use this interface. Filter unassigned SchemaPartitionSlots. * - * @param partitionSlotsMap Map> - * @return Map>, SchemaPartitionSlots that is not - * assigned in partitionSlotsMap + * @param partitionSlotsMap Map> + * @return Map>, SchemaPartitionSlots that is not assigned in + * partitionSlotsMap */ public Map> filterUnassignedSchemaPartitionSlots( Map> partitionSlotsMap) { @@ -642,9 +661,9 @@ public Map> filterUnassignedSchemaPartitionSl /** * Only Leader use this interface. Filter unassigned SchemaPartitionSlots * - * @param partitionSlotsMap Map> - * @return Map>, DataPartitionSlots - * that is not assigned in partitionSlotsMap + * @param partitionSlotsMap Map> + * @return Map>, DataPartitionSlots that is not + * assigned in partitionSlotsMap */ public Map> filterUnassignedDataPartitionSlots( Map> partitionSlotsMap) { @@ -806,8 +825,8 @@ public int countDataNodeScatterWidth( * * @param database DatabaseName * @param type SchemaRegion or DataRegion - * @return Number of Regions currently owned by the specific StorageGroup - * @throws DatabaseNotExistsException When the specific StorageGroup doesn't exist + * @return Number of Regions currently owned by the specific database + * @throws DatabaseNotExistsException When the specific database doesn't exist */ public int getRegionGroupCount(String database, TConsensusGroupType type) throws DatabaseNotExistsException { @@ -865,7 +884,9 @@ public List getAllRegionGroupIds(String database, TConsensusG * @return The assigned SeriesPartitionSlots count */ public int getAssignedSeriesPartitionSlotsCount(String database) { - return databasePartitionTables.get(database).getAssignedSeriesPartitionSlotsCount(); + return Optional.ofNullable(databasePartitionTables.get(database)) + .map(DatabasePartitionTable::getAssignedSeriesPartitionSlotsCount) + .orElse(0); } /** @@ -877,13 +898,15 @@ public int getAssignedSeriesPartitionSlotsCount(String database) { * @return The assigned TimePartitionSlots count */ public long getAssignedTimePartitionSlotsCount(String database) { - return databasePartitionTables.get(database).getTimeSlotCount(); + return Optional.ofNullable(databasePartitionTables.get(database)) + .map(DatabasePartitionTable::getTimeSlotCount) + .orElse(0L); } /** - * Get the DataNodes who contain the specific StorageGroup's 
Schema or Data. + * Get the DataNodes who contain the specific database's Schema or Data. * - * @param database The specific StorageGroup's name + * @param database The specific database's name * @param type SchemaRegion or DataRegion * @return Set {@literal <}TDataNodeLocation{@literal >}, the related DataNodes */ @@ -897,7 +920,7 @@ public Set getDatabaseRelatedDataNodes( * * @param database DatabaseName * @param type SchemaRegion or DataRegion - * @return The StorageGroup's Running or Available Regions that sorted by the number of allocated + * @return The database's Running or Available Regions that sorted by the number of allocated * slots */ public List> getRegionGroupSlotsCounter( @@ -955,12 +978,12 @@ public boolean processTakeSnapshot(File snapshotDir) throws TException, IOExcept // serialize nextRegionGroupId ReadWriteIOUtils.write(nextRegionGroupId.get(), bufferedOutputStream); - // serialize StorageGroupPartitionTable + // serialize databasePartitionTable ReadWriteIOUtils.write(databasePartitionTables.size(), bufferedOutputStream); - for (Map.Entry storageGroupPartitionTableEntry : + for (Map.Entry databasePartitionTableEntry : databasePartitionTables.entrySet()) { - ReadWriteIOUtils.write(storageGroupPartitionTableEntry.getKey(), bufferedOutputStream); - storageGroupPartitionTableEntry.getValue().serialize(bufferedOutputStream, protocol); + ReadWriteIOUtils.write(databasePartitionTableEntry.getKey(), bufferedOutputStream); + databasePartitionTableEntry.getValue().serialize(bufferedOutputStream, protocol); } // serialize regionCleanList @@ -1012,16 +1035,16 @@ public void processLoadSnapshot(File snapshotDir) throws TException, IOException // start to restore nextRegionGroupId.set(ReadWriteIOUtils.readInt(fileInputStream)); - // restore StorageGroupPartitionTable + // restore databasePartitionTable int length = ReadWriteIOUtils.readInt(fileInputStream); for (int i = 0; i < length; i++) { - String storageGroup = ReadWriteIOUtils.readString(fileInputStream); - if (storageGroup == null) { - throw new IOException("Failed to load snapshot because get null StorageGroup name"); + final String database = ReadWriteIOUtils.readString(fileInputStream); + if (database == null) { + throw new IOException("Failed to load snapshot because get null database name"); } - DatabasePartitionTable databasePartitionTable = new DatabasePartitionTable(storageGroup); + final DatabasePartitionTable databasePartitionTable = new DatabasePartitionTable(database); databasePartitionTable.deserialize(fileInputStream, protocol); - databasePartitionTables.put(storageGroup, databasePartitionTable); + databasePartitionTables.put(database, databasePartitionTable); } // restore deletedRegionSet diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/RegionGroup.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/RegionGroup.java index 0b268e751b199..6f9860b6bce61 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/RegionGroup.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/RegionGroup.java @@ -63,15 +63,15 @@ public RegionGroup(long createTime, TRegionReplicaSet replicaSet) { this.totalTimeSlotCount = new AtomicLong(0); } - public long getCreateTime() { + public synchronized long getCreateTime() { return createTime; } - public TConsensusGroupId getId() { + public synchronized TConsensusGroupId getId() { return replicaSet.getRegionId(); } - 
public TRegionReplicaSet getReplicaSet() { + public synchronized TRegionReplicaSet getReplicaSet() { return replicaSet.deepCopy(); } @@ -80,7 +80,7 @@ public TRegionReplicaSet getReplicaSet() { * * @param newDataNodeLocation The new DataNodeLocation. */ - public void updateDataNode(TDataNodeLocation newDataNodeLocation) { + public synchronized void updateDataNode(TDataNodeLocation newDataNodeLocation) { for (int i = 0; i < replicaSet.getDataNodeLocationsSize(); i++) { if (replicaSet.getDataNodeLocations().get(i).getDataNodeId() == newDataNodeLocation.getDataNodeId()) { @@ -90,20 +90,22 @@ public void updateDataNode(TDataNodeLocation newDataNodeLocation) { } } - public void addRegionLocation(TDataNodeLocation node) { + public synchronized void addRegionLocation(TDataNodeLocation node) { replicaSet.addToDataNodeLocations(node); replicaSet.getDataNodeLocations().sort(TDataNodeLocation::compareTo); } - public void removeRegionLocation(TDataNodeLocation node) { - replicaSet.getDataNodeLocations().remove(node); + public synchronized void removeRegionLocation(int nodeId) { + replicaSet + .getDataNodeLocations() + .removeIf(tDataNodeLocation -> nodeId == tDataNodeLocation.getDataNodeId()); replicaSet.getDataNodeLocations().sort(TDataNodeLocation::compareTo); } /** * @param deltaMap Map */ - public void updateSlotCountMap(Map deltaMap) { + public synchronized void updateSlotCountMap(Map deltaMap) { deltaMap.forEach( ((seriesPartitionSlot, delta) -> { slotCountMap @@ -113,11 +115,11 @@ public void updateSlotCountMap(Map deltaMap) { })); } - public int getSeriesSlotCount() { + public synchronized int getSeriesSlotCount() { return slotCountMap.size(); } - public long getTimeSlotCount() { + public synchronized long getTimeSlotCount() { return totalTimeSlotCount.get(); } @@ -127,12 +129,12 @@ public long getTimeSlotCount() { * @param dataNodeId The specified DataNodeId. * @return True if the RegionGroup belongs to the specified DataNode. 
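The synchronized accessors added here pair the object monitor with deepCopy() in getReplicaSet(), so callers never see or mutate the live replica list. A minimal stand-in for that copy-on-read idea (simplified types, not the IoTDB classes):

import java.util.ArrayList;
import java.util.List;

final class CopyOnReadHolderSketch {
  private final List<Integer> nodeIds = new ArrayList<>();

  // All mutation goes through the monitor, like addRegionLocation above.
  synchronized void add(int nodeId) {
    nodeIds.add(nodeId);
  }

  // Readers get a private copy, analogous to replicaSet.deepCopy().
  synchronized List<Integer> snapshot() {
    return new ArrayList<>(nodeIds);
  }
}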
*/ - public boolean belongsToDataNode(int dataNodeId) { + public synchronized boolean belongsToDataNode(int dataNodeId) { return replicaSet.getDataNodeLocations().stream() .anyMatch(dataNodeLocation -> dataNodeLocation.getDataNodeId() == dataNodeId); } - public void serialize(OutputStream outputStream, TProtocol protocol) + public synchronized void serialize(OutputStream outputStream, TProtocol protocol) throws IOException, TException { ReadWriteIOUtils.write(createTime, outputStream); replicaSet.write(protocol); @@ -146,7 +148,7 @@ public void serialize(OutputStream outputStream, TProtocol protocol) ReadWriteIOUtils.write(totalTimeSlotCount.get(), outputStream); } - public void deserialize(InputStream inputStream, TProtocol protocol) + public synchronized void deserialize(InputStream inputStream, TProtocol protocol) throws IOException, TException { this.createTime = ReadWriteIOUtils.readLong(inputStream); replicaSet.read(protocol); @@ -163,7 +165,7 @@ public void deserialize(InputStream inputStream, TProtocol protocol) } @Override - public boolean equals(Object o) { + public synchronized boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; RegionGroup that = (RegionGroup) o; @@ -181,7 +183,7 @@ public boolean equals(Object o) { } @Override - public int hashCode() { + public synchronized int hashCode() { return Objects.hash(createTime, replicaSet, slotCountMap, totalTimeSlotCount); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipeInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipeInfo.java index 7c1823a7d0cef..d09e5c82845a0 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipeInfo.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipeInfo.java @@ -20,7 +20,7 @@ package org.apache.iotdb.confignode.persistence.pipe; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; import org.apache.iotdb.commons.snapshot.SnapshotProcessor; import org.apache.iotdb.confignode.consensus.request.write.pipe.runtime.PipeHandleLeaderChangePlan; import org.apache.iotdb.confignode.consensus.request.write.pipe.runtime.PipeHandleMetaChangePlan; @@ -31,9 +31,9 @@ import org.apache.iotdb.confignode.consensus.request.write.pipe.task.SetPipeStatusPlanV2; import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent; import org.apache.iotdb.confignode.manager.pipe.agent.runtime.PipeConfigRegionListener; +import org.apache.iotdb.confignode.manager.pipe.agent.task.PipeConfigNodeSubtask; import org.apache.iotdb.confignode.manager.pipe.agent.task.PipeConfigNodeTaskAgent; -import org.apache.iotdb.confignode.manager.pipe.execution.PipeConfigNodeSubtask; -import org.apache.iotdb.confignode.manager.pipe.metric.PipeTemporaryMetaMetrics; +import org.apache.iotdb.confignode.manager.pipe.metric.overview.PipeTemporaryMetaInCoordinatorMetrics; import org.apache.iotdb.mpp.rpc.thrift.TPushPipeMetaRespExceptionMessage; import org.apache.iotdb.pipe.api.exception.PipeException; import org.apache.iotdb.rpc.TSStatusCode; @@ -94,7 +94,7 @@ public TSStatus createPipe(final CreatePipePlanV2 plan) { throw new PipeException("Failed to increase listener reference", e); } }); - PipeTemporaryMetaMetrics.getInstance() + PipeTemporaryMetaInCoordinatorMetrics.getInstance() 
.handleTemporaryMetaChanges(pipeTaskInfo.getPipeMetaList()); return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } else { @@ -114,7 +114,7 @@ public TSStatus setPipeStatus(final SetPipeStatusPlanV2 plan) { PipeConfigNodeAgent.task() .handleSinglePipeMetaChanges(pipeTaskInfo.getPipeMetaByPipeName(plan.getPipeName())); - PipeTemporaryMetaMetrics.getInstance() + PipeTemporaryMetaInCoordinatorMetrics.getInstance() .handleTemporaryMetaChanges(pipeTaskInfo.getPipeMetaList()); return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } catch (final Exception e) { @@ -143,7 +143,7 @@ public TSStatus dropPipe(final DropPipePlanV2 plan) { throw new PipeException("Failed to decrease listener reference", e); } }); - PipeTemporaryMetaMetrics.getInstance() + PipeTemporaryMetaInCoordinatorMetrics.getInstance() .handleTemporaryMetaChanges(pipeTaskInfo.getPipeMetaList()); return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } else { @@ -159,14 +159,35 @@ public TSStatus dropPipe(final DropPipePlanV2 plan) { public TSStatus alterPipe(final AlterPipePlanV2 plan) { try { + final Optional pipeMetaBeforeAlter = + Optional.ofNullable( + pipeTaskInfo.getPipeMetaByPipeName(plan.getPipeStaticMeta().getPipeName())); + pipeTaskInfo.alterPipe(plan); - PipeConfigNodeAgent.task() - .handleSinglePipeMetaChanges( - pipeTaskInfo.getPipeMetaByPipeName(plan.getPipeStaticMeta().getPipeName())); - PipeTemporaryMetaMetrics.getInstance() - .handleTemporaryMetaChanges(pipeTaskInfo.getPipeMetaList()); - return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + final TPushPipeMetaRespExceptionMessage message = + PipeConfigNodeAgent.task() + .handleSinglePipeMetaChanges( + pipeTaskInfo.getPipeMetaByPipeName(plan.getPipeStaticMeta().getPipeName())); + if (message == null) { + PipeConfigNodeAgent.runtime() + .increaseListenerReference(plan.getPipeStaticMeta().getExtractorParameters()); + pipeMetaBeforeAlter.ifPresent( + meta -> { + try { + PipeConfigNodeAgent.runtime() + .decreaseListenerReference(meta.getStaticMeta().getExtractorParameters()); + } catch (final Exception e) { + throw new PipeException("Failed to decrease listener reference", e); + } + }); + PipeTemporaryMetaInCoordinatorMetrics.getInstance() + .handleTemporaryMetaChanges(pipeTaskInfo.getPipeMetaList()); + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } else { + return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()) + .setMessage(message.getMessage()); + } } catch (final Exception e) { LOGGER.error("Failed to alter pipe", e); return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()) @@ -185,7 +206,7 @@ public TSStatus alterPipe(final AlterPipePlanV2 plan) { public TSStatus operateMultiplePipes(final OperateMultiplePipesPlanV2 plans) { try { final TSStatus status = pipeTaskInfo.operateMultiplePipes(plans); - PipeTemporaryMetaMetrics.getInstance() + PipeTemporaryMetaInCoordinatorMetrics.getInstance() .handleTemporaryMetaChanges(pipeTaskInfo.getPipeMetaList()); return status; } catch (final Exception e) { @@ -204,7 +225,7 @@ public TSStatus handleLeaderChange(final PipeHandleLeaderChangePlan plan) { pipeMetaListFromCoordinator.add(pipeMeta); } PipeConfigNodeAgent.task().handlePipeMetaChanges(pipeMetaListFromCoordinator); - PipeTemporaryMetaMetrics.getInstance() + PipeTemporaryMetaInCoordinatorMetrics.getInstance() .handleTemporaryMetaChanges(pipeTaskInfo.getPipeMetaList()); return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } catch (final Exception e) { @@ -224,7 +245,7 @@ public 
TSStatus handleMetaChanges(final PipeHandleMetaChangePlan plan) { pipeTaskInfo.getPipeMetaByPipeName(pipeMeta.getStaticMeta().getPipeName())); } PipeConfigNodeAgent.task().handlePipeMetaChanges(pipeMetaListFromCoordinator); - PipeTemporaryMetaMetrics.getInstance() + PipeTemporaryMetaInCoordinatorMetrics.getInstance() .handleTemporaryMetaChanges(pipeTaskInfo.getPipeMetaList()); return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } catch (final Exception e) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipePluginInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipePluginInfo.java index 64d235e7f2312..36206c05fcd16 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipePluginInfo.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipePluginInfo.java @@ -21,12 +21,12 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.executable.ExecutableManager; -import org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant; -import org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.ConfigNodePipePluginMetaKeeper; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.PipePluginMeta; +import org.apache.iotdb.commons.pipe.agent.plugin.service.PipePluginExecutableManager; import org.apache.iotdb.commons.pipe.config.constant.PipeProcessorConstant; -import org.apache.iotdb.commons.pipe.plugin.meta.ConfigNodePipePluginMetaKeeper; -import org.apache.iotdb.commons.pipe.plugin.meta.PipePluginMeta; -import org.apache.iotdb.commons.pipe.plugin.service.PipePluginExecutableManager; +import org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant; +import org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant; import org.apache.iotdb.commons.snapshot.SnapshotProcessor; import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; @@ -58,9 +58,9 @@ import java.util.Objects; import java.util.concurrent.locks.ReentrantLock; -import static org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin.DO_NOTHING_PROCESSOR; -import static org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin.IOTDB_EXTRACTOR; -import static org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin.IOTDB_THRIFT_CONNECTOR; +import static org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin.DO_NOTHING_PROCESSOR; +import static org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin.IOTDB_EXTRACTOR; +import static org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin.IOTDB_THRIFT_CONNECTOR; public class PipePluginInfo implements SnapshotProcessor { @@ -149,7 +149,7 @@ public void checkPipePluginExistence( final PipeParameters extractorParameters = new PipeParameters(extractorAttributes); final String extractorPluginName = extractorParameters.getStringOrDefault( - Arrays.asList(PipeExtractorConstant.EXTRACTOR_KEY, PipeExtractorConstant.SOURCE_KEY), + Arrays.asList(PipeSourceConstant.EXTRACTOR_KEY, PipeSourceConstant.SOURCE_KEY), IOTDB_EXTRACTOR.getPipePluginName()); if (!pipePluginMetaKeeper.containsPipePlugin(extractorPluginName)) { final String exceptionMessage = @@ -176,7 +176,7 @@ public void checkPipePluginExistence( final PipeParameters connectorParameters = new PipeParameters(connectorAttributes); final 
String connectorPluginName = connectorParameters.getStringOrDefault( - Arrays.asList(PipeConnectorConstant.CONNECTOR_KEY, PipeConnectorConstant.SINK_KEY), + Arrays.asList(PipeSinkConstant.CONNECTOR_KEY, PipeSinkConstant.SINK_KEY), IOTDB_THRIFT_CONNECTOR.getPipePluginName()); if (!pipePluginMetaKeeper.containsPipePlugin(connectorPluginName)) { final String exceptionMessage = diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipeTaskInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipeTaskInfo.java index ddc6ae6ac60df..8d5aa795e14da 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipeTaskInfo.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipeTaskInfo.java @@ -23,19 +23,20 @@ import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeCriticalException; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeException; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMetaKeeper; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeRuntimeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTemporaryMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTemporaryMetaInCoordinator; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeType; import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant; -import org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant; import org.apache.iotdb.commons.pipe.config.constant.PipeProcessorConstant; -import org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeMetaKeeper; -import org.apache.iotdb.commons.pipe.task.meta.PipeRuntimeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStatus; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeTemporaryMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeType; +import org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant; +import org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant; import org.apache.iotdb.commons.snapshot.SnapshotProcessor; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.write.pipe.runtime.PipeHandleLeaderChangePlan; @@ -76,7 +77,9 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; -import static org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin.IOTDB_THRIFT_CONNECTOR; +import static org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin.IOTDB_THRIFT_CONNECTOR; +import static org.apache.iotdb.commons.pipe.config.constant.PipeRPCMessageConstant.PIPE_ALREADY_EXIST_MSG; +import static org.apache.iotdb.commons.pipe.config.constant.PipeRPCMessageConstant.PIPE_NOT_EXIST_MSG; public 
class PipeTaskInfo implements SnapshotProcessor { @@ -178,8 +181,8 @@ private boolean checkBeforeCreatePipeInternal(final TCreatePipeReq createPipeReq final String exceptionMessage = String.format( - "Failed to create pipe %s, the pipe with the same name has been created", - createPipeRequest.getPipeName()); + "Failed to create pipe %s, %s", + createPipeRequest.getPipeName(), PIPE_ALREADY_EXIST_MSG); LOGGER.warn(exceptionMessage); throw new PipeException(exceptionMessage); } @@ -203,7 +206,7 @@ private boolean checkAndUpdateRequestBeforeAlterPipeInternal(final TAlterPipeReq final String exceptionMessage = String.format( - "Failed to alter pipe %s, the pipe does not exist", alterPipeRequest.getPipeName()); + "Failed to alter pipe %s, %s", alterPipeRequest.getPipeName(), PIPE_NOT_EXIST_MSG); LOGGER.warn(exceptionMessage); throw new PipeException(exceptionMessage); } @@ -280,7 +283,7 @@ public void checkBeforeStartPipe(final String pipeName) throws PipeException { private void checkBeforeStartPipeInternal(final String pipeName) throws PipeException { if (!isPipeExisted(pipeName)) { final String exceptionMessage = - String.format("Failed to start pipe %s, the pipe does not exist", pipeName); + String.format("Failed to start pipe %s, %s", pipeName, PIPE_NOT_EXIST_MSG); LOGGER.warn(exceptionMessage); throw new PipeException(exceptionMessage); } @@ -306,7 +309,7 @@ public void checkBeforeStopPipe(final String pipeName) throws PipeException { private void checkBeforeStopPipeInternal(final String pipeName) throws PipeException { if (!isPipeExisted(pipeName)) { final String exceptionMessage = - String.format("Failed to stop pipe %s, the pipe does not exist", pipeName); + String.format("Failed to stop pipe %s, %s", pipeName, PIPE_NOT_EXIST_MSG); LOGGER.warn(exceptionMessage); throw new PipeException(exceptionMessage); } @@ -393,7 +396,7 @@ private void validatePipePluginUsageByPipeInternal(String pluginName) { PipeParameters extractorParameters = pipeMeta.getStaticMeta().getExtractorParameters(); final String extractorPluginName = extractorParameters.getStringOrDefault( - Arrays.asList(PipeExtractorConstant.EXTRACTOR_KEY, PipeExtractorConstant.SOURCE_KEY), + Arrays.asList(PipeSourceConstant.EXTRACTOR_KEY, PipeSourceConstant.SOURCE_KEY), BuiltinPipePlugin.IOTDB_EXTRACTOR.getPipePluginName()); if (pluginName.equals(extractorPluginName)) { String exceptionMessage = @@ -417,7 +420,7 @@ private void validatePipePluginUsageByPipeInternal(String pluginName) { PipeParameters connectorParameters = pipeMeta.getStaticMeta().getConnectorParameters(); final String connectorPluginName = connectorParameters.getStringOrDefault( - Arrays.asList(PipeConnectorConstant.CONNECTOR_KEY, PipeConnectorConstant.SINK_KEY), + Arrays.asList(PipeSinkConstant.CONNECTOR_KEY, PipeSinkConstant.SINK_KEY), IOTDB_THRIFT_CONNECTOR.getPipePluginName()); if (pluginName.equals(connectorPluginName)) { String exceptionMessage = @@ -602,7 +605,8 @@ private TSStatus handleLeaderChangeInternal(final PipeHandleLeaderChangePlan pla .get(consensusGroupId.getId()) .setLeaderNodeId(newLeader); // New region leader may contain un-transferred events - pipeMeta.getTemporaryMeta().markDataNodeUncompleted(newLeader); + ((PipeTemporaryMetaInCoordinator) pipeMeta.getTemporaryMeta()) + .markDataNodeUncompleted(newLeader); } else { consensusGroupIdToTaskMetaMap.remove(consensusGroupId.getId()); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/CNPhysicalPlanGenerator.java 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/CNPhysicalPlanGenerator.java index 3469a3a5806e2..f949b76547001 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/CNPhysicalPlanGenerator.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/CNPhysicalPlanGenerator.java @@ -29,7 +29,7 @@ import org.apache.iotdb.commons.utils.ThriftConfigNodeSerDeUtils; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; -import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan; +import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.consensus.request.write.database.DatabaseSchemaPlan; import org.apache.iotdb.confignode.consensus.request.write.database.SetTTLPlan; import org.apache.iotdb.confignode.consensus.request.write.template.CommitSetSchemaTemplatePlan; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfo.java index 90dbf1a08e481..c5c60afbb26cb 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfo.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfo.java @@ -414,9 +414,11 @@ public TSStatus adjustMaxRegionGroupCount(AdjustMaxRegionGroupNumPlan plan) { databaseSchema.setMaxDataRegionGroupNum(entry.getValue().getRight()); } result.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - } catch (MetadataException e) { - LOGGER.error(ERROR_NAME, e); - result.setCode(TSStatusCode.DATABASE_NOT_EXIST.getStatusCode()); + } catch (final MetadataException e) { + LOGGER.info( + "Database inconsistency detected when adjusting max region group count, message: {}, will be corrected by the following adjusting plans", + e.getMessage()); + result.setCode(e.getErrorCode()).setMessage(e.getMessage()); } finally { databaseReadWriteLock.writeLock().unlock(); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigMTree.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigMTree.java index 54f8d8663f074..6d0d13d4c652b 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigMTree.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigMTree.java @@ -31,6 +31,7 @@ import org.apache.iotdb.confignode.persistence.schema.mnode.IConfigMNode; import org.apache.iotdb.confignode.persistence.schema.mnode.factory.ConfigMNodeFactory; import org.apache.iotdb.db.exception.metadata.DatabaseAlreadySetException; +import org.apache.iotdb.db.exception.metadata.DatabaseConflictException; import org.apache.iotdb.db.exception.metadata.DatabaseNotSetException; import org.apache.iotdb.db.exception.metadata.PathNotExistException; import org.apache.iotdb.db.schemaengine.schemaregion.mtree.traverser.collector.DatabaseCollector; @@ -114,7 +115,7 @@ public void setStorageGroup(PartialPath path) throws MetadataException { store.addChild(cur, nodeNames[i], nodeFactory.createInternalMNode(cur, nodeNames[i])); } else if (temp.isDatabase()) { // before create database, check whether the database already exists - 
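The ConfigMTree hunks in this file (continuing just below) replace DatabaseAlreadySetException with a DatabaseConflictException that carries a boolean. A minimal sketch of how such an exception could be shaped, assuming the flag distinguishes "this exact path is already set as a database" (true) from "an ancestor or descendant database conflicts with the path" (false); the message wording and the MetadataException base are assumptions, not taken from this PR:

import org.apache.iotdb.db.exception.metadata.MetadataException;

public class DatabaseConflictException extends MetadataException {

  // true: the path itself was already created as a database;
  // false: a database above or below this path conflicts with it.
  private final boolean isAlreadySet;

  public DatabaseConflictException(final String path, final boolean isAlreadySet) {
    super(
        isAlreadySet
            ? String.format("%s has already been created as database", path)
            : String.format("%s conflicts with an existing database", path));
    this.isAlreadySet = isAlreadySet;
  }

  public boolean isAlreadySet() {
    return isAlreadySet;
  }
}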
throw new DatabaseAlreadySetException(temp.getFullPath()); + throw new DatabaseConflictException(temp.getFullPath(), false); } cur = store.getChild(cur, nodeNames[i]); i++; @@ -128,7 +129,7 @@ public void setStorageGroup(PartialPath path) throws MetadataException { if (store.getChild(cur, nodeNames[i]).isDatabase()) { throw new DatabaseAlreadySetException(path.getFullPath()); } else { - throw new DatabaseAlreadySetException(path.getFullPath(), true); + throw new DatabaseConflictException(path.getFullPath(), true); } } else { IDatabaseMNode databaseMNode = @@ -137,7 +138,7 @@ public void setStorageGroup(PartialPath path) throws MetadataException { IConfigMNode result = store.addChild(cur, nodeNames[i], databaseMNode.getAsMNode()); if (result != databaseMNode) { - throw new DatabaseAlreadySetException(path.getFullPath(), true); + throw new DatabaseConflictException(path.getFullPath(), true); } } } @@ -262,7 +263,7 @@ public IDatabaseMNode getDatabaseNodeByDatabasePath(PartialPath da throw new DatabaseNotSetException(databasePath.getFullPath()); } if (cur.isDatabase()) { - throw new DatabaseAlreadySetException(cur.getFullPath()); + throw new DatabaseConflictException(cur.getFullPath(), false); } } @@ -273,7 +274,7 @@ public IDatabaseMNode getDatabaseNodeByDatabasePath(PartialPath da if (cur.isDatabase()) { return cur.getAsDatabaseMNode(); } else { - throw new DatabaseAlreadySetException(databasePath.getFullPath(), true); + throw new DatabaseConflictException(databasePath.getFullPath(), true); } } @@ -331,7 +332,8 @@ public boolean isDatabaseAlreadySet(PartialPath path) { * * @param path a full path or a prefix path */ - public void checkDatabaseAlreadySet(PartialPath path) throws DatabaseAlreadySetException { + public void checkDatabaseAlreadySet(PartialPath path) + throws DatabaseAlreadySetException, DatabaseConflictException { String[] nodeNames = path.getNodes(); IConfigMNode cur = root; if (!nodeNames[0].equals(root.getName())) { @@ -346,7 +348,7 @@ public void checkDatabaseAlreadySet(PartialPath path) throws DatabaseAlreadySetE throw new DatabaseAlreadySetException(cur.getFullPath()); } } - throw new DatabaseAlreadySetException(path.getFullPath(), true); + throw new DatabaseConflictException(path.getFullPath(), true); } // endregion diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/TemplateTable.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/TemplateTable.java index 201e6eb4d2ef4..23cde4e9447d0 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/TemplateTable.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/TemplateTable.java @@ -156,7 +156,7 @@ public void extendTemplate(TemplateExtendInfo templateExtendInfo) throws Metadat dataTypeList.get(i), encodingList == null ? getDefaultEncoding(dataTypeList.get(i)) : encodingList.get(i), compressionTypeList == null - ? TSFileDescriptor.getInstance().getConfig().getCompressor() + ? 
TSFileDescriptor.getInstance().getConfig().getCompressor(dataTypeList.get(i)) : compressionTypeList.get(i)); } else { if (!measurementSchema.getType().equals(dataTypeList.get(i)) diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/subscription/SubscriptionInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/subscription/SubscriptionInfo.java index 6b64331422be8..9a1c6acc72a27 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/subscription/SubscriptionInfo.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/subscription/SubscriptionInfo.java @@ -44,6 +44,8 @@ import org.apache.iotdb.rpc.TSStatusCode; import org.apache.iotdb.rpc.subscription.exception.SubscriptionException; +import org.apache.thrift.annotation.Nullable; +import org.apache.tsfile.utils.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,9 +56,11 @@ import java.util.ArrayList; import java.util.List; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.StreamSupport; @@ -194,7 +198,7 @@ private void checkBeforeDropTopicInternal(String topicName) throws SubscriptionE // executed on all nodes to ensure the consistency. return; } else { - if (!topicMeta.hasSubscribedConsumerGroup()) { + if (!consumerGroupMetaKeeper.isTopicSubscribedByConsumerGroup(topicName)) { return; } } @@ -207,6 +211,37 @@ private void checkBeforeDropTopicInternal(String topicName) throws SubscriptionE throw new SubscriptionException(exceptionMessage); } + public void validatePipePluginUsageByTopic(String pipePluginName) throws SubscriptionException { + acquireReadLock(); + try { + validatePipePluginUsageByTopicInternal(pipePluginName); + } finally { + releaseReadLock(); + } + } + + public void validatePipePluginUsageByTopicInternal(String pipePluginName) + throws SubscriptionException { + acquireReadLock(); + try { + topicMetaKeeper + .getAllTopicMeta() + .forEach( + meta -> { + if (pipePluginName.equals(meta.getConfig().getAttribute().get("processor"))) { + final String exceptionMessage = + String.format( + "PipePlugin '%s' is already used by Topic '%s' as a processor.", + pipePluginName, meta.getTopicName()); + LOGGER.warn(exceptionMessage); + throw new SubscriptionException(exceptionMessage); + } + }); + } finally { + releaseReadLock(); + } + } + public void validateBeforeAlteringTopic(TopicMeta topicMeta) throws SubscriptionException { acquireReadLock(); try { @@ -475,6 +510,16 @@ public ConsumerGroupMeta deepCopyConsumerGroupMeta(String consumerGroupName) { } } + public boolean isTopicSubscribedByConsumerGroup( + final String topicName, final String consumerGroupId) { + acquireReadLock(); + try { + return consumerGroupMetaKeeper.isTopicSubscribedByConsumerGroup(topicName, consumerGroupId); + } finally { + releaseReadLock(); + } + } + public TSStatus alterConsumerGroup(AlterConsumerGroupPlan plan) { acquireWriteLock(); try { @@ -562,15 +607,18 @@ public void validateBeforeUnsubscribe(TUnsubscribeReq unsubscribeReq) private void checkBeforeUnsubscribeInternal(TUnsubscribeReq unsubscribeReq) throws SubscriptionException { // 1. 
Check if the consumer exists - if (!isConsumerExisted(unsubscribeReq.getConsumerGroupId(), unsubscribeReq.getConsumerId())) { - // There is no consumer with the same consumerId and consumerGroupId, - // we should end the procedure - final String exceptionMessage = - String.format( - "Failed to unsubscribe because the consumer %s in consumer group %s does not exist", - unsubscribeReq.getConsumerId(), unsubscribeReq.getConsumerGroupId()); - LOGGER.warn(exceptionMessage); - throw new SubscriptionException(exceptionMessage); + // NOTE: consumer id may be null if drop subscription by session + if (Objects.nonNull(unsubscribeReq.getConsumerId())) { + if (!isConsumerExisted(unsubscribeReq.getConsumerGroupId(), unsubscribeReq.getConsumerId())) { + // There is no consumer with the same consumerId and consumerGroupId, + // we should end the procedure + final String exceptionMessage = + String.format( + "Failed to unsubscribe because the consumer %s in consumer group %s does not exist", + unsubscribeReq.getConsumerId(), unsubscribeReq.getConsumerGroupId()); + LOGGER.warn(exceptionMessage); + throw new SubscriptionException(exceptionMessage); + } } // 2. Check if all topics exist. No need to check if already subscribed. @@ -597,16 +645,28 @@ public DataSet showSubscriptions() { } private List getAllSubscriptionMeta() { + return getAllSubscriptionMetaInternal(null); + } + + private List getAllSubscriptionMetaInternal( + @Nullable Predicate predicate) { List allSubscriptions = new ArrayList<>(); for (TopicMeta topicMeta : topicMetaKeeper.getAllTopicMeta()) { - for (String consumerGroupId : topicMeta.getSubscribedConsumerGroupIds()) { + if (Objects.nonNull(predicate) && !predicate.test(topicMeta)) { + continue; + } + for (String consumerGroupId : + consumerGroupMetaKeeper.getSubscribedConsumerGroupIds(topicMeta.getTopicName())) { Set subscribedConsumerIDs = consumerGroupMetaKeeper.getConsumersSubscribingTopic( consumerGroupId, topicMeta.getTopicName()); + Optional creationTime = + consumerGroupMetaKeeper.getSubscriptionCreationTime( + consumerGroupId, topicMeta.getTopicName()); if (!subscribedConsumerIDs.isEmpty()) { allSubscriptions.add( new SubscriptionMeta( - topicMeta.getTopicName(), consumerGroupId, subscribedConsumerIDs)); + topicMeta, consumerGroupId, subscribedConsumerIDs, creationTime.orElse(null))); } } } @@ -619,6 +679,24 @@ public List getAllConsumerGroupMeta() { .collect(Collectors.toList()); } + public Optional> parseSubscriptionId(String subscriptionId) { + acquireReadLock(); + try { + List allSubscriptions = getAllSubscriptionMetaInternal(null); + for (SubscriptionMeta subscriptionMeta : allSubscriptions) { + if (Objects.equals(subscriptionId, subscriptionMeta.getSubscriptionId())) { + return Optional.of( + new Pair<>( + subscriptionMeta.getTopicMeta().getTopicName(), + subscriptionMeta.getConsumerGroupId())); + } + } + return Optional.empty(); + } finally { + releaseReadLock(); + } + } + ///////////////////////////////// Snapshot ///////////////////////////////// @Override diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/CompletedProcedureRecycler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/CompletedProcedureRecycler.java index ccf3afeb4e645..cea15a1bfb687 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/CompletedProcedureRecycler.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/CompletedProcedureRecycler.java @@ -31,7 +31,7 @@ /** Internal 
cleaner that removes the completed procedure results after a TTL. */ public class CompletedProcedureRecycler extends InternalProcedure { private static final Logger LOG = LoggerFactory.getLogger(CompletedProcedureRecycler.class); - private static final int DEFAULT_BATCH_SIZE = 32; + private static final int DEFAULT_BATCH_SIZE = 8; private final long evictTTL; private final Map> completed; private final IProcedureStore store; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/InternalProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/InternalProcedure.java index 817a8e381c388..f9ff68bde7ebd 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/InternalProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/InternalProcedure.java @@ -19,9 +19,6 @@ package org.apache.iotdb.confignode.procedure; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; - import java.io.IOException; import java.nio.ByteBuffer; @@ -38,8 +35,7 @@ protected InternalProcedure(long toMillis) { protected abstract void periodicExecute(final Env env); @Override - protected Procedure[] execute(Env env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + protected Procedure[] execute(Env env) throws InterruptedException { throw new UnsupportedOperationException(); } @@ -48,11 +44,6 @@ protected void rollback(Env env) throws IOException, InterruptedException { throw new UnsupportedOperationException(); } - @Override - protected boolean abort(Env env) { - throw new UnsupportedOperationException(); - } - @Override public void deserialize(ByteBuffer byteBuffer) {} } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/PartitionTableAutoCleaner.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/PartitionTableAutoCleaner.java new file mode 100644 index 0000000000000..8a08e5efe99e8 --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/PartitionTableAutoCleaner.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.procedure; + +import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; +import org.apache.iotdb.commons.conf.CommonConfig; +import org.apache.iotdb.commons.conf.CommonDescriptor; +import org.apache.iotdb.confignode.consensus.request.write.partition.AutoCleanPartitionTablePlan; +import org.apache.iotdb.confignode.manager.ConfigManager; +import org.apache.iotdb.consensus.exception.ConsensusException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +import static org.apache.iotdb.confignode.manager.partition.PartitionManager.CONSENSUS_WRITE_ERROR; + +/** A cleaner that automatically deletes the expired mapping within the partition table. */ +public class PartitionTableAutoCleaner extends InternalProcedure { + + private static final Logger LOGGER = LoggerFactory.getLogger(PartitionTableAutoCleaner.class); + + private static final CommonConfig COMMON_CONFIG = CommonDescriptor.getInstance().getConfig(); + + private static final String timestampPrecision = + CommonDescriptor.getInstance().getConfig().getTimestampPrecision(); + + private final ConfigManager configManager; + + public PartitionTableAutoCleaner(ConfigManager configManager) { + super(COMMON_CONFIG.getTTLCheckInterval()); + this.configManager = configManager; + LOGGER.info( + "[PartitionTableCleaner] The PartitionTableAutoCleaner is started with cycle={}ms", + COMMON_CONFIG.getTTLCheckInterval()); + } + + @Override + protected void periodicExecute(Env env) { + List databases = configManager.getClusterSchemaManager().getDatabaseNames(); + Map databaseTTLMap = new TreeMap<>(); + for (String database : databases) { + long databaseTTL = configManager.getTTLManager().getDatabaseMaxTTL(database); + databaseTTLMap.put(database, databaseTTL); + } + LOGGER.info( + "[PartitionTableCleaner] Periodically activate PartitionTableAutoCleaner, databaseTTL: {}", + databaseTTLMap); + for (String database : databases) { + long databaseTTL = databaseTTLMap.get(database); + if (!configManager.getPartitionManager().isDatabaseExist(database) + || databaseTTL < 0 + || databaseTTL == Long.MAX_VALUE) { + // Remove the entry if the database or the TTL does not exist + databaseTTLMap.remove(database); + } + } + if (!databaseTTLMap.isEmpty()) { + LOGGER.info( + "[PartitionTableCleaner] Periodically activate PartitionTableAutoCleaner for: {}", + databaseTTLMap); + // Only clean the partition table when necessary + TTimePartitionSlot currentTimePartitionSlot = getCurrentTimePartitionSlot(); + try { + configManager + .getConsensusManager() + .write(new AutoCleanPartitionTablePlan(databaseTTLMap, currentTimePartitionSlot)); + } catch (ConsensusException e) { + LOGGER.warn(CONSENSUS_WRITE_ERROR, e); + } + } + } + + /** + * @return The time partition slot corresponding to current timestamp. Note that we do not shift + * the start time to the correct starting point, since this interface only constructs a time + * reference position for the partition table cleaner. 
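periodicExecute above boils down to: collect each database's maximum TTL, drop the entries that can never expire, and only then pay for a consensus write. A condensed sketch of that filtering step, reusing the names from the new file (the isDatabaseExist re-check is omitted for brevity):

// Keep only databases with an effective TTL: a negative TTL or Long.MAX_VALUE
// means the data never expires, so those partition tables need no cleaning.
final Map<String, Long> effectiveTtls = new TreeMap<>();
for (final String database : configManager.getClusterSchemaManager().getDatabaseNames()) {
  final long ttl = configManager.getTTLManager().getDatabaseMaxTTL(database);
  if (ttl >= 0 && ttl != Long.MAX_VALUE) {
    effectiveTtls.put(database, ttl);
  }
}
if (!effectiveTtls.isEmpty()) {
  try {
    // One consensus write cleans all databases that can actually expire.
    configManager
        .getConsensusManager()
        .write(new AutoCleanPartitionTablePlan(effectiveTtls, getCurrentTimePartitionSlot()));
  } catch (final ConsensusException e) {
    LOGGER.warn(CONSENSUS_WRITE_ERROR, e);
  }
}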
+ */ + private static TTimePartitionSlot getCurrentTimePartitionSlot() { + if ("ms".equals(timestampPrecision)) { + return new TTimePartitionSlot(System.currentTimeMillis()); + } else if ("us".equals(timestampPrecision)) { + return new TTimePartitionSlot(System.currentTimeMillis() * 1000); + } else { + return new TTimePartitionSlot(System.currentTimeMillis() * 1000_000); + } + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/Procedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/Procedure.java index 7566087df0aa5..91af03d3971be 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/Procedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/Procedure.java @@ -20,11 +20,7 @@ package org.apache.iotdb.confignode.procedure; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; -import org.apache.iotdb.confignode.procedure.exception.ProcedureAbortedException; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureTimeoutException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.state.ProcedureLockState; import org.apache.iotdb.confignode.procedure.state.ProcedureState; import org.apache.iotdb.confignode.procedure.store.IProcedureStore; @@ -34,7 +30,6 @@ import java.io.DataOutputStream; import java.io.IOException; -import java.lang.reflect.InvocationTargetException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -70,20 +65,6 @@ public abstract class Procedure implements Comparable> { private int[] stackIndexes = null; - private boolean persist = true; - - public boolean needPersistance() { - return this.persist; - } - - public void resetPersistance() { - this.persist = true; - } - - public final void skipPersistance() { - this.persist = false; - } - public final boolean hasLock() { return locked; } @@ -97,14 +78,9 @@ public final boolean hasLock() { * @param env the environment passed to the ProcedureExecutor * @return a set of sub-procedures to run or ourselves if there is more work to do or null if the * procedure is done. - * @throws ProcedureYieldException the procedure will be added back to the queue and retried - * later. * @throws InterruptedException the procedure will be added back to the queue and retried later. - * @throws ProcedureSuspendedException Signal to the executor that Procedure has suspended itself - * and has set itself up waiting for an external event to wake it back up again. */ - protected abstract Procedure[] execute(Env env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException; + protected abstract Procedure[] execute(Env env) throws InterruptedException; /** * The code to undo what was done by the execute() code. It is called when the procedure or one of @@ -119,17 +95,6 @@ protected abstract Procedure[] execute(Env env) protected abstract void rollback(Env env) throws IOException, InterruptedException, ProcedureException; - /** - * The abort() call is asynchronous and each procedure must decide how to deal with it, if they - * want to be abortable. 
The simplest implementation is to have an AtomicBoolean set in the
-   * abort() method and then the execute() will check if the abort flag is set or not. abort() may
-   * be called multiple times from the client, so the implementation must be idempotent.
-   *
-   * <p>
NOTE: abort() is not like Thread.interrupt(). It is just a notification that allows the - * procedure implementor abort. - */ - protected abstract boolean abort(Env env); - public void serialize(DataOutputStream stream) throws IOException { // procid stream.writeLong(this.procId); @@ -155,11 +120,14 @@ public void serialize(DataOutputStream stream) throws IOException { // exceptions if (hasException()) { + // symbol of exception stream.write((byte) 1); + // exception's name String exceptionClassName = exception.getClass().getName(); byte[] exceptionClassNameBytes = exceptionClassName.getBytes(StandardCharsets.UTF_8); stream.writeInt(exceptionClassNameBytes.length); stream.write(exceptionClassNameBytes); + // exception's message String message = this.exception.getMessage(); if (message != null) { byte[] messageBytes = message.getBytes(StandardCharsets.UTF_8); @@ -169,6 +137,7 @@ public void serialize(DataOutputStream stream) throws IOException { stream.writeInt(-1); } } else { + // symbol of no exception stream.write((byte) 0); } @@ -206,9 +175,10 @@ public void deserialize(ByteBuffer byteBuffer) { } this.setStackIndexes(indexList); } - // exceptions + + // exception if (byteBuffer.get() == 1) { - Class exceptionClass = deserializeTypeInfo(byteBuffer); + deserializeTypeInfoForCompatibility(byteBuffer); int messageBytesLength = byteBuffer.getInt(); String errMsg = null; if (messageBytesLength > 0) { @@ -216,19 +186,7 @@ public void deserialize(ByteBuffer byteBuffer) { byteBuffer.get(messageBytes); errMsg = new String(messageBytes, StandardCharsets.UTF_8); } - ProcedureException exception; - try { - exception = - (ProcedureException) exceptionClass.getConstructor(String.class).newInstance(errMsg); - } catch (InstantiationException - | IllegalAccessException - | InvocationTargetException - | NoSuchMethodException e) { - LOG.warn("Instantiation exception class failed", e); - exception = new ProcedureException(errMsg); - } - - setFailure(exception); + setFailure(new ProcedureException(errMsg)); } // result @@ -237,7 +195,7 @@ public void deserialize(ByteBuffer byteBuffer) { byte[] resultArr = new byte[resultLen]; byteBuffer.get(resultArr); } - // has lock + // has lock if (byteBuffer.get() == 1) { this.lockedWhenLoading(); } @@ -249,40 +207,11 @@ public void deserialize(ByteBuffer byteBuffer) { * @param byteBuffer bytebuffer * @return Procedure */ - public static Class deserializeTypeInfo(ByteBuffer byteBuffer) { + @Deprecated + public static void deserializeTypeInfoForCompatibility(ByteBuffer byteBuffer) { int classNameBytesLen = byteBuffer.getInt(); byte[] classNameBytes = new byte[classNameBytesLen]; byteBuffer.get(classNameBytes); - String className = new String(classNameBytes, StandardCharsets.UTF_8); - Class clazz; - try { - clazz = Class.forName(className); - } catch (ClassNotFoundException e) { - throw new RuntimeException("Invalid procedure class", e); - } - return clazz; - } - - public static Procedure newInstance(ByteBuffer byteBuffer) { - Class procedureClass = deserializeTypeInfo(byteBuffer); - Procedure procedure; - try { - procedure = (Procedure) procedureClass.newInstance(); - } catch (InstantiationException | IllegalAccessException e) { - throw new RuntimeException("Instantiation failed", e); - } - return procedure; - } - - /** - * The {@link #doAcquireLock(Object, IProcedureStore)} will be split into two steps, first, it - * will call us to determine whether we need to wait for initialization, second, it will call - * {@link #acquireLock(Object)} to actually handle the lock for 
this procedure. - * - * @return true means we need to wait until the environment has been initialized, otherwise true. - */ - protected boolean waitInitialized(Env env) { - return false; } /** @@ -314,34 +243,6 @@ protected boolean holdLock(Env env) { return false; } - /** - * Called before the procedure is recovered and added into the queue. - * - * @param env environment - */ - protected final void beforeRecover(Env env) { - // no op - } - - /** - * Called when the procedure is recovered and added into the queue. - * - * @param env environment - */ - protected final void afterRecover(Env env) { - // no op - } - - /** - * Called when the procedure is completed (success or rollback). The procedure may use this method - * to clean up in-memory states. This operation will not be retried on failure. - * - * @param env environment - */ - protected void completionCleanup(Env env) { - // no op - } - /** * To make executor yield between each execution step to give other procedures a chance to run. * @@ -359,8 +260,7 @@ protected boolean isYieldAfterExecution(Env env) { * @param env execute environment * @return sub procedures */ - protected Procedure[] doExecute(Env env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + protected Procedure[] doExecute(Env env) throws InterruptedException { try { updateTimestamp(); return execute(env); @@ -393,9 +293,6 @@ public void doRollback(Env env) throws IOException, InterruptedException, Proced * @return ProcedureLockState */ public final ProcedureLockState doAcquireLock(Env env, IProcedureStore store) { - if (waitInitialized(env)) { - return ProcedureLockState.LOCK_EVENT_WAIT; - } if (lockedWhenLoading) { lockedWhenLoading = false; locked = true; @@ -754,20 +651,9 @@ protected synchronized void setFailure(final ProcedureException exception) { } } - protected void setAbortFailure(final String source, final String msg) { - setFailure(source, new ProcedureAbortedException(msg)); - } - /** * Called by the ProcedureExecutor when the timeout set by setTimeout() is expired. * - *
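The deserialize/deserializeTypeInfoForCompatibility change above stops instantiating exception classes by reflection: the class-name bytes are still consumed so procedure snapshots written by older versions keep parsing, but the failure is now always rehydrated as a plain ProcedureException. A minimal sketch of that length-prefixed read-and-discard layout, matching the format serialize() writes:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Consume a length-prefixed byte block without using it: the exception class
// name stays on the wire for compatibility but no longer drives reflection.
static void skipLengthPrefixedBytes(final ByteBuffer buffer) {
  final int length = buffer.getInt();
  buffer.position(buffer.position() + length);
}

// The exception message follows with the same layout; -1 encodes null.
static String readNullableUtf8(final ByteBuffer buffer) {
  final int length = buffer.getInt();
  if (length < 0) {
    return null;
  }
  final byte[] bytes = new byte[length];
  buffer.get(bytes);
  return new String(bytes, StandardCharsets.UTF_8);
}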
-   * <p>
Another usage for this method is to implement retrying. A procedure can set the state to - * {@code WAITING_TIMEOUT} by calling {@code setState} method, and throw a {@link - * ProcedureSuspendedException} to halt the execution of the procedure, and do not forget a call - * {@link #setTimeout(long)} method to set the timeout. And you should also override this method - * to wake up the procedure, and also return false to tell the ProcedureExecutor that the timeout - * event has been handled. - * * @return true to let the framework handle the timeout as abort, false in case the procedure * handled the timeout itself. */ @@ -776,7 +662,7 @@ protected synchronized boolean setTimeoutFailure(Env env) { long timeDiff = System.currentTimeMillis() - lastUpdate; setFailure( "ProcedureExecutor", - new ProcedureTimeoutException("Operation timed out after " + timeDiff + " ms.")); + new ProcedureException("Operation timed out after " + timeDiff + " ms.")); return true; } return false; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/ProcedureExecutor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/ProcedureExecutor.java index 65dfe8cc3a1e3..781dcdd030ec0 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/ProcedureExecutor.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/ProcedureExecutor.java @@ -21,9 +21,8 @@ import org.apache.iotdb.commons.concurrent.ThreadName; import org.apache.iotdb.commons.utils.TestOnly; +import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.scheduler.ProcedureScheduler; import org.apache.iotdb.confignode.procedure.scheduler.SimpleProcedureScheduler; import org.apache.iotdb.confignode.procedure.state.ProcedureLockState; @@ -41,6 +40,7 @@ import java.util.Deque; import java.util.HashSet; import java.util.List; +import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; @@ -127,8 +127,8 @@ private void recover() { if (!proc.hasParent()) { rollbackStack.put(proc.getProcId(), new RootProcedureStack<>()); } + procedures.putIfAbsent(proc.getProcId(), proc); } - procedures.putIfAbsent(proc.getProcId(), proc); } List> runnableList = new ArrayList<>(); List> failedList = new ArrayList<>(); @@ -184,24 +184,14 @@ private void recover() { // executing, we need to set its state to RUNNABLE. 
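Note the recover() fix above: procedures.putIfAbsent moved inside the loop, so every reloaded procedure is registered in the id index rather than only the one left in scope after iteration. Schematically (the loadedProcedures source and field types are assumed from the surrounding class):

for (final Procedure<Env> proc : loadedProcedures) {
  if (!proc.hasParent()) {
    // Only root procedures own a rollback stack.
    rollbackStack.put(proc.getProcId(), new RootProcedureStack<>());
  }
  // Must run once per procedure, not once after the loop.
  procedures.putIfAbsent(proc.getProcId(), proc);
}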
procedure.setState(ProcedureState.RUNNABLE); runnableList.add(procedure); - } else { - procedure.afterRecover(environment); } }); restoreLocks(); - waitingTimeoutList.forEach( - procedure -> { - procedure.afterRecover(environment); - timeoutExecutor.add(procedure); - }); + waitingTimeoutList.forEach(timeoutExecutor::add); failedList.forEach(scheduler::addBack); - runnableList.forEach( - procedure -> { - procedure.afterRecover(environment); - scheduler.addBack(procedure); - }); + runnableList.forEach(scheduler::addBack); scheduler.signalAll(); } @@ -287,7 +277,7 @@ public void startCompletedCleaner(long cleanTimeInterval, long cleanEvictTTL) { new CompletedProcedureRecycler(store, completed, cleanTimeInterval, cleanEvictTTL)); } - private void addInternalProcedure(InternalProcedure interalProcedure) { + public void addInternalProcedure(InternalProcedure interalProcedure) { if (interalProcedure == null) { return; } @@ -329,68 +319,79 @@ private void executeProcedure(Procedure proc) { LOG.warn("Rollback stack is null for {}", proc.getProcId()); return; } - do { - if (!rootProcStack.acquire()) { - if (rootProcStack.setRollback()) { - switch (executeRootStackRollback(rootProcId, rootProcStack)) { - case LOCK_ACQUIRED: - break; - case LOCK_EVENT_WAIT: - LOG.info("LOCK_EVENT_WAIT rollback " + proc); - rootProcStack.unsetRollback(); - break; - case LOCK_YIELD_WAIT: - rootProcStack.unsetRollback(); - scheduler.yield(proc); - break; - default: - throw new UnsupportedOperationException(); - } - } else { - if (!proc.wasExecuted()) { - switch (executeRollback(proc)) { + ProcedureLockState lockState = null; + try { + do { + if (!rootProcStack.acquire()) { + if (rootProcStack.setRollback()) { + lockState = executeRootStackRollback(rootProcId, rootProcStack); + switch (lockState) { case LOCK_ACQUIRED: break; case LOCK_EVENT_WAIT: - LOG.info("LOCK_EVENT_WAIT can't rollback child running for {}", proc); + LOG.info("LOCK_EVENT_WAIT rollback {}", proc); + rootProcStack.unsetRollback(); break; case LOCK_YIELD_WAIT: + rootProcStack.unsetRollback(); scheduler.yield(proc); break; default: throw new UnsupportedOperationException(); } + } else { + if (!proc.wasExecuted()) { + switch (executeRollback(proc)) { + case LOCK_ACQUIRED: + break; + case LOCK_EVENT_WAIT: + LOG.info("LOCK_EVENT_WAIT can't rollback child running for {}", proc); + break; + case LOCK_YIELD_WAIT: + scheduler.yield(proc); + break; + default: + throw new UnsupportedOperationException(); + } + } } - } - break; - } - ProcedureLockState lockState = acquireLock(proc); - switch (lockState) { - case LOCK_ACQUIRED: - executeProcedure(rootProcStack, proc); - break; - case LOCK_YIELD_WAIT: - case LOCK_EVENT_WAIT: - LOG.info("{} lockstate is {}", proc, lockState); break; - default: - throw new UnsupportedOperationException(); - } - rootProcStack.release(); - - if (proc.isSuccess()) { - // update metrics on finishing the procedure - proc.updateMetricsOnFinish(getEnvironment(), proc.elapsedTime(), true); - LOG.debug("{} finished in {}ms successfully.", proc, proc.elapsedTime()); - if (proc.getProcId() == rootProcId) { - rootProcedureCleanup(proc); - } else { - executeCompletionCleanup(proc); } - return; - } + lockState = acquireLock(proc); + switch (lockState) { + case LOCK_ACQUIRED: + executeProcedure(rootProcStack, proc); + break; + case LOCK_YIELD_WAIT: + case LOCK_EVENT_WAIT: + LOG.info("{} lockstate is {}", proc, lockState); + break; + default: + throw new UnsupportedOperationException(); + } + rootProcStack.release(); + + if (proc.isSuccess()) { + // 
update metrics on finishing the procedure + proc.updateMetricsOnFinish(getEnvironment(), proc.elapsedTime(), true); + LOG.debug("{} finished in {}ms successfully.", proc, proc.elapsedTime()); + if (proc.getProcId() == rootProcId) { + rootProcedureCleanup(proc); + } else { + executeCompletionCleanup(proc); + } + return; + } - } while (rootProcStack.isFailed()); + } while (rootProcStack.isFailed()); + } finally { + // Only after procedure has completed execution can it be allowed to be rescheduled to prevent + // data races + if (Objects.equals(lockState, ProcedureLockState.LOCK_EVENT_WAIT)) { + LOG.info("procedureId {} wait for lock.", proc.getProcId()); + ((ConfigNodeProcedureEnv) this.environment).getNodeLock().waitProcedure(proc); + } + } } /** @@ -400,26 +401,21 @@ private void executeProcedure(Procedure proc) { * @param proc procedure */ private void executeProcedure(RootProcedureStack rootProcStack, Procedure proc) { - Preconditions.checkArgument( - proc.getState() == ProcedureState.RUNNABLE, "NOT RUNNABLE! " + proc); - boolean suspended = false; + if (proc.getState() != ProcedureState.RUNNABLE) { + LOG.error( + "The executing procedure should in RUNNABLE state, but it's not. Procedure is {}", proc); + return; + } boolean reExecute; Procedure[] subprocs = null; do { reExecute = false; - proc.resetPersistance(); try { subprocs = proc.doExecute(this.environment); if (subprocs != null && subprocs.length == 0) { subprocs = null; } - } catch (ProcedureSuspendedException e) { - LOG.debug("Suspend {}", proc); - suspended = true; - } catch (ProcedureYieldException e) { - LOG.debug("Yield {}", proc); - yieldProcedure(proc); } catch (InterruptedException e) { LOG.warn("Interrupt during execution, suspend or retry it later.", e); yieldProcedure(proc); @@ -439,22 +435,20 @@ private void executeProcedure(RootProcedureStack rootProcStack, Procedure p } } else if (proc.getState() == ProcedureState.WAITING_TIMEOUT) { LOG.info("Added into timeoutExecutor {}", proc); - } else if (!suspended) { + } else { proc.setState(ProcedureState.SUCCESS); } } // add procedure into rollback stack. 
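With ProcedureSuspendedException and ProcedureYieldException removed, execute() has a narrower contract: return children, return itself when there is more work, return null when done, or throw InterruptedException to be re-queued and retried later. A hypothetical subclass under that contract (tryDoWork is an invented helper, not part of this PR):

@Override
protected Procedure<Env>[] execute(final Env env) throws InterruptedException {
  if (!tryDoWork(env)) {
    // No suspend/yield exceptions any more: an InterruptedException sends
    // the procedure back to the scheduler for a later retry.
    throw new InterruptedException("work not ready, retry later");
  }
  return null; // finished, no sub-procedures
}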
rootProcStack.addRollbackStep(proc); - if (proc.needPersistance()) { - updateStoreOnExecution(rootProcStack, proc, subprocs); - } + updateStoreOnExecution(rootProcStack, proc, subprocs); if (!store.isRunning()) { return; } - if (proc.isRunnable() && !suspended && proc.isYieldAfterExecution(this.environment)) { + if (proc.isRunnable() && proc.isYieldAfterExecution(this.environment)) { yieldProcedure(proc); return; } @@ -465,7 +459,7 @@ private void executeProcedure(RootProcedureStack rootProcStack, Procedure p } releaseLock(proc, false); - if (!suspended && proc.isFinished() && proc.hasParent()) { + if (proc.isFinished() && proc.hasParent()) { countDownChildren(rootProcStack, proc); } } @@ -502,6 +496,7 @@ private void submitChildrenProcedures(Procedure[] subprocs) { subproc.updateMetricsOnSubmit(getEnvironment()); procedures.put(subproc.getProcId(), subproc); scheduler.addFront(subproc); + LOG.info("Sub-Procedure pid={} has been submitted", subproc.getProcId()); } } @@ -677,11 +672,6 @@ private void executeCompletionCleanup(Procedure proc) { if (proc.hasLock()) { releaseLock(proc, true); } - try { - proc.completionCleanup(this.environment); - } catch (Throwable e) { - LOG.error("CODE-BUG:Uncaught runtime exception for procedure {}", proc, e); - } } private void rootProcedureCleanup(Procedure proc) { @@ -715,7 +705,7 @@ private class WorkerThread extends StoppableThread { protected long keepAliveTime = -1; public WorkerThread(ThreadGroup threadGroup) { - this(threadGroup, "ProcExecWorker-"); + this(threadGroup, "ProcedureCoreWorker-"); } public WorkerThread(ThreadGroup threadGroup, String prefix) { @@ -735,26 +725,28 @@ public void run() { while (isRunning() && keepAlive(lastUpdated)) { Procedure procedure = scheduler.poll(keepAliveTime, TimeUnit.MILLISECONDS); if (procedure == null) { + Thread.sleep(1000); continue; } this.activeProcedure.set(procedure); - int activeCount = activeExecutorCount.incrementAndGet(); + activeExecutorCount.incrementAndGet(); startTime.set(System.currentTimeMillis()); executeProcedure(procedure); - activeCount = activeExecutorCount.decrementAndGet(); - LOG.trace("Halt pid={}, activeCount={}", procedure.getProcId(), activeCount); + activeExecutorCount.decrementAndGet(); + LOG.trace( + "Halt pid={}, activeCount={}", procedure.getProcId(), activeExecutorCount.get()); this.activeProcedure.set(null); lastUpdated = System.currentTimeMillis(); startTime.set(lastUpdated); } - } catch (Throwable throwable) { + } catch (Exception e) { if (this.activeProcedure.get() != null) { LOG.warn( - "Procedure Worker {} terminated {}", + "Exception happened when worker {} execute procedure {}", getName(), this.activeProcedure.get(), - throwable); + e); } } finally { LOG.info("Procedure worker {} terminated.", getName()); @@ -780,12 +772,12 @@ public long getCurrentRunTime() { } } - // A worker thread which can be added when core workers are stuck. Will timeout after - // keepAliveTime if there is no procedure to run. - private final class KeepAliveWorkerThread extends WorkerThread { + // A temporary worker thread will be launched when too many core workers are stuck. + // They will timeout after keepAliveTime if there is no procedure to run. 
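The TemporaryWorkerThread defined next differs from a core worker only in its keep-alive: core workers use keepAliveTime = -1 and never expire, while temporary ones give up after ten idle seconds. A sketch of the guard that run() is assumed to consult via keepAlive(lastUpdated) each time poll() returns nothing:

// keepAliveTime < 0 marks a permanent core worker; otherwise the worker ends
// once it has been idle longer than keepAliveTime. (Sketch; field names
// follow the surrounding diff.)
protected boolean keepAlive(final long lastUpdatedMillis) {
  return keepAliveTime < 0
      || System.currentTimeMillis() - lastUpdatedMillis < keepAliveTime;
}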
+ private final class TemporaryWorkerThread extends WorkerThread { - public KeepAliveWorkerThread(ThreadGroup group) { - super(group, "KAProcExecWorker-"); + public TemporaryWorkerThread(ThreadGroup group) { + super(group, "ProcedureTemporaryWorker-"); this.keepAliveTime = TimeUnit.SECONDS.toMillis(10); } @@ -807,22 +799,25 @@ public WorkerMonitor() { updateTimestamp(); } - private int checkForStuckWorkers() { + private int calculateRunningAndStuckWorkers() { // Check if any of the worker is stuck - int stuckCount = 0; + int runningCount = 0, stuckCount = 0; for (WorkerThread worker : workerThreads) { - if (worker.activeProcedure.get() == null - || worker.getCurrentRunTime() < DEFAULT_WORKER_STUCK_THRESHOLD) { + if (worker.activeProcedure.get() == null) { continue; } - + runningCount++; // WARN the worker is stuck - stuckCount++; - LOG.warn( - "Worker stuck {}({}), run time {} ms", - worker, - worker.activeProcedure.get().getProcType(), - worker.getCurrentRunTime()); + if (worker.getCurrentRunTime() < DEFAULT_WORKER_STUCK_THRESHOLD) { + stuckCount++; + LOG.warn( + "Worker stuck {}({}), run time {} ms", + worker, + worker.activeProcedure.get().getProcType(), + worker.getCurrentRunTime()); + } + LOG.info( + "Procedure workers: {} is running, {} is running and stuck", runningCount, stuckCount); } return stuckCount; } @@ -838,7 +833,7 @@ private void checkThreadCount(final int stuckCount) { // Let's add new worker thread more aggressively, as they will timeout finally if there is no // work to do. if (stuckPerc >= DEFAULT_WORKER_ADD_STUCK_PERCENTAGE && workerThreads.size() < maxPoolSize) { - final KeepAliveWorkerThread worker = new KeepAliveWorkerThread(threadGroup); + final TemporaryWorkerThread worker = new TemporaryWorkerThread(threadGroup); workerThreads.add(worker); worker.start(); LOG.debug("Added new worker thread {}", worker); @@ -847,7 +842,7 @@ private void checkThreadCount(final int stuckCount) { @Override protected void periodicExecute(Env env) { - final int stuckCount = checkForStuckWorkers(); + final int stuckCount = calculateRunningAndStuckWorkers(); checkThreadCount(stuckCount); updateTimestamp(); } @@ -926,52 +921,6 @@ public long submitProcedure(Procedure procedure) { return pushProcedure(procedure); } - /** - * Abort a specified procedure. - * - * @param procId procedure id - * @param force whether abort the running procdure. - * @return true if the procedure exists and has received the abort. - */ - public boolean abort(long procId, boolean force) { - Procedure procedure = procedures.get(procId); - if (procedure != null) { - if (!force && procedure.wasExecuted()) { - return false; - } - return procedure.abort(this.environment); - } - return false; - } - - public boolean abort(long procId) { - return abort(procId, true); - } - - public Procedure getResult(long procId) { - CompletedProcedureContainer retainer = completed.get(procId); - if (retainer == null) { - return null; - } else { - return retainer.getProcedure(); - } - } - - /** - * Query a procedure result. 
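calculateRunningAndStuckWorkers and checkThreadCount above implement a simple feedback loop: count busy workers whose current run time has reached the stuck threshold, then grow the pool with a short-lived worker once the stuck share crosses DEFAULT_WORKER_ADD_STUCK_PERCENTAGE. A compact sketch of that loop; the exact comparison and the ratio's denominator are assumptions for illustration:

// A busy worker counts as stuck once its procedure has been running for at
// least the stuck threshold; idle workers are skipped entirely.
int runningCount = 0;
int stuckCount = 0;
for (final WorkerThread worker : workerThreads) {
  if (worker.activeProcedure.get() == null) {
    continue;
  }
  runningCount++;
  if (worker.getCurrentRunTime() >= DEFAULT_WORKER_STUCK_THRESHOLD) {
    stuckCount++;
  }
}
// Aggressively add a temporary worker when most busy workers look stuck;
// it expires on its own if there is nothing left to run.
if (runningCount > 0
    && (float) stuckCount / runningCount >= DEFAULT_WORKER_ADD_STUCK_PERCENTAGE
    && workerThreads.size() < maxPoolSize) {
  final TemporaryWorkerThread worker = new TemporaryWorkerThread(threadGroup);
  workerThreads.add(worker);
  worker.start();
}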
- * - * @param procId procedure id - * @return procedure or retainer - */ - public Procedure getResultOrProcedure(long procId) { - CompletedProcedureContainer retainer = completed.get(procId); - if (retainer == null) { - return procedures.get(procId); - } else { - return retainer.getProcedure(); - } - } - public ProcedureScheduler getScheduler() { return scheduler; } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/TimeoutExecutorThread.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/TimeoutExecutorThread.java index d4f919c01be3e..5aaf9a623f523 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/TimeoutExecutorThread.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/TimeoutExecutorThread.java @@ -100,7 +100,7 @@ public long getDelay(TimeUnit unit) { @Override public int compareTo(Delayed other) { - return Long.compareUnsigned( + return Long.compare( this.getDelay(TimeUnit.MILLISECONDS), other.getDelay(TimeUnit.MILLISECONDS)); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/ConfigNodeProcedureEnv.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/ConfigNodeProcedureEnv.java index c2040a05328a5..6914f8aacf56b 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/ConfigNodeProcedureEnv.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/ConfigNodeProcedureEnv.java @@ -29,13 +29,14 @@ import org.apache.iotdb.commons.cluster.NodeStatus; import org.apache.iotdb.commons.cluster.NodeType; import org.apache.iotdb.commons.cluster.RegionStatus; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.PipePluginMeta; import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.plugin.meta.PipePluginMeta; import org.apache.iotdb.commons.trigger.TriggerInformation; import org.apache.iotdb.confignode.client.CnToCnNodeRequestType; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; +import org.apache.iotdb.confignode.client.sync.CnToDnSyncRequestType; import org.apache.iotdb.confignode.client.sync.SyncConfigNodeClientPool; import org.apache.iotdb.confignode.client.sync.SyncDataNodeClientPool; import org.apache.iotdb.confignode.consensus.request.write.confignode.RemoveConfigNodePlan; @@ -52,7 +53,6 @@ import org.apache.iotdb.confignode.manager.node.NodeManager; import org.apache.iotdb.confignode.manager.partition.PartitionManager; import org.apache.iotdb.confignode.manager.schema.ClusterSchemaManager; -import org.apache.iotdb.confignode.persistence.node.NodeInfo; import org.apache.iotdb.confignode.persistence.partition.PartitionInfo; import org.apache.iotdb.confignode.persistence.schema.ClusterSchemaInfo; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; @@ -61,6 +61,7 @@ import org.apache.iotdb.confignode.rpc.thrift.TAddConsensusGroupReq; import org.apache.iotdb.confignode.rpc.thrift.TNodeVersionInfo; import org.apache.iotdb.consensus.exception.ConsensusException; +import org.apache.iotdb.db.protocol.client.ConfigNodeInfo; import org.apache.iotdb.mpp.rpc.thrift.TActiveTriggerInstanceReq; import 
org.apache.iotdb.mpp.rpc.thrift.TCreateDataRegionReq; import org.apache.iotdb.mpp.rpc.thrift.TCreatePipePluginInstanceReq; @@ -70,6 +71,7 @@ import org.apache.iotdb.mpp.rpc.thrift.TDropTriggerInstanceReq; import org.apache.iotdb.mpp.rpc.thrift.TInactiveTriggerInstanceReq; import org.apache.iotdb.mpp.rpc.thrift.TInvalidateCacheReq; +import org.apache.iotdb.mpp.rpc.thrift.TNotifyRegionMigrationReq; import org.apache.iotdb.mpp.rpc.thrift.TPushConsumerGroupMetaReq; import org.apache.iotdb.mpp.rpc.thrift.TPushConsumerGroupMetaResp; import org.apache.iotdb.mpp.rpc.thrift.TPushMultiPipeMetaReq; @@ -110,18 +112,23 @@ public class ConfigNodeProcedureEnv { private final ReentrantLock schedulerLock = new ReentrantLock(true); + private final ReentrantLock submitRegionMigrateLock = new ReentrantLock(true); + private final ConfigManager configManager; private final ProcedureScheduler scheduler; private final RegionMaintainHandler regionMaintainHandler; + private final RemoveDataNodeHandler removeDataNodeHandler; + private final ReentrantLock removeConfigNodeLock; public ConfigNodeProcedureEnv(ConfigManager configManager, ProcedureScheduler scheduler) { this.configManager = configManager; this.scheduler = scheduler; this.regionMaintainHandler = new RegionMaintainHandler(configManager); + this.removeDataNodeHandler = new RemoveDataNodeHandler(configManager); this.removeConfigNodeLock = new ReentrantLock(); } @@ -186,7 +193,7 @@ public boolean invalidateCache(String storageGroupName) throws IOException, TExc .sendSyncRequestToDataNodeWithRetry( dataNodeConfiguration.getLocation().getInternalEndPoint(), invalidateCacheReq, - CnToDnRequestType.INVALIDATE_PARTITION_CACHE); + CnToDnSyncRequestType.INVALIDATE_PARTITION_CACHE); final TSStatus invalidateSchemaStatus = (TSStatus) @@ -194,7 +201,7 @@ public boolean invalidateCache(String storageGroupName) throws IOException, TExc .sendSyncRequestToDataNodeWithRetry( dataNodeConfiguration.getLocation().getInternalEndPoint(), invalidateCacheReq, - CnToDnRequestType.INVALIDATE_SCHEMA_CACHE); + CnToDnSyncRequestType.INVALIDATE_SCHEMA_CACHE); if (!verifySucceed(invalidatePartitionStatus, invalidateSchemaStatus)) { LOG.error( @@ -217,21 +224,6 @@ public boolean verifySucceed(TSStatus... status) { .allMatch(tsStatus -> tsStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()); } - public boolean checkEnoughDataNodeAfterRemoving(TDataNodeLocation removedDatanode) { - final int existedDataNodeNum = - getNodeManager() - .filterDataNodeThroughStatus( - NodeStatus.Running, NodeStatus.ReadOnly, NodeStatus.Removing) - .size(); - int dataNodeNumAfterRemoving; - if (getLoadManager().getNodeStatus(removedDatanode.getDataNodeId()) != NodeStatus.Unknown) { - dataNodeNumAfterRemoving = existedDataNodeNum - 1; - } else { - dataNodeNumAfterRemoving = existedDataNodeNum; - } - return dataNodeNumAfterRemoving >= NodeInfo.getMinimumDataNode(); - } - /** * Let the remotely new ConfigNode build the ConsensusGroup. 
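The TimeoutExecutorThread hunk above is a genuine ordering fix: getDelay() goes negative once an entry is overdue, and Long.compareUnsigned reinterprets a negative delay as an enormous positive one, pushing due entries to the back of the delay queue. Long.compare restores the intended order:

// Overdue entry (-5 ms remaining) versus pending entry (+5 ms remaining):
Long.compare(-5L, 5L);          // < 0: the overdue entry is polled first
Long.compareUnsigned(-5L, 5L);  // > 0: -5 is read as 2^64 - 5, wrong order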
* @@ -327,14 +319,15 @@ public void deleteConfigNodePeer(TConfigNodeLocation removedConfigNode) * @param tConfigNodeLocation config node location * @throws ProcedureException if failed status */ - public void stopConfigNode(TConfigNodeLocation tConfigNodeLocation) throws ProcedureException { + public void stopAndClearConfigNode(TConfigNodeLocation tConfigNodeLocation) + throws ProcedureException { TSStatus tsStatus = (TSStatus) SyncConfigNodeClientPool.getInstance() .sendSyncRequestToConfigNodeWithRetry( tConfigNodeLocation.getInternalEndPoint(), tConfigNodeLocation, - CnToCnNodeRequestType.STOP_CONFIG_NODE); + CnToCnNodeRequestType.STOP_AND_CLEAR_CONFIG_NODE); if (tsStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { throw new ProcedureException(tsStatus.getMessage()); @@ -389,14 +382,14 @@ public void markDataNodeAsRemovingAndBroadcast(TDataNodeLocation dataNodeLocatio .sendSyncRequestToDataNodeWithGivenRetry( dataNodeLocation.getInternalEndPoint(), NodeStatus.Removing.getStatus(), - CnToDnRequestType.SET_SYSTEM_STATUS, + CnToDnSyncRequestType.SET_SYSTEM_STATUS, 1); } else { SyncDataNodeClientPool.getInstance() .sendSyncRequestToDataNodeWithRetry( dataNodeLocation.getInternalEndPoint(), NodeStatus.Removing.getStatus(), - CnToDnRequestType.SET_SYSTEM_STATUS); + CnToDnSyncRequestType.SET_SYSTEM_STATUS); } long currentTime = System.nanoTime(); @@ -472,7 +465,7 @@ public Map doRegionCreation( private DataNodeAsyncRequestContext getCreateSchemaRegionClientHandler(CreateRegionGroupsPlan createRegionGroupsPlan) { DataNodeAsyncRequestContext clientHandler = - new DataNodeAsyncRequestContext<>(CnToDnRequestType.CREATE_SCHEMA_REGION); + new DataNodeAsyncRequestContext<>(CnToDnAsyncRequestType.CREATE_SCHEMA_REGION); int requestId = 0; for (Map.Entry> sgRegionsEntry : @@ -495,7 +488,7 @@ public Map doRegionCreation( private DataNodeAsyncRequestContext getCreateDataRegionClientHandler(CreateRegionGroupsPlan createRegionGroupsPlan) { DataNodeAsyncRequestContext clientHandler = - new DataNodeAsyncRequestContext<>(CnToDnRequestType.CREATE_DATA_REGION); + new DataNodeAsyncRequestContext<>(CnToDnAsyncRequestType.CREATE_DATA_REGION); int requestId = 0; for (Map.Entry> sgRegionsEntry : @@ -531,6 +524,28 @@ private TCreateDataRegionReq genCreateDataRegionReq( return req; } + public List notifyRegionMigrationToAllDataNodes( + TConsensusGroupId consensusGroupId, boolean isStart) { + final Map dataNodeLocationMap = + configManager.getNodeManager().getRegisteredDataNodeLocations(); + final TNotifyRegionMigrationReq request = + new TNotifyRegionMigrationReq( + configManager + .getConsensusManager() + .getConsensusImpl() + .getLogicalClock(ConfigNodeInfo.CONFIG_REGION_ID), + System.nanoTime(), + configManager.getProcedureManager().getRegionOperationConsensusIds()); + request.setRegionId(consensusGroupId); + request.setIsStart(isStart); + + final DataNodeAsyncRequestContext clientHandler = + new DataNodeAsyncRequestContext<>( + CnToDnAsyncRequestType.NOTIFY_REGION_MIGRATION, request, dataNodeLocationMap); + CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); + return clientHandler.getResponseList(); + } + public void persistRegionGroup(CreateRegionGroupsPlan createRegionGroupsPlan) { // Persist the allocation result try { @@ -590,7 +605,7 @@ public List createTriggerOnDataNodes( DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.CREATE_TRIGGER_INSTANCE, request, dataNodeLocationMap); + 
CnToDnAsyncRequestType.CREATE_TRIGGER_INSTANCE, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList(); } @@ -604,7 +619,7 @@ public List dropTriggerOnDataNodes(String triggerName, boolean needToD DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.DROP_TRIGGER_INSTANCE, request, dataNodeLocationMap); + CnToDnAsyncRequestType.DROP_TRIGGER_INSTANCE, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList(); } @@ -617,7 +632,7 @@ public List activeTriggerOnDataNodes(String triggerName) { DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.ACTIVE_TRIGGER_INSTANCE, request, dataNodeLocationMap); + CnToDnAsyncRequestType.ACTIVE_TRIGGER_INSTANCE, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList(); } @@ -630,7 +645,7 @@ public List inactiveTriggerOnDataNodes(String triggerName) { DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.INACTIVE_TRIGGER_INSTANCE, request, dataNodeLocationMap); + CnToDnAsyncRequestType.INACTIVE_TRIGGER_INSTANCE, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList(); } @@ -644,7 +659,7 @@ public List createPipePluginOnDataNodes(PipePluginMeta pipePluginMeta, final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.CREATE_PIPE_PLUGIN, request, dataNodeLocationMap); + CnToDnAsyncRequestType.CREATE_PIPE_PLUGIN, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList(); } @@ -658,7 +673,7 @@ public List dropPipePluginOnDataNodes( DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.DROP_PIPE_PLUGIN, request, dataNodeLocationMap); + CnToDnAsyncRequestType.DROP_PIPE_PLUGIN, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList(); } @@ -671,7 +686,7 @@ public Map pushAllPipeMetaToDataNodes( final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.PIPE_PUSH_ALL_META, request, dataNodeLocationMap); + CnToDnAsyncRequestType.PIPE_PUSH_ALL_META, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance() .sendAsyncRequestToNodeWithRetryAndTimeoutInMs( clientHandler, @@ -686,7 +701,7 @@ public Map pushSinglePipeMetaToDataNodes(ByteBuffer final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.PIPE_PUSH_SINGLE_META, request, dataNodeLocationMap); + CnToDnAsyncRequestType.PIPE_PUSH_SINGLE_META, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance() .sendAsyncRequestToNodeWithRetryAndTimeoutInMs( clientHandler, @@ -702,7 +717,7 @@ public Map dropSinglePipeOnDataNodes(String pipeName final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.PIPE_PUSH_SINGLE_META, request, dataNodeLocationMap); + 
CnToDnAsyncRequestType.PIPE_PUSH_SINGLE_META, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance() .sendAsyncRequestToNodeWithRetryAndTimeoutInMs( clientHandler, @@ -719,7 +734,7 @@ public Map pushMultiPipeMetaToDataNodes( final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.PIPE_PUSH_MULTI_META, request, dataNodeLocationMap); + CnToDnAsyncRequestType.PIPE_PUSH_MULTI_META, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance() .sendAsyncRequestToNodeWithRetryAndTimeoutInMs( clientHandler, @@ -735,7 +750,7 @@ public Map dropMultiPipeOnDataNodes(List pip final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.PIPE_PUSH_MULTI_META, request, dataNodeLocationMap); + CnToDnAsyncRequestType.PIPE_PUSH_MULTI_META, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance() .sendAsyncRequestToNodeWithRetryAndTimeoutInMs( clientHandler, @@ -751,7 +766,7 @@ public Map pushAllTopicMetaToDataNodes( final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.TOPIC_PUSH_ALL_META, request, dataNodeLocationMap); + CnToDnAsyncRequestType.TOPIC_PUSH_ALL_META, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance() .sendAsyncRequestToNodeWithRetryAndTimeoutInMs( clientHandler, @@ -766,7 +781,7 @@ public List pushSingleTopicOnDataNode(ByteBuffer topicMeta) { final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.TOPIC_PUSH_SINGLE_META, request, dataNodeLocationMap); + CnToDnAsyncRequestType.TOPIC_PUSH_SINGLE_META, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList().stream() .map(TPushTopicMetaResp::getStatus) @@ -781,7 +796,7 @@ public List dropSingleTopicOnDataNode(String topicNameToDrop) { final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.TOPIC_PUSH_SINGLE_META, request, dataNodeLocationMap); + CnToDnAsyncRequestType.TOPIC_PUSH_SINGLE_META, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList().stream() .map(TPushTopicMetaResp::getStatus) @@ -797,7 +812,7 @@ public Map pushMultiTopicMetaToDataNodes( final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.TOPIC_PUSH_MULTI_META, request, dataNodeLocationMap); + CnToDnAsyncRequestType.TOPIC_PUSH_MULTI_META, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance() .sendAsyncRequestToNodeWithRetryAndTimeoutInMs( clientHandler, @@ -813,7 +828,7 @@ public Map dropMultiTopicOnDataNodes(List t final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.TOPIC_PUSH_MULTI_META, request, dataNodeLocationMap); + CnToDnAsyncRequestType.TOPIC_PUSH_MULTI_META, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance() .sendAsyncRequestToNodeWithRetryAndTimeoutInMs( clientHandler, @@ -831,7 +846,7 @@ public Map pushAllConsumerGroupMetaToDataNo final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.CONSUMER_GROUP_PUSH_ALL_META, request, dataNodeLocationMap); + 
CnToDnAsyncRequestType.CONSUMER_GROUP_PUSH_ALL_META, request, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance() .sendAsyncRequestToNodeWithRetryAndTimeoutInMs( clientHandler, @@ -848,7 +863,9 @@ public List pushSingleConsumerGroupOnDataNode(ByteBuffer consumerGroup final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.CONSUMER_GROUP_PUSH_SINGLE_META, request, dataNodeLocationMap); + CnToDnAsyncRequestType.CONSUMER_GROUP_PUSH_SINGLE_META, + request, + dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList().stream() .map(TPushConsumerGroupMetaResp::getStatus) @@ -864,7 +881,9 @@ public List dropSingleConsumerGroupOnDataNode(String consumerGroupName final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.CONSUMER_GROUP_PUSH_SINGLE_META, request, dataNodeLocationMap); + CnToDnAsyncRequestType.CONSUMER_GROUP_PUSH_SINGLE_META, + request, + dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); return clientHandler.getResponseList().stream() .map(TPushConsumerGroupMetaResp::getStatus) @@ -879,18 +898,22 @@ public ProcedureScheduler getScheduler() { return scheduler; } - public LockQueue getRegionMigrateLock() { - return regionMaintainHandler.getRegionMigrateLock(); - } - public ReentrantLock getSchedulerLock() { return schedulerLock; } + public ReentrantLock getSubmitRegionMigrateLock() { + return submitRegionMigrateLock; + } + public RegionMaintainHandler getRegionMaintainHandler() { return regionMaintainHandler; } + public RemoveDataNodeHandler getRemoveDataNodeHandler() { + return removeDataNodeHandler; + } + private ConsensusManager getConsensusManager() { return configManager.getConsensusManager(); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RegionMaintainHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RegionMaintainHandler.java index 7ef330f460f3b..c1b7ecdab934c 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RegionMaintainHandler.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RegionMaintainHandler.java @@ -32,28 +32,20 @@ import org.apache.iotdb.commons.client.sync.SyncDataNodeInternalServiceClient; import org.apache.iotdb.commons.cluster.NodeStatus; import org.apache.iotdb.commons.cluster.RegionStatus; -import org.apache.iotdb.commons.service.metric.MetricService; import org.apache.iotdb.commons.utils.CommonDateTimeUtils; -import org.apache.iotdb.commons.utils.NodeUrlUtils; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; +import org.apache.iotdb.confignode.client.sync.CnToDnSyncRequestType; import org.apache.iotdb.confignode.client.sync.SyncDataNodeClientPool; import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; -import org.apache.iotdb.confignode.consensus.request.write.datanode.RemoveDataNodePlan; import org.apache.iotdb.confignode.consensus.request.write.partition.AddRegionLocationPlan; import 
org.apache.iotdb.confignode.consensus.request.write.partition.RemoveRegionLocationPlan; -import org.apache.iotdb.confignode.consensus.response.datanode.DataNodeToStatusResp; import org.apache.iotdb.confignode.manager.ConfigManager; import org.apache.iotdb.confignode.manager.load.cache.consensus.ConsensusGroupHeartbeatSample; -import org.apache.iotdb.confignode.manager.partition.PartitionMetrics; -import org.apache.iotdb.confignode.persistence.node.NodeInfo; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.scheduler.LockQueue; -import org.apache.iotdb.consensus.exception.ConsensusException; import org.apache.iotdb.mpp.rpc.thrift.TCreatePeerReq; -import org.apache.iotdb.mpp.rpc.thrift.TDisableDataNodeReq; import org.apache.iotdb.mpp.rpc.thrift.TMaintainPeerReq; import org.apache.iotdb.mpp.rpc.thrift.TRegionLeaderChangeResp; import org.apache.iotdb.mpp.rpc.thrift.TRegionMigrateResult; @@ -73,10 +65,8 @@ import java.util.stream.Collectors; import static org.apache.iotdb.confignode.conf.ConfigNodeConstant.REGION_MIGRATE_PROCESS; -import static org.apache.iotdb.confignode.conf.ConfigNodeConstant.REMOVE_DATANODE_PROCESS; import static org.apache.iotdb.consensus.ConsensusFactory.IOT_CONSENSUS; import static org.apache.iotdb.consensus.ConsensusFactory.RATIS_CONSENSUS; -import static org.apache.iotdb.consensus.ConsensusFactory.SIMPLE_CONSENSUS; public class RegionMaintainHandler { @@ -86,9 +76,6 @@ public class RegionMaintainHandler { private final ConfigManager configManager; - /** region migrate lock */ - private final LockQueue regionMigrateLock = new LockQueue(); - private final IClientManager dataNodeClientManager; public RegionMaintainHandler(ConfigManager configManager) { @@ -105,61 +92,8 @@ public static String getIdWithRpcEndpoint(TDataNodeLocation location) { location.getDataNodeId(), location.getClientRpcEndPoint()); } - /** - * Get all consensus group id in this node - * - * @param removedDataNode the DataNode to be removed - * @return group id list to be migrated - */ - public List getMigratedDataNodeRegions(TDataNodeLocation removedDataNode) { - return configManager.getPartitionManager().getAllReplicaSets().stream() - .filter( - replicaSet -> - replicaSet.getDataNodeLocations().contains(removedDataNode) - && replicaSet.regionId.getType() != TConsensusGroupType.ConfigRegion) - .map(TRegionReplicaSet::getRegionId) - .collect(Collectors.toList()); - } - - /** - * broadcast these datanode in RemoveDataNodeReq are disabled, so they will not accept read/write - * request - * - * @param disabledDataNode TDataNodeLocation - */ - public void broadcastDisableDataNode(TDataNodeLocation disabledDataNode) { - LOGGER.info( - "DataNodeRemoveService start broadcastDisableDataNode to cluster, disabledDataNode: {}", - getIdWithRpcEndpoint(disabledDataNode)); - - List otherOnlineDataNodes = - configManager.getNodeManager().filterDataNodeThroughStatus(NodeStatus.Running).stream() - .filter(node -> !node.getLocation().equals(disabledDataNode)) - .collect(Collectors.toList()); - - for (TDataNodeConfiguration node : otherOnlineDataNodes) { - TDisableDataNodeReq disableReq = new TDisableDataNodeReq(disabledDataNode); - TSStatus status = - (TSStatus) - SyncDataNodeClientPool.getInstance() - .sendSyncRequestToDataNodeWithRetry( - node.getLocation().getInternalEndPoint(), - disableReq, - CnToDnRequestType.DISABLE_DATA_NODE); - if (!isSucceed(status)) { - LOGGER.error( - "{}, BroadcastDisableDataNode meets error, disabledDataNode: {}, error: 
{}", - REMOVE_DATANODE_PROCESS, - getIdWithRpcEndpoint(disabledDataNode), - status); - return; - } - } - - LOGGER.info( - "{}, DataNodeRemoveService finished broadcastDisableDataNode to cluster, disabledDataNode: {}", - REMOVE_DATANODE_PROCESS, - getIdWithRpcEndpoint(disabledDataNode)); + public static String simplifiedLocation(TDataNodeLocation dataNodeLocation) { + return dataNodeLocation.getDataNodeId() + "@" + dataNodeLocation.getInternalEndPoint().getIp(); } /** @@ -221,8 +155,8 @@ public TSStatus createNewRegionPeer(TConsensusGroupId regionId, TDataNodeLocatio currentPeerNodes = Collections.emptyList(); } - String storageGroup = configManager.getPartitionManager().getRegionStorageGroup(regionId); - TCreatePeerReq req = new TCreatePeerReq(regionId, currentPeerNodes, storageGroup); + String database = configManager.getPartitionManager().getRegionDatabase(regionId); + TCreatePeerReq req = new TCreatePeerReq(regionId, currentPeerNodes, database); status = (TSStatus) @@ -230,7 +164,7 @@ public TSStatus createNewRegionPeer(TConsensusGroupId regionId, TDataNodeLocatio .sendSyncRequestToDataNodeWithRetry( destDataNode.getInternalEndPoint(), req, - CnToDnRequestType.CREATE_NEW_REGION_PEER); + CnToDnSyncRequestType.CREATE_NEW_REGION_PEER); if (isSucceed(status)) { LOGGER.info( @@ -276,7 +210,7 @@ public TSStatus submitAddRegionPeerTask( .sendSyncRequestToDataNodeWithRetry( coordinator.getInternalEndPoint(), maintainPeerReq, - CnToDnRequestType.ADD_REGION_PEER); + CnToDnSyncRequestType.ADD_REGION_PEER); LOGGER.info( "{}, Send action addRegionPeer finished, regionId: {}, rpcDataNode: {}, destDataNode: {}, status: {}", REGION_MIGRATE_PROCESS, @@ -313,7 +247,7 @@ public TSStatus submitRemoveRegionPeerTask( .sendSyncRequestToDataNodeWithRetry( coordinator.getInternalEndPoint(), maintainPeerReq, - CnToDnRequestType.REMOVE_REGION_PEER); + CnToDnSyncRequestType.REMOVE_REGION_PEER); LOGGER.info( "{}, Send action removeRegionPeer finished, regionId: {}, rpcDataNode: {}", REGION_MIGRATE_PROCESS, @@ -347,14 +281,14 @@ public TSStatus submitDeleteOldRegionPeerTask( .sendSyncRequestToDataNodeWithGivenRetry( originalDataNode.getInternalEndPoint(), maintainPeerReq, - CnToDnRequestType.DELETE_OLD_REGION_PEER, + CnToDnSyncRequestType.DELETE_OLD_REGION_PEER, 1) : (TSStatus) SyncDataNodeClientPool.getInstance() .sendSyncRequestToDataNodeWithRetry( originalDataNode.getInternalEndPoint(), maintainPeerReq, - CnToDnRequestType.DELETE_OLD_REGION_PEER); + CnToDnSyncRequestType.DELETE_OLD_REGION_PEER); LOGGER.info( "{}, Send action deleteOldRegionPeer finished, regionId: {}, dataNodeId: {}", REGION_MIGRATE_PROCESS, @@ -369,7 +303,7 @@ public Map resetPeerList( Map dataNodeLocationMap) { DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.RESET_PEER_LIST, + CnToDnAsyncRequestType.RESET_PEER_LIST, new TResetPeerListReq(regionId, correctDataNodeLocations), dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); @@ -400,26 +334,26 @@ public TRegionMigrateResult waitTaskFinish(long taskId, TDataNodeLocation dataNo MAX_DISCONNECTION_TOLERATE_MS); long disconnectionTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - lastReportTime); if (disconnectionTime > waitTime) { - break; + LOGGER.warn( + "{} task {} cannot get task report from DataNode {}, last report time is {} ago", + REGION_MIGRATE_PROCESS, + taskId, + dataNodeLocation, + CommonDateTimeUtils.convertMillisecondToDurationStr( + 
TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - lastReportTime))); + TRegionMigrateResult report = new TRegionMigrateResult(); + report.setTaskStatus(TRegionMaintainTaskStatus.FAIL); + report.setFailedNodeAndReason(new HashMap<>()); + report.getFailedNodeAndReason().put(dataNodeLocation, TRegionMigrateFailedType.Disconnect); + return report; } try { TimeUnit.SECONDS.sleep(1); } catch (InterruptedException ignore) { Thread.currentThread().interrupt(); + return new TRegionMigrateResult(TRegionMaintainTaskStatus.PROCESSING); } } - LOGGER.warn( - "{} task {} cannot get task report from DataNode {}, last report time is {} ago", - REGION_MIGRATE_PROCESS, - taskId, - dataNodeLocation, - CommonDateTimeUtils.convertMillisecondToDurationStr( - TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - lastReportTime))); - TRegionMigrateResult report = new TRegionMigrateResult(); - report.setTaskStatus(TRegionMaintainTaskStatus.FAIL); - report.setFailedNodeAndReason(new HashMap<>()); - report.getFailedNodeAndReason().put(dataNodeLocation, TRegionMigrateFailedType.Disconnect); - return report; } public void addRegionLocation(TConsensusGroupId regionId, TDataNodeLocation newLocation) { @@ -463,17 +397,32 @@ public void removeRegionLocation( * @return DataNode locations */ public List findRegionLocations(TConsensusGroupId regionId) { - Optional regionReplicaSet = - configManager.getPartitionManager().getAllReplicaSets().stream() - .filter(rg -> rg.regionId.equals(regionId)) - .findAny(); + Optional regionReplicaSet = getRegionReplicaSet(regionId); if (regionReplicaSet.isPresent()) { return regionReplicaSet.get().getDataNodeLocations(); } - return Collections.emptyList(); } + public Optional getRegionReplicaSet(TConsensusGroupId regionId) { + return configManager.getPartitionManager().getAllReplicaSets().stream() + .filter(rg -> rg.regionId.equals(regionId)) + .findAny(); + } + + public String getRegionReplicaSetString(TConsensusGroupId regionId) { + Optional regionReplicaSet = getRegionReplicaSet(regionId); + if (!regionReplicaSet.isPresent()) { + return "UNKNOWN!"; + } + StringBuilder result = new StringBuilder(regionReplicaSet.get().getRegionId() + ": {"); + for (TDataNodeLocation dataNodeLocation : regionReplicaSet.get().getDataNodeLocations()) { + result.append(simplifiedLocation(dataNodeLocation)).append(", "); + } + result.append("}"); + return result.toString(); + } + private Optional pickNewReplicaNodeForRegion( List regionReplicaNodes) { List dataNodeConfigurations = @@ -486,174 +435,14 @@ private Optional pickNewReplicaNodeForRegion( .findAny(); } - private boolean isSucceed(TSStatus status) { + public boolean isSucceed(TSStatus status) { return status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode(); } - private boolean isFailed(TSStatus status) { + public boolean isFailed(TSStatus status) { return !isSucceed(status); } - /** - * Stop old data node - * - * @param dataNode old data node - */ - public void stopDataNode(TDataNodeLocation dataNode) { - LOGGER.info( - "{}, Begin to stop DataNode and kill the DataNode process {}", - REMOVE_DATANODE_PROCESS, - dataNode); - TSStatus status = - (TSStatus) - SyncDataNodeClientPool.getInstance() - .sendSyncRequestToDataNodeWithGivenRetry( - dataNode.getInternalEndPoint(), dataNode, CnToDnRequestType.STOP_DATA_NODE, 2); - configManager.getLoadManager().removeNodeCache(dataNode.getDataNodeId()); - LOGGER.info( - "{}, Stop Data Node result: {}, stoppedDataNode: {}", - REMOVE_DATANODE_PROCESS, - status, - dataNode); - } - - /** - * check if the remove 
datanode request illegal - * - * @param removeDataNodePlan RemoveDataNodeReq - * @return SUCCEED_STATUS when request is legal. - */ - public DataNodeToStatusResp checkRemoveDataNodeRequest(RemoveDataNodePlan removeDataNodePlan) { - DataNodeToStatusResp dataSet = new DataNodeToStatusResp(); - dataSet.setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); - - TSStatus status = checkClusterProtocol(); - if (isFailed(status)) { - dataSet.setStatus(status); - return dataSet; - } - status = checkRegionReplication(removeDataNodePlan); - if (isFailed(status)) { - dataSet.setStatus(status); - return dataSet; - } - - status = checkDataNodeExist(removeDataNodePlan); - if (isFailed(status)) { - dataSet.setStatus(status); - return dataSet; - } - - return dataSet; - } - - /** - * Check whether all DataNodes to be deleted exist in the cluster - * - * @param removeDataNodePlan RemoveDataNodeReq - * @return SUCCEED_STATUS if all DataNodes to be deleted exist in the cluster, DATANODE_NOT_EXIST - * otherwise - */ - private TSStatus checkDataNodeExist(RemoveDataNodePlan removeDataNodePlan) { - TSStatus status = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - - List allDataNodes = - configManager.getNodeManager().getRegisteredDataNodes().stream() - .map(TDataNodeConfiguration::getLocation) - .collect(Collectors.toList()); - boolean hasNotExistNode = - removeDataNodePlan.getDataNodeLocations().stream() - .anyMatch(loc -> !allDataNodes.contains(loc)); - if (hasNotExistNode) { - status.setCode(TSStatusCode.DATANODE_NOT_EXIST.getStatusCode()); - status.setMessage("there exist Data Node in request but not in cluster"); - } - return status; - } - - /** - * Check whether the cluster has enough DataNodes to maintain RegionReplicas - * - * @param removeDataNodePlan RemoveDataNodeReq - * @return SUCCEED_STATUS if the number of DataNodes is enough, LACK_REPLICATION otherwise - */ - private TSStatus checkRegionReplication(RemoveDataNodePlan removeDataNodePlan) { - TSStatus status = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - List removedDataNodes = removeDataNodePlan.getDataNodeLocations(); - - int availableDatanodeSize = - configManager - .getNodeManager() - .filterDataNodeThroughStatus(NodeStatus.Running, NodeStatus.ReadOnly) - .size(); - // when the configuration is one replication, it will be failed if the data node is not in - // running state. 
- if (CONF.getSchemaReplicationFactor() == 1 || CONF.getDataReplicationFactor() == 1) { - for (TDataNodeLocation dataNodeLocation : removedDataNodes) { - // check whether removed data node is in running state - if (!NodeStatus.Running.equals( - configManager.getLoadManager().getNodeStatus(dataNodeLocation.getDataNodeId()))) { - removedDataNodes.remove(dataNodeLocation); - LOGGER.error( - "Failed to remove data node {} because it is not in running and the configuration of cluster is one replication", - dataNodeLocation); - } - if (removedDataNodes.isEmpty()) { - status.setCode(TSStatusCode.NO_ENOUGH_DATANODE.getStatusCode()); - status.setMessage("Failed to remove all requested data nodes"); - return status; - } - } - } - - int removedDataNodeSize = - (int) - removeDataNodePlan.getDataNodeLocations().stream() - .filter( - x -> - configManager.getLoadManager().getNodeStatus(x.getDataNodeId()) - != NodeStatus.Unknown) - .count(); - if (availableDatanodeSize - removedDataNodeSize < NodeInfo.getMinimumDataNode()) { - status.setCode(TSStatusCode.NO_ENOUGH_DATANODE.getStatusCode()); - status.setMessage( - String.format( - "Can't remove datanode due to the limit of replication factor, " - + "availableDataNodeSize: %s, maxReplicaFactor: %s, max allowed removed Data Node size is: %s", - availableDatanodeSize, - NodeInfo.getMinimumDataNode(), - (availableDatanodeSize - NodeInfo.getMinimumDataNode()))); - } - return status; - } - - public LockQueue getRegionMigrateLock() { - return regionMigrateLock; - } - - /** - * Remove data node in node info - * - * @param dataNodeLocation data node location - */ - public void removeDataNodePersistence(TDataNodeLocation dataNodeLocation) { - // Remove consensus record - List removeDataNodes = Collections.singletonList(dataNodeLocation); - try { - configManager.getConsensusManager().write(new RemoveDataNodePlan(removeDataNodes)); - } catch (ConsensusException e) { - LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e); - } - - // Adjust maxRegionGroupNum - configManager.getClusterSchemaManager().adjustMaxRegionGroupNum(); - - // Remove metrics - PartitionMetrics.unbindDataNodePartitionMetricsWhenUpdate( - MetricService.getInstance(), - NodeUrlUtils.convertTEndPointUrl(dataNodeLocation.getClientRpcEndPoint())); - } - /** * Change the leader of given Region. 
* @@ -665,23 +454,19 @@ public void removeDataNodePersistence(TDataNodeLocation dataNodeLocation) { * @param regionId The region to be migrated * @param originalDataNode The DataNode where the region locates */ - public void transferRegionLeader(TConsensusGroupId regionId, TDataNodeLocation originalDataNode) - throws ProcedureException { + public void transferRegionLeader( + TConsensusGroupId regionId, TDataNodeLocation originalDataNode, TDataNodeLocation coordinator) + throws ProcedureException, InterruptedException { // find new leader - final int findNewLeaderTimeLimitSecond = 10; - long startTime = System.nanoTime(); Optional<TDataNodeLocation> newLeaderNode = Optional.empty(); - while (System.nanoTime() - startTime < TimeUnit.SECONDS.toNanos(findNewLeaderTimeLimitSecond)) { - newLeaderNode = filterDataNodeWithOtherRegionReplica(regionId, originalDataNode); - if (newLeaderNode.isPresent()) { - break; - } + List<TDataNodeLocation> excludeDataNode = new ArrayList<>(); + excludeDataNode.add(originalDataNode); + excludeDataNode.add(coordinator); + newLeaderNode = filterDataNodeWithOtherRegionReplica(regionId, excludeDataNode); + if (!newLeaderNode.isPresent()) { + // If no other replica is available, fall back to the coordinator + newLeaderNode = Optional.of(coordinator); } - newLeaderNode.orElseThrow( - () -> - new ProcedureException( - "Cannot find the new leader after " + findNewLeaderTimeLimitSecond + " seconds")); - // ratis needs DataNode to do election by itself long timestamp = System.nanoTime(); if (TConsensusGroupType.SchemaRegion.equals(regionId.getType()) @@ -689,6 +474,18 @@ public void transferRegionLeader(TConsensusGroupId regionId, TDataNodeLocation o && RATIS_CONSENSUS.equals(CONF.getDataRegionConsensusProtocolClass())) { final int MAX_RETRY_TIME = 10; int retryTime = 0; + long sleepTime = + (CONF.getSchemaRegionRatisRpcLeaderElectionTimeoutMaxMs() + + CONF.getSchemaRegionRatisRpcLeaderElectionTimeoutMinMs()) + / 2; + Integer leaderId = configManager.getLoadManager().getRegionLeaderMap().get(regionId); + + if (leaderId != -1) { + // The node being migrated is not the leader, so no leader transfer is needed for now + if (originalDataNode.getDataNodeId() != leaderId) { + return; + } + } while (true) { TRegionLeaderChangeResp resp = SyncDataNodeClientPool.getInstance() @@ -702,7 +499,9 @@ public void transferRegionLeader(TConsensusGroupId regionId, TDataNodeLocation o LOGGER.warn("[RemoveRegion] Ratis transfer leader fail, but procedure will continue."); return; } - LOGGER.warn("Call changeRegionLeader fail for the {} time", retryTime); + LOGGER.warn( + "Call changeRegionLeader failed for the {} time, will sleep {} ms", retryTime, sleepTime); + Thread.sleep(sleepTime); } } @@ -736,12 +535,26 @@ public void transferRegionLeader(TConsensusGroupId regionId, TDataNodeLocation o */ public Optional<TDataNodeLocation> filterDataNodeWithOtherRegionReplica( TConsensusGroupId regionId, TDataNodeLocation filterLocation) { + List<TDataNodeLocation> filterLocations = Collections.singletonList(filterLocation); + return filterDataNodeWithOtherRegionReplica(regionId, filterLocations); + } + + public Optional<TDataNodeLocation> filterDataNodeWithOtherRegionReplica( + TConsensusGroupId regionId, List<TDataNodeLocation> filterLocations) { return filterDataNodeWithOtherRegionReplica( - regionId, filterLocation, NodeStatus.Running, NodeStatus.ReadOnly); + regionId, filterLocations, NodeStatus.Running, NodeStatus.ReadOnly); } public Optional<TDataNodeLocation> filterDataNodeWithOtherRegionReplica( TConsensusGroupId regionId, TDataNodeLocation filterLocation, NodeStatus... 
allowingStatus) { + List excludeLocations = Collections.singletonList(filterLocation); + return filterDataNodeWithOtherRegionReplica(regionId, excludeLocations, allowingStatus); + } + + public Optional filterDataNodeWithOtherRegionReplica( + TConsensusGroupId regionId, + List excludeLocations, + NodeStatus... allowingStatus) { List regionLocations = findRegionLocations(regionId); if (regionLocations.isEmpty()) { LOGGER.warn("Cannot find DataNodes contain the given region: {}", regionId); @@ -754,29 +567,19 @@ public Optional filterDataNodeWithOtherRegionReplica( configManager.getNodeManager().filterDataNodeThroughStatus(allowingStatus).stream() .map(TDataNodeConfiguration::getLocation) .collect(Collectors.toList()); + final int leaderId = configManager.getLoadManager().getRegionLeaderMap().get(regionId); Collections.shuffle(aliveDataNodes); + Optional bestChoice = Optional.empty(); for (TDataNodeLocation aliveDataNode : aliveDataNodes) { - if (regionLocations.contains(aliveDataNode) && !aliveDataNode.equals(filterLocation)) { - return Optional.of(aliveDataNode); + if (regionLocations.contains(aliveDataNode) && !excludeLocations.contains(aliveDataNode)) { + if (leaderId == aliveDataNode.getDataNodeId()) { + bestChoice = Optional.of(aliveDataNode); + break; + } else if (!bestChoice.isPresent()) { + bestChoice = Optional.of(aliveDataNode); + } } } - - return Optional.empty(); - } - - /** - * Check the protocol of the cluster, standalone is not supported to remove data node currently - * - * @return SUCCEED_STATUS if the Cluster is not standalone protocol, REMOVE_DATANODE_FAILED - * otherwise - */ - private TSStatus checkClusterProtocol() { - TSStatus status = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - if (CONF.getDataRegionConsensusProtocolClass().equals(SIMPLE_CONSENSUS) - || CONF.getSchemaRegionConsensusProtocolClass().equals(SIMPLE_CONSENSUS)) { - status.setCode(TSStatusCode.REMOVE_DATANODE_ERROR.getStatusCode()); - status.setMessage("SimpleConsensus protocol is not supported to remove data node"); - } - return status; + return bestChoice; } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RemoveDataNodeHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RemoveDataNodeHandler.java new file mode 100644 index 0000000000000..eaa16f47907f4 --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RemoveDataNodeHandler.java @@ -0,0 +1,495 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.procedure.env; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TDataNodeConfiguration; +import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.cluster.NodeStatus; +import org.apache.iotdb.commons.cluster.NodeType; +import org.apache.iotdb.commons.cluster.RegionStatus; +import org.apache.iotdb.commons.service.metric.MetricService; +import org.apache.iotdb.commons.utils.NodeUrlUtils; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; +import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; +import org.apache.iotdb.confignode.conf.ConfigNodeConfig; +import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; +import org.apache.iotdb.confignode.consensus.request.write.datanode.RemoveDataNodePlan; +import org.apache.iotdb.confignode.consensus.response.datanode.DataNodeToStatusResp; +import org.apache.iotdb.confignode.manager.ConfigManager; +import org.apache.iotdb.confignode.manager.load.cache.node.NodeHeartbeatSample; +import org.apache.iotdb.confignode.manager.load.cache.region.RegionHeartbeatSample; +import org.apache.iotdb.confignode.manager.partition.PartitionMetrics; +import org.apache.iotdb.confignode.persistence.node.NodeInfo; +import org.apache.iotdb.confignode.procedure.impl.region.RegionMigrationPlan; +import org.apache.iotdb.consensus.exception.ConsensusException; +import org.apache.iotdb.mpp.rpc.thrift.TCleanDataNodeCacheReq; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.stream.Collectors; + +import static org.apache.iotdb.confignode.conf.ConfigNodeConstant.REMOVE_DATANODE_PROCESS; +import static org.apache.iotdb.consensus.ConsensusFactory.SIMPLE_CONSENSUS; +import static org.apache.iotdb.db.service.RegionMigrateService.isFailed; +import static org.apache.iotdb.db.service.RegionMigrateService.isSucceed; + +public class RemoveDataNodeHandler { + + private static final Logger LOGGER = LoggerFactory.getLogger(RemoveDataNodeHandler.class); + + private static final ConfigNodeConfig CONF = ConfigNodeDescriptor.getInstance().getConf(); + + private final ConfigManager configManager; + + public RemoveDataNodeHandler(ConfigManager configManager) { + this.configManager = configManager; + } + + /** + * Check if the data nodes are sufficient after removing. + * + * @param removedDataNodes List + * @return true if the number of DataNodes is enough, false otherwise + */ + public boolean checkEnoughDataNodeAfterRemoving(List removedDataNodes) { + int availableDatanodeSize = + configManager + .getNodeManager() + .filterDataNodeThroughStatus(NodeStatus.Running, NodeStatus.ReadOnly) + .size(); + + int removedDataNodeSize = + (int) + removedDataNodes.stream() + .filter( + x -> + configManager.getLoadManager().getNodeStatus(x.getDataNodeId()) + != NodeStatus.Unknown) + .count(); + + return availableDatanodeSize - removedDataNodeSize >= NodeInfo.getMinimumDataNode(); + } + + /** + * Changes the status of a batch of specified DataNodes to the given status. 
This is done to + * prevent the DataNodes from receiving read or write requests when they are being removed or are + * in a restricted state. + * + * @param removedDataNodes the locations of the DataNodes whose statuses need to be changed + * @param nodeStatusMap a map containing the new status to assign to each DataNode (e.g., + * Removing, Running, etc.) + */ + public void changeDataNodeStatus( + List removedDataNodes, Map nodeStatusMap) { + LOGGER.info( + "{}, Begin to change DataNode status, nodeStatusMap: {}", + REMOVE_DATANODE_PROCESS, + nodeStatusMap); + + DataNodeAsyncRequestContext changeDataNodeStatusContext = + new DataNodeAsyncRequestContext<>(CnToDnAsyncRequestType.SET_SYSTEM_STATUS); + + for (TDataNodeLocation dataNode : removedDataNodes) { + changeDataNodeStatusContext.putRequest( + dataNode.getDataNodeId(), nodeStatusMap.get(dataNode.getDataNodeId()).getStatus()); + changeDataNodeStatusContext.putNodeLocation(dataNode.getDataNodeId(), dataNode); + } + + CnToDnInternalServiceAsyncRequestManager.getInstance() + .sendAsyncRequestWithRetry(changeDataNodeStatusContext); + + for (Map.Entry entry : + changeDataNodeStatusContext.getResponseMap().entrySet()) { + int dataNodeId = entry.getKey(); + NodeStatus nodeStatus = nodeStatusMap.get(dataNodeId); + RegionStatus regionStatus = RegionStatus.valueOf(nodeStatus.getStatus()); + + if (!isSucceed(entry.getValue())) { + LOGGER.error( + "{}, Failed to change DataNode status, dataNodeId={}, nodeStatus={}", + REMOVE_DATANODE_PROCESS, + dataNodeId, + nodeStatus); + continue; + } + + // Force updating NodeStatus + long currentTime = System.nanoTime(); + configManager + .getLoadManager() + .forceUpdateNodeCache( + NodeType.DataNode, dataNodeId, new NodeHeartbeatSample(currentTime, nodeStatus)); + + LOGGER.info( + "{}, Force update NodeCache: dataNodeId={}, nodeStatus={}, currentTime={}", + REMOVE_DATANODE_PROCESS, + dataNodeId, + nodeStatus, + currentTime); + + // Force update RegionStatus + if (regionStatus != RegionStatus.Removing) { + Map> heartbeatSampleMap = + new TreeMap<>(); + configManager + .getPartitionManager() + .getAllReplicaSets(dataNodeId) + .forEach( + replicaSet -> + heartbeatSampleMap.put( + replicaSet.getRegionId(), + Collections.singletonMap( + dataNodeId, new RegionHeartbeatSample(currentTime, regionStatus)))); + configManager.getLoadManager().forceUpdateRegionGroupCache(heartbeatSampleMap); + } + } + } + + /** + * Retrieves all region migration plans for the specified removed DataNodes. + * + * @param removedDataNodes the list of DataNodes from which to obtain migration plans + * @return a list of region migration plans associated with the removed DataNodes + */ + public List getRegionMigrationPlans( + List removedDataNodes) { + List regionMigrationPlans = new ArrayList<>(); + for (TDataNodeLocation removedDataNode : removedDataNodes) { + List migratedDataNodeRegions = getMigratedDataNodeRegions(removedDataNode); + regionMigrationPlans.addAll( + migratedDataNodeRegions.stream() + .map(regionId -> RegionMigrationPlan.create(regionId, removedDataNode)) + .collect(Collectors.toList())); + } + return regionMigrationPlans; + } + + /** + * Broadcasts DataNodes' status change, preventing disabled DataNodes from accepting read or write + * requests. 
+ * + * @param dataNodes the list of DataNodes that require broadcast status changes + */ + public void broadcastDataNodeStatusChange(List<TDataNodeLocation> dataNodes) { + String dataNodesString = + dataNodes.stream() + .map(RegionMaintainHandler::getIdWithRpcEndpoint) + .collect(Collectors.joining(", ")); + LOGGER.info( + "{}, BroadcastDataNodeStatusChange start, dataNodes: {}", + REMOVE_DATANODE_PROCESS, + dataNodesString); + + List<TDataNodeConfiguration> otherOnlineDataNodes = + configManager.getNodeManager().filterDataNodeThroughStatus(NodeStatus.Running).stream() + .filter(node -> !dataNodes.contains(node.getLocation())) + .collect(Collectors.toList()); + + DataNodeAsyncRequestContext<TCleanDataNodeCacheReq, TSStatus> cleanDataNodeCacheContext = + new DataNodeAsyncRequestContext<>(CnToDnAsyncRequestType.CLEAN_DATA_NODE_CACHE); + + for (TDataNodeConfiguration node : otherOnlineDataNodes) { + TCleanDataNodeCacheReq cleanDataNodeCacheReq = new TCleanDataNodeCacheReq(dataNodes); + cleanDataNodeCacheContext.putRequest(node.getLocation().getDataNodeId(), cleanDataNodeCacheReq); + cleanDataNodeCacheContext.putNodeLocation( + node.getLocation().getDataNodeId(), node.getLocation()); + } + + CnToDnInternalServiceAsyncRequestManager.getInstance() + .sendAsyncRequestWithRetry(cleanDataNodeCacheContext); + + for (Map.Entry<Integer, TSStatus> entry : + cleanDataNodeCacheContext.getResponseMap().entrySet()) { + if (!isSucceed(entry.getValue())) { + LOGGER.error( + "{}, BroadcastDataNodeStatusChange failed, status change dataNodes: {}, error status: {}", + REMOVE_DATANODE_PROCESS, + dataNodesString, + entry.getValue()); + return; + } + } + + LOGGER.info( + "{}, BroadcastDataNodeStatusChange finished, dataNodes: {}", + REMOVE_DATANODE_PROCESS, + dataNodesString); + } + + /** + * Removes a batch of DataNodes from the node information. + * + * @param removedDataNodes the list of DataNodeLocations to be removed + */ + public void removeDataNodePersistence(List<TDataNodeLocation> removedDataNodes) { + // Remove consensus record + try { + configManager.getConsensusManager().write(new RemoveDataNodePlan(removedDataNodes)); + } catch (ConsensusException e) { + LOGGER.warn("Failed to write RemoveDataNodePlan through the consensus layer due to: ", e); + } + + // Adjust maxRegionGroupNum + configManager.getClusterSchemaManager().adjustMaxRegionGroupNum(); + + // Remove metrics + for (TDataNodeLocation dataNodeLocation : removedDataNodes) { + PartitionMetrics.unbindDataNodePartitionMetricsWhenUpdate( + MetricService.getInstance(), + NodeUrlUtils.convertTEndPointUrl(dataNodeLocation.getClientRpcEndPoint())); + } + } + + /** + * Stops the specified old DataNodes. 
+ * + * @param removedDataNodes the list of DataNodeLocations to be stopped + */ + public void stopDataNodes(List<TDataNodeLocation> removedDataNodes) { + + LOGGER.info( + "{}, Begin to stop DataNodes and kill the DataNode process: {}", + REMOVE_DATANODE_PROCESS, + removedDataNodes); + + DataNodeAsyncRequestContext<TDataNodeLocation, TSStatus> stopDataNodesContext = + new DataNodeAsyncRequestContext<>(CnToDnAsyncRequestType.STOP_AND_CLEAR_DATA_NODE); + + for (TDataNodeLocation dataNode : removedDataNodes) { + stopDataNodesContext.putRequest(dataNode.getDataNodeId(), dataNode); + stopDataNodesContext.putNodeLocation(dataNode.getDataNodeId(), dataNode); + } + + CnToDnInternalServiceAsyncRequestManager.getInstance() + .sendAsyncRequestWithRetry(stopDataNodesContext); + + for (Map.Entry<Integer, TSStatus> entry : stopDataNodesContext.getResponseMap().entrySet()) { + int dataNodeId = entry.getKey(); + configManager.getLoadManager().removeNodeCache(dataNodeId); + if (!isSucceed(entry.getValue())) { + LOGGER.error( + "{}, Failed to stop DataNode, error status: {}", + REMOVE_DATANODE_PROCESS, + entry.getValue()); + } else { + LOGGER.info("{}, DataNode {} stopped successfully.", REMOVE_DATANODE_PROCESS, dataNodeId); + } + } + } + + /** + * Checks if the RemoveDataNode request is valid. + * + * @param removeDataNodePlan the RemoveDataNodeReq to be validated + * @return SUCCEED_STATUS if the request is valid + */ + public DataNodeToStatusResp checkRemoveDataNodeRequest(RemoveDataNodePlan removeDataNodePlan) { + DataNodeToStatusResp dataSet = new DataNodeToStatusResp(); + dataSet.setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); + + TSStatus status = checkClusterProtocol(); + if (isFailed(status)) { + dataSet.setStatus(status); + return dataSet; + } + status = checkRegionReplication(removeDataNodePlan); + if (isFailed(status)) { + dataSet.setStatus(status); + return dataSet; + } + + status = checkDataNodeExist(removeDataNodePlan); + if (isFailed(status)) { + dataSet.setStatus(status); + return dataSet; + } + + status = checkAllowRemoveDataNodes(removeDataNodePlan); + if (isFailed(status)) { + dataSet.setStatus(status); + return dataSet; + } + + return dataSet; + } + + /** + * Checks the cluster protocol. Removing a DataNode is not supported in standalone mode. + * + * @return SUCCEED_STATUS if the cluster is not in standalone mode, REMOVE_DATANODE_FAILED + * otherwise + */ + private TSStatus checkClusterProtocol() { + TSStatus status = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + if (CONF.getDataRegionConsensusProtocolClass().equals(SIMPLE_CONSENSUS) + || CONF.getSchemaRegionConsensusProtocolClass().equals(SIMPLE_CONSENSUS)) { + status.setCode(TSStatusCode.REMOVE_DATANODE_ERROR.getStatusCode()); + status.setMessage("Removing DataNodes is not supported when the SimpleConsensus protocol is used"); + } + return status; + } + + /** + * Checks whether the cluster has enough DataNodes to maintain the required number of + * RegionReplicas. 
+ * + * @param removeDataNodePlan the RemoveDataNodeReq to be evaluated + * @return SUCCEED_STATUS if the number of DataNodes is sufficient, LACK_REPLICATION otherwise + */ + public TSStatus checkRegionReplication(RemoveDataNodePlan removeDataNodePlan) { + TSStatus status = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + List<TDataNodeLocation> removedDataNodes = removeDataNodePlan.getDataNodeLocations(); + + int availableDatanodeSize = + configManager + .getNodeManager() + .filterDataNodeThroughStatus(NodeStatus.Running, NodeStatus.ReadOnly) + .size(); + // When the replication factor is 1, the removal fails for any DataNode that is not in + // Running status. + if (CONF.getSchemaReplicationFactor() == 1 || CONF.getDataReplicationFactor() == 1) { + // Filter with removeIf: removing inside a for-each loop over the same list would + // throw a ConcurrentModificationException + removedDataNodes.removeIf( + dataNodeLocation -> { + if (!NodeStatus.Running.equals( + configManager.getLoadManager().getNodeStatus(dataNodeLocation.getDataNodeId()))) { + LOGGER.error( + "Failed to remove DataNode {} because it is not in Running status and the cluster is configured with replication factor 1", + dataNodeLocation); + return true; + } + return false; + }); + if (removedDataNodes.isEmpty()) { + status.setCode(TSStatusCode.NO_ENOUGH_DATANODE.getStatusCode()); + status.setMessage("Failed to remove all requested data nodes"); + return status; + } + } + + int removedDataNodeSize = + (int) + removeDataNodePlan.getDataNodeLocations().stream() + .filter( + x -> + configManager.getLoadManager().getNodeStatus(x.getDataNodeId()) + != NodeStatus.Unknown) + .count(); + if (availableDatanodeSize - removedDataNodeSize < NodeInfo.getMinimumDataNode()) { + status.setCode(TSStatusCode.NO_ENOUGH_DATANODE.getStatusCode()); + status.setMessage( + String.format( + "Can't remove DataNodes due to the replication factor limit, " + + "availableDataNodeSize: %s, minimumDataNodeSize: %s, max allowed removed DataNode size is: %s", + availableDatanodeSize, + NodeInfo.getMinimumDataNode(), + (availableDatanodeSize - NodeInfo.getMinimumDataNode()))); + } + return status; + } + + /** + * Checks whether all DataNodes specified for deletion exist in the cluster. + * + * @param removeDataNodePlan the RemoveDataNodeReq containing the DataNodes to be checked + * @return SUCCEED_STATUS if all specified DataNodes exist in the cluster, DATANODE_NOT_EXIST + * otherwise + */ + private TSStatus checkDataNodeExist(RemoveDataNodePlan removeDataNodePlan) { + TSStatus status = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + + List<TDataNodeLocation> allDataNodes = + configManager.getNodeManager().getRegisteredDataNodes().stream() + .map(TDataNodeConfiguration::getLocation) + .collect(Collectors.toList()); + boolean hasNotExistNode = + removeDataNodePlan.getDataNodeLocations().stream() + .anyMatch(loc -> !allDataNodes.contains(loc)); + if (hasNotExistNode) { + status.setCode(TSStatusCode.DATANODE_NOT_EXIST.getStatusCode()); + status.setMessage("some DataNodes in the request do not exist in the cluster"); + } + return status; + } + + /** + * Checks if it is allowed to remove the specified DataNodes from the cluster. 
+ * + * @param removeDataNodePlan the RemoveDataNodeReq to be evaluated + * @return SUCCEED_STATUS if the request is valid, otherwise an appropriate error status + */ + public TSStatus checkAllowRemoveDataNodes(RemoveDataNodePlan removeDataNodePlan) { + return configManager + .getProcedureManager() + .checkRemoveDataNodes(removeDataNodePlan.getDataNodeLocations()); + } + + /** + * Retrieves all consensus group IDs from the specified removed DataNodes. + * + * @param removedDataNodes the list of removed DataNodes + * @return a set of TConsensusGroupId representing the consensus groups associated with the + * removed DataNodes + */ + public Set getRemovedDataNodesRegionSet( + List removedDataNodes) { + return removedDataNodes.stream() + .map(this::getMigratedDataNodeRegions) + .flatMap(List::stream) + .collect(Collectors.toSet()); + } + + /** + * Retrieves all consensus group IDs from the specified DataNode. + * + * @param removedDataNode the DataNode to be removed + * @return a list of group IDs that need to be migrated + */ + public List getMigratedDataNodeRegions(TDataNodeLocation removedDataNode) { + return configManager.getPartitionManager().getAllReplicaSets().stream() + .filter(replicaSet -> replicaSet.getDataNodeLocations().contains(removedDataNode)) + .map(TRegionReplicaSet::getRegionId) + .collect(Collectors.toList()); + } + + /** + * Retrieves all DataNodes related to the specified DataNode. + * + * @param removedDataNode the DataNode to be removed + * @return a set of TDataNodeLocation representing the DataNodes associated with the specified + * DataNode + */ + public Set getRelatedDataNodeLocations(TDataNodeLocation removedDataNode) { + return configManager.getPartitionManager().getAllReplicaSets().stream() + .filter(replicaSet -> replicaSet.getDataNodeLocations().contains(removedDataNode)) + .flatMap(replicaSet -> replicaSet.getDataNodeLocations().stream()) + .collect(Collectors.toSet()); + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/StateMachineProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/StateMachineProcedure.java index 87743003ff045..f698735ee95be 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/StateMachineProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/StateMachineProcedure.java @@ -21,9 +21,8 @@ import org.apache.iotdb.confignode.procedure.Procedure; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; +import org.apache.thrift.annotation.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -31,9 +30,8 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.ConcurrentLinkedDeque; /** * Procedure described by a series of steps. 
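The StateMachineProcedure hunks that follow replace the manual array bookkeeping (an int[] plus a stateCount cursor) with a ConcurrentLinkedDeque used as a state stack. As a rough, standalone sketch of that push/pop discipline (the class and method names below are illustrative, not part of the patch):

import java.util.concurrent.ConcurrentLinkedDeque;

// Hedged sketch of the deque-backed state history: execute() appends the id of
// each state it runs, EOF_STATE marks completion, and rollback() unwinds from the tail.
public class StateStackSketch {
  static final int EOF_STATE = Integer.MIN_VALUE;
  private final ConcurrentLinkedDeque<Integer> states = new ConcurrentLinkedDeque<>();

  void recordExecuted(int stateId) { // appended after each executed step
    states.add(stateId);
  }

  int currentState(int initialState) { // tail of the deque, or the initial state when empty
    return states.isEmpty() ? initialState : states.getLast();
  }

  boolean isEofState() { // mirrors the patched isEofState()
    return !states.isEmpty() && states.getLast() == EOF_STATE;
  }

  void popForRollback() { // rollback() removes the most recent entry
    states.removeLast();
  }

  public static void main(String[] args) {
    StateStackSketch stack = new StateStackSketch();
    stack.recordExecuted(0);
    stack.recordExecuted(1);
    stack.recordExecuted(EOF_STATE);
    System.out.println(stack.isEofState()); // true
    stack.popForRollback();
    System.out.println(stack.currentState(0)); // 1
  }
}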
@@ -50,24 +48,21 @@ public abstract class StateMachineProcedure extends Procedure private static final int EOF_STATE = Integer.MIN_VALUE; - private final AtomicBoolean aborted = new AtomicBoolean(false); - private Flow stateFlow = Flow.HAS_MORE_STATE; - protected int stateCount = 0; - private int[] states = null; + private final ConcurrentLinkedDeque states = new ConcurrentLinkedDeque<>(); - private List> subProcList = null; + private final List> subProcList = new ArrayList<>(); - /** Cycles on same state. Good for figuring if we are stuck. */ + /** Cycles on the same state. Good for figuring if we are stuck. */ private int cycles = 0; - /** Ordinal of the previous state. So we can tell if we are progressing or not. */ - private int previousState; + private static final int NO_NEXT_STATE = -1; + private int nextState = NO_NEXT_STATE; /** Mark whether this procedure is called by a pipe forwarded request. */ protected boolean isGeneratedByPipe; - private boolean stateDeserialized = false; + private boolean isStateDeserialized = false; protected StateMachineProcedure() { this(false); @@ -93,8 +88,7 @@ protected final int getCycles() { * @return Flow.NO_MORE_STATE if the procedure is completed, Flow.HAS_MORE_STATE if there is * another step. */ - protected abstract Flow executeFromState(Env env, TState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException; + protected abstract Flow executeFromState(Env env, TState state) throws InterruptedException; /** * Called to perform the rollback of the specified state. @@ -135,20 +129,6 @@ protected abstract void rollbackState(Env env, TState state) */ protected void setNextState(final TState state) { setNextState(getStateId(state)); - failIfAborted(); - } - - /** - * By default, the executor will try ro run all the steps of the procedure start to finish. Return - * true to make the executor yield between execution steps to give other procedures time to run - * their steps. - * - * @param state the state we are going to execute next. - * @return Return true if the executor should yield before the execution of the specified step. - * Defaults to return false. 
- */ - protected boolean isYieldBeforeExecuteFromState(Env env, TState state) { - return false; } /** @@ -157,105 +137,91 @@ protected boolean isYieldBeforeExecuteFromState(Env env, TState state) { * @param childProcedure the child procedure */ protected void addChildProcedure(Procedure childProcedure) { - if (childProcedure == null) { - return; - } - if (subProcList == null) { - subProcList = new ArrayList<>(); - } subProcList.add(childProcedure); } @Override - protected Procedure[] execute(final Env env) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + protected Procedure[] execute(final Env env) throws InterruptedException { updateTimestamp(); try { - failIfAborted(); - - if (!hasMoreState() || isFailed()) { + if (noMoreState() || isFailed()) { return null; } TState state = getCurrentState(); - if (stateCount == 0) { - setNextState(getStateId(state)); - } - LOG.debug("{} {}; cycles={}", state, this, cycles); - // Keep running count of cycles - if (getStateId(state) != this.previousState) { - this.previousState = getStateId(state); - this.cycles = 0; - } else { - this.cycles++; + // init for the first execution + if (states.isEmpty()) { + setNextState(getStateId(state)); + addNextStateAndCalculateCycles(); } LOG.trace("{}", this); stateFlow = executeFromState(env, state); - setStateDeserialized(false); - if (!hasMoreState()) { - setNextState(EOF_STATE); + if (!isFailed()) { + addNextStateAndCalculateCycles(); } + setStateDeserialized(false); - if (subProcList != null && !subProcList.isEmpty()) { - Procedure[] subProcedures = subProcList.toArray(new Procedure[subProcList.size()]); - subProcList = null; + if (!subProcList.isEmpty()) { + Procedure[] subProcedures = subProcList.toArray(new Procedure[0]); + subProcList.clear(); return subProcedures; } - return (isWaiting() || isFailed() || !hasMoreState()) ? null : new Procedure[] {this}; + return (isWaiting() || isFailed() || noMoreState()) ? null : new Procedure[] {this}; } finally { updateTimestamp(); } } + private void addNextStateAndCalculateCycles() { + int stateToBeAdded = EOF_STATE; + if (Flow.HAS_MORE_STATE == stateFlow) { + if (nextState == NO_NEXT_STATE) { + LOG.error( + "StateMachineProcedure pid={} not set next state, but return HAS_MORE_STATE. It is likely that there is some problem with the code. Please check the code. 
This procedure is about to be terminated: {}", + getProcId(), + this); + stateFlow = Flow.NO_MORE_STATE; + } else { + stateToBeAdded = nextState; + } + } + if (Flow.NO_MORE_STATE == stateFlow) { + if (nextState != NO_NEXT_STATE) { + LOG.warn( + "StateMachineProcedure pid={} set next state to {}, but return NO_MORE_STATE", + getProcId(), + nextState); + } + } + if (getStateId(getCurrentState()) == stateToBeAdded) { + cycles++; + } else { + cycles = 0; + } + states.add(stateToBeAdded); + nextState = NO_NEXT_STATE; + } + @Override protected void rollback(final Env env) throws IOException, InterruptedException, ProcedureException { if (isEofState()) { - stateCount--; + states.removeLast(); } try { updateTimestamp(); rollbackState(env, getCurrentState()); } finally { - stateCount--; + states.removeLast(); updateTimestamp(); } } protected boolean isEofState() { - return stateCount > 0 && states[stateCount - 1] == EOF_STATE; - } - - @Override - protected boolean abort(final Env env) { - LOG.debug("Abort requested for {}", this); - if (!hasMoreState()) { - LOG.warn("Ignore abort request on {} because it has already been finished", this); - return false; - } - if (!isRollbackSupported(getCurrentState())) { - LOG.warn("Ignore abort request on {} because it does not support rollback", this); - return false; - } - aborted.set(true); - return true; - } - - /** - * If procedure has more states then abort it otherwise procedure is finished and abort can be - * ignored. - */ - protected final void failIfAborted() { - if (aborted.get()) { - if (hasMoreState()) { - setAbortFailure(getClass().getSimpleName(), "abort requested"); - } else { - LOG.warn("Ignoring abort request on state='{}' for {}", getCurrentState(), this); - } - } + return !states.isEmpty() && states.getLast() == EOF_STATE; } /** @@ -266,26 +232,19 @@ protected boolean isRollbackSupported(final TState state) { return false; } - @Override - protected boolean isYieldAfterExecution(final Env env) { - return isYieldBeforeExecuteFromState(env, getCurrentState()); - } - - private boolean hasMoreState() { - return stateFlow != Flow.NO_MORE_STATE; + private boolean noMoreState() { + return stateFlow == Flow.NO_MORE_STATE; } + @Nullable protected TState getCurrentState() { - return stateCount > 0 ? getState(states[stateCount - 1]) : getInitialState(); - } - - /** - * This method is used from test code as it cannot be assumed that state transition will happen - * sequentially. Some procedures may skip steps/ states, some may add intermediate steps in - * future. 
-   */
-  public int getCurrentStateId() {
-    return getStateId(getCurrentState());
+    if (!states.isEmpty()) {
+      if (states.getLast() == EOF_STATE) {
+        return null;
+      }
+      return getState(states.getLast());
+    }
+    return getInitialState();
   }
 
   /**
@@ -294,15 +253,7 @@ public int getCurrentStateId() {
    * @param stateId the ordinal() of the state enum (or state id)
    */
   private void setNextState(final int stateId) {
-    if (states == null || states.length == stateCount) {
-      int newCapacity = stateCount + 8;
-      if (states != null) {
-        states = Arrays.copyOf(states, newCapacity);
-      } else {
-        states = new int[newCapacity];
-      }
-    }
-    states[stateCount++] = stateId;
+    nextState = stateId;
   }
 
   @Override
@@ -316,26 +267,27 @@ protected void toStringState(StringBuilder builder) {
   @Override
   public void serialize(DataOutputStream stream) throws IOException {
     super.serialize(stream);
-    stream.writeInt(stateCount);
-    for (int i = 0; i < stateCount; ++i) {
-      stream.writeInt(states[i]);
+
+    // Snapshot the deque first so the size written here matches the number of state ids
+    // actually serialized, even if the deque is modified concurrently
+    final ArrayList<Integer> copyStates = new ArrayList<>(states);
+    stream.writeInt(copyStates.size());
+    for (int state : copyStates) {
+      stream.writeInt(state);
     }
   }
 
   @Override
   public void deserialize(ByteBuffer byteBuffer) {
     super.deserialize(byteBuffer);
-    stateCount = byteBuffer.getInt();
+    int stateCount = byteBuffer.getInt();
+    states.clear();
     if (stateCount > 0) {
-      states = new int[stateCount];
       for (int i = 0; i < stateCount; ++i) {
-        states[i] = byteBuffer.getInt();
+        states.add(byteBuffer.getInt());
       }
       if (isEofState()) {
        stateFlow = Flow.NO_MORE_STATE;
       }
-    } else {
-      states = null;
     }
     this.setStateDeserialized(true);
   }
@@ -347,10 +299,10 @@ public void deserialize(ByteBuffer byteBuffer) {
    * the code in this stage, which is the purpose of this variable.
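   * Note: a freshly deserialized procedure is one being recovered after a ConfigNode restart or a
   * leader switch, so its current state may be re-entered; callers such as
   * RemoveDataNodesProcedure consult this flag to avoid re-submitting child procedures.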
*/ public boolean isStateDeserialized() { - return stateDeserialized; + return isStateDeserialized; } private void setStateDeserialized(boolean isDeserialized) { - this.stateDeserialized = isDeserialized; + this.isStateDeserialized = isDeserialized; } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/cq/CreateCQProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/cq/CreateCQProcedure.java index 579f51612bacf..2b274be607ca9 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/cq/CreateCQProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/cq/CreateCQProcedure.java @@ -28,8 +28,6 @@ import org.apache.iotdb.confignode.manager.cq.CQScheduleTask; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.node.AbstractNodeProcedure; import org.apache.iotdb.confignode.procedure.state.cq.CreateCQState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; @@ -84,7 +82,7 @@ public CreateCQProcedure(TCreateCQReq req, ScheduledExecutorService executor) { @Override protected Flow executeFromState(ConfigNodeProcedureEnv env, CreateCQState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws InterruptedException { try { switch (state) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/model/CreateModelProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/model/CreateModelProcedure.java new file mode 100644 index 0000000000000..8282608466d62 --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/model/CreateModelProcedure.java @@ -0,0 +1,250 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.procedure.impl.model; + +import org.apache.iotdb.common.rpc.thrift.TAINodeConfiguration; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.client.ainode.AINodeClient; +import org.apache.iotdb.commons.client.ainode.AINodeClientManager; +import org.apache.iotdb.commons.exception.ainode.LoadModelException; +import org.apache.iotdb.commons.model.ModelInformation; +import org.apache.iotdb.commons.model.ModelStatus; +import org.apache.iotdb.commons.model.exception.ModelManagementException; +import org.apache.iotdb.confignode.consensus.request.write.model.CreateModelPlan; +import org.apache.iotdb.confignode.consensus.request.write.model.UpdateModelInfoPlan; +import org.apache.iotdb.confignode.manager.ConfigManager; +import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; +import org.apache.iotdb.confignode.procedure.exception.ProcedureException; +import org.apache.iotdb.confignode.procedure.impl.node.AbstractNodeProcedure; +import org.apache.iotdb.confignode.procedure.state.model.CreateModelState; +import org.apache.iotdb.confignode.procedure.store.ProcedureType; +import org.apache.iotdb.consensus.exception.ConsensusException; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.apache.tsfile.utils.ReadWriteIOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +public class CreateModelProcedure extends AbstractNodeProcedure { + + private static final Logger LOGGER = LoggerFactory.getLogger(CreateModelProcedure.class); + private static final int RETRY_THRESHOLD = 0; + + private String modelName; + + private String uri; + + private ModelInformation modelInformation = null; + + private List aiNodeIds; + + private String loadErrorMsg = ""; + + public CreateModelProcedure() { + super(); + } + + public CreateModelProcedure(String modelName, String uri) { + super(); + this.modelName = modelName; + this.uri = uri; + this.aiNodeIds = new ArrayList<>(); + } + + @Override + protected Flow executeFromState(ConfigNodeProcedureEnv env, CreateModelState state) { + if (modelName == null || uri == null) { + return Flow.NO_MORE_STATE; + } + try { + switch (state) { + case LOADING: + initModel(env); + loadModel(env); + setNextState(CreateModelState.ACTIVE); + break; + case ACTIVE: + modelInformation.updateStatus(ModelStatus.ACTIVE); + updateModel(env); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException( + String.format("Unknown state during executing createModelProcedure, %s", state)); + } + } catch (Exception e) { + if (isRollbackSupported(state)) { + LOGGER.error("Fail in CreateModelProcedure", e); + setFailure(new ProcedureException(e.getMessage())); + } else { + LOGGER.error( + "Retrievable error trying to create model [{}], state [{}]", modelName, state, e); + if (getCycles() > RETRY_THRESHOLD) { + modelInformation = new ModelInformation(modelName, ModelStatus.UNAVAILABLE); + modelInformation.setAttribute(loadErrorMsg); + updateModel(env); + setFailure( + new ProcedureException( + String.format("Fail to create model [%s] at STATE [%s]", modelName, state))); + } + } + } + return Flow.HAS_MORE_STATE; + } + + private void initModel(ConfigNodeProcedureEnv env) throws ConsensusException { + LOGGER.info("Start to add model [{}]", modelName); + + ConfigManager configManager = env.getConfigManager(); + 
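    // Register the model in the ModelTable through the ConfigNode consensus layer before any
+    // AINode is asked to load it; a non-success status is surfaced as a ModelManagementException.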
TSStatus response = configManager.getConsensusManager().write(new CreateModelPlan(modelName)); + if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + throw new ModelManagementException( + String.format( + "Failed to add model [%s] in ModelTable on Config Nodes: %s", + modelName, response.getMessage())); + } + } + + private void checkModelInformationEquals(ModelInformation receiveModelInfo) { + if (modelInformation == null) { + modelInformation = receiveModelInfo; + } else { + if (!modelInformation.equals(receiveModelInfo)) { + throw new ModelManagementException( + String.format( + "Failed to load model [%s] on AI Nodes, model information is not equal in different nodes", + modelName)); + } + } + } + + private void loadModel(ConfigNodeProcedureEnv env) { + for (TAINodeConfiguration curNodeConfig : + env.getConfigManager().getNodeManager().getRegisteredAINodes()) { + try (AINodeClient client = + AINodeClientManager.getInstance() + .borrowClient(curNodeConfig.getLocation().getInternalEndPoint())) { + ModelInformation resp = client.registerModel(modelName, uri); + checkModelInformationEquals(resp); + aiNodeIds.add(curNodeConfig.getLocation().aiNodeId); + } catch (LoadModelException e) { + LOGGER.warn(e.getMessage()); + loadErrorMsg = e.getMessage(); + } catch (Exception e) { + LOGGER.warn( + "Failed to load model on AINode {} from ConfigNode", + curNodeConfig.getLocation().getInternalEndPoint()); + loadErrorMsg = e.getMessage(); + } + } + + if (aiNodeIds.isEmpty()) { + throw new ModelManagementException( + String.format("CREATE MODEL [%s] failed on all AINodes:[%s]", modelName, loadErrorMsg)); + } + } + + private void updateModel(ConfigNodeProcedureEnv env) { + LOGGER.info("Start to update model [{}]", modelName); + + ConfigManager configManager = env.getConfigManager(); + try { + TSStatus response = + configManager + .getConsensusManager() + .write(new UpdateModelInfoPlan(modelName, modelInformation, aiNodeIds)); + if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + throw new ModelManagementException( + String.format( + "Failed to update model [%s] in ModelTable on Config Nodes: %s", + modelName, response.getMessage())); + } + } catch (Exception e) { + throw new ModelManagementException( + String.format( + "Failed to update model [%s] in ModelTable on Config Nodes: %s", + modelName, e.getMessage())); + } + } + + @Override + protected void rollbackState(ConfigNodeProcedureEnv env, CreateModelState state) + throws IOException, InterruptedException, ProcedureException { + // do nothing + } + + @Override + protected boolean isRollbackSupported(CreateModelState state) { + return false; + } + + @Override + protected CreateModelState getState(int stateId) { + return CreateModelState.values()[stateId]; + } + + @Override + protected int getStateId(CreateModelState createModelState) { + return createModelState.ordinal(); + } + + @Override + protected CreateModelState getInitialState() { + return CreateModelState.LOADING; + } + + @Override + public void serialize(DataOutputStream stream) throws IOException { + stream.writeShort(ProcedureType.CREATE_MODEL_PROCEDURE.getTypeCode()); + super.serialize(stream); + ReadWriteIOUtils.write(modelName, stream); + ReadWriteIOUtils.write(uri, stream); + } + + @Override + public void deserialize(ByteBuffer byteBuffer) { + super.deserialize(byteBuffer); + modelName = ReadWriteIOUtils.readString(byteBuffer); + uri = ReadWriteIOUtils.readString(byteBuffer); + } + + @Override + public boolean equals(Object that) { + if (that 
instanceof CreateModelProcedure) { + CreateModelProcedure thatProc = (CreateModelProcedure) that; + return thatProc.getProcId() == this.getProcId() + && thatProc.getState() == this.getState() + && Objects.equals(thatProc.modelName, this.modelName) + && Objects.equals(thatProc.uri, this.uri); + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(getProcId(), getState(), modelName, uri); + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/model/DropModelProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/model/DropModelProcedure.java new file mode 100644 index 0000000000000..5a8f832540319 --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/model/DropModelProcedure.java @@ -0,0 +1,196 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.procedure.impl.model; + +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.client.ainode.AINodeClient; +import org.apache.iotdb.commons.client.ainode.AINodeClientManager; +import org.apache.iotdb.commons.model.exception.ModelManagementException; +import org.apache.iotdb.confignode.consensus.request.write.model.DropModelPlan; +import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; +import org.apache.iotdb.confignode.procedure.exception.ProcedureException; +import org.apache.iotdb.confignode.procedure.impl.node.AbstractNodeProcedure; +import org.apache.iotdb.confignode.procedure.state.model.DropModelState; +import org.apache.iotdb.confignode.procedure.store.ProcedureType; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.apache.thrift.TException; +import org.apache.tsfile.utils.ReadWriteIOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Objects; + +import static org.apache.iotdb.confignode.procedure.state.model.DropModelState.CONFIG_NODE_DROPPED; + +public class DropModelProcedure extends AbstractNodeProcedure { + + private static final Logger LOGGER = LoggerFactory.getLogger(DropModelProcedure.class); + private static final int RETRY_THRESHOLD = 1; + + private String modelName; + + public DropModelProcedure() { + super(); + } + + public DropModelProcedure(String modelName) { + super(); + this.modelName = modelName; + } + + @Override + protected Flow executeFromState(ConfigNodeProcedureEnv env, DropModelState state) { + if (modelName == null) { + return Flow.NO_MORE_STATE; + } + try { + switch (state) { + case AI_NODE_DROPPED: + LOGGER.info("Start to drop model [{}] on AI Nodes", modelName); + 
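          // Deletion on the AINodes is best-effort: per-node failures are only logged inside
+          // dropModelOnAINode, so an unreachable AINode does not block dropping the metadata.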
          dropModelOnAINode(env);
+          setNextState(CONFIG_NODE_DROPPED);
+          break;
+        case CONFIG_NODE_DROPPED:
+          dropModelOnConfigNode(env);
+          return Flow.NO_MORE_STATE;
+        default:
+          throw new UnsupportedOperationException(
+              String.format("Unknown state during executing dropModelProcedure, %s", state));
+      }
+    } catch (Exception e) {
+      if (isRollbackSupported(state)) {
+        LOGGER.error("Fail in DropModelProcedure", e);
+        setFailure(new ProcedureException(e.getMessage()));
+      } else {
+        LOGGER.error(
+            "Retryable error trying to drop model [{}], state [{}]", modelName, state, e);
+        if (getCycles() > RETRY_THRESHOLD) {
+          setFailure(
+              new ProcedureException(
+                  String.format(
+                      "Fail to drop model [%s] at STATE [%s], %s",
+                      modelName, state, e.getMessage())));
+        }
+      }
+    }
+    return Flow.HAS_MORE_STATE;
+  }
+
+  private void dropModelOnAINode(ConfigNodeProcedureEnv env) {
+    LOGGER.info("Start to drop model file [{}] on AI Node", modelName);
+
+    List<Integer> nodeIds =
+        env.getConfigManager().getModelManager().getModelDistributions(modelName);
+    for (Integer nodeId : nodeIds) {
+      try (AINodeClient client =
+          AINodeClientManager.getInstance()
+              .borrowClient(
+                  env.getConfigManager()
+                      .getNodeManager()
+                      .getRegisteredAINode(nodeId)
+                      .getLocation()
+                      .getInternalEndPoint())) {
+        TSStatus status = client.deleteModel(modelName);
+        if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+          LOGGER.warn(
+              "Failed to drop model [{}] on AINode [{}], status: {}",
+              modelName,
+              nodeId,
+              status.getMessage());
+        }
+      } catch (Exception e) {
+        LOGGER.warn(
+            "Failed to drop model [{}] on AINode [{}], error: {}",
+            modelName,
+            nodeId,
+            e.getMessage());
+      }
+    }
+  }
+
+  private void dropModelOnConfigNode(ConfigNodeProcedureEnv env) {
+    try {
+      TSStatus response =
+          env.getConfigManager().getConsensusManager().write(new DropModelPlan(modelName));
+      if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+        throw new TException(response.getMessage());
+      }
+    } catch (Exception e) {
+      throw new ModelManagementException(
+          String.format(
+              "Fail to drop model [%s] on Config Nodes: %s", modelName, e.getMessage()));
+    }
+  }
+
+  @Override
+  protected void rollbackState(ConfigNodeProcedureEnv env, DropModelState state)
+      throws IOException, InterruptedException, ProcedureException {
+    // no need to rollback
+  }
+
+  @Override
+  protected DropModelState getState(int stateId) {
+    return DropModelState.values()[stateId];
+  }
+
+  @Override
+  protected int getStateId(DropModelState dropModelState) {
+    return dropModelState.ordinal();
+  }
+
+  @Override
+  protected DropModelState getInitialState() {
+    return DropModelState.AI_NODE_DROPPED;
+  }
+
+  @Override
+  public void serialize(DataOutputStream stream) throws IOException {
+    stream.writeShort(ProcedureType.DROP_MODEL_PROCEDURE.getTypeCode());
+    super.serialize(stream);
+    ReadWriteIOUtils.write(modelName, stream);
+  }
+
+  @Override
+  public void deserialize(ByteBuffer byteBuffer) {
+    super.deserialize(byteBuffer);
+    modelName = ReadWriteIOUtils.readString(byteBuffer);
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that instanceof DropModelProcedure) {
+      DropModelProcedure thatProc = (DropModelProcedure) that;
+      return thatProc.getProcId() == this.getProcId()
+          && thatProc.getState() == this.getState()
+          && (thatProc.modelName).equals(this.modelName);
+    }
+    return false;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(getProcId(), getState(), modelName);
+  }
+}
diff --git
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/AbstractNodeProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/AbstractNodeProcedure.java index e2d29e1a1d212..b141027917366 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/AbstractNodeProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/AbstractNodeProcedure.java @@ -47,8 +47,9 @@ protected ProcedureLockState acquireLock(ConfigNodeProcedureEnv configNodeProced LOG.info("procedureId {} acquire lock.", getProcId()); return ProcedureLockState.LOCK_ACQUIRED; } - configNodeProcedureEnv.getNodeLock().waitProcedure(this); - LOG.info("procedureId {} wait for lock.", getProcId()); + LOG.info( + "procedureId {} acquire lock failed, will wait for lock after finishing execution.", + getProcId()); return ProcedureLockState.LOCK_EVENT_WAIT; } finally { configNodeProcedureEnv.getSchedulerLock().unlock(); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveAINodeProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveAINodeProcedure.java new file mode 100644 index 0000000000000..41676414afeed --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveAINodeProcedure.java @@ -0,0 +1,158 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.procedure.impl.node; + +import org.apache.iotdb.common.rpc.thrift.TAINodeLocation; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils; +import org.apache.iotdb.confignode.consensus.request.write.ainode.RemoveAINodePlan; +import org.apache.iotdb.confignode.consensus.request.write.model.DropModelInNodePlan; +import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; +import org.apache.iotdb.confignode.procedure.exception.ProcedureException; +import org.apache.iotdb.confignode.procedure.state.RemoveAINodeState; +import org.apache.iotdb.confignode.procedure.store.ProcedureType; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Objects; + +public class RemoveAINodeProcedure extends AbstractNodeProcedure { + + private static final Logger LOGGER = LoggerFactory.getLogger(RemoveAINodeProcedure.class); + private static final int RETRY_THRESHOLD = 5; + + private TAINodeLocation removedAINode; + + public RemoveAINodeProcedure(TAINodeLocation removedAINode) { + super(); + this.removedAINode = removedAINode; + } + + public RemoveAINodeProcedure() { + super(); + } + + @Override + protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveAINodeState state) + throws InterruptedException { + if (removedAINode == null) { + return Flow.NO_MORE_STATE; + } + + try { + switch (state) { + case MODEL_DELETE: + env.getConfigManager() + .getConsensusManager() + .write(new DropModelInNodePlan(removedAINode.aiNodeId)); + // Cause the AINode is removed, so we don't need to remove the model file. 
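+          // Only the model metadata kept on the ConfigNode has to be cleaned up here; the model
+          // files disappear together with the removed AINode itself.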
+ setNextState(RemoveAINodeState.NODE_REMOVE); + break; + case NODE_REMOVE: + TSStatus response = + env.getConfigManager() + .getConsensusManager() + .write(new RemoveAINodePlan(removedAINode)); + + if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + throw new ProcedureException( + String.format( + "Fail to remove [%s] AINode on Config Nodes [%s]", + removedAINode, response.getMessage())); + } + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException( + String.format("Unknown state during executing removeAINodeProcedure, %s", state)); + } + } catch (Exception e) { + if (isRollbackSupported(state)) { + setFailure(new ProcedureException(e.getMessage())); + } else { + LOGGER.error( + "Retrievable error trying to remove AINode [{}], state [{}]", removedAINode, state, e); + if (getCycles() > RETRY_THRESHOLD) { + setFailure( + new ProcedureException( + String.format( + "Fail to remove AINode [%s] at STATE [%s], %s", + removedAINode, state, e.getMessage()))); + } + } + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState( + ConfigNodeProcedureEnv configNodeProcedureEnv, RemoveAINodeState removeAINodeState) + throws IOException, InterruptedException, ProcedureException { + // no need to rollback + } + + @Override + protected RemoveAINodeState getState(int stateId) { + return RemoveAINodeState.values()[stateId]; + } + + @Override + protected int getStateId(RemoveAINodeState removeAINodeState) { + return removeAINodeState.ordinal(); + } + + @Override + protected RemoveAINodeState getInitialState() { + return RemoveAINodeState.MODEL_DELETE; + } + + @Override + public void serialize(DataOutputStream stream) throws IOException { + stream.writeShort(ProcedureType.REMOVE_AI_NODE_PROCEDURE.getTypeCode()); + super.serialize(stream); + ThriftCommonsSerDeUtils.serializeTAINodeLocation(removedAINode, stream); + } + + @Override + public void deserialize(ByteBuffer byteBuffer) { + super.deserialize(byteBuffer); + removedAINode = ThriftCommonsSerDeUtils.deserializeTAINodeLocation(byteBuffer); + } + + @Override + public boolean equals(Object that) { + if (that instanceof RemoveAINodeProcedure) { + RemoveAINodeProcedure thatProc = (RemoveAINodeProcedure) that; + return thatProc.getProcId() == this.getProcId() + && thatProc.getState() == this.getState() + && (thatProc.removedAINode).equals(this.removedAINode); + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(getProcId(), getState(), removedAINode); + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveConfigNodeProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveConfigNodeProcedure.java index 0910f06605dda..2780214c3a8da 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveConfigNodeProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveConfigNodeProcedure.java @@ -65,12 +65,12 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveConfigNodeStat break; case DELETE_PEER: env.deleteConfigNodePeer(removedConfigNode); - setNextState(RemoveConfigNodeState.STOP_CONFIG_NODE); + setNextState(RemoveConfigNodeState.STOP_AND_CLEAR_CONFIG_NODE); LOG.info("Delete peer for ConfigNode: {}", removedConfigNode); break; - case STOP_CONFIG_NODE: - env.stopConfigNode(removedConfigNode); - LOG.info("Stop ConfigNode: {}", removedConfigNode); + case 
STOP_AND_CLEAR_CONFIG_NODE: + env.stopAndClearConfigNode(removedConfigNode); + LOG.info("Stop and clear ConfigNode: {}", removedConfigNode); return Flow.NO_MORE_STATE; } } catch (Exception e) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodeProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodeProcedure.java deleted file mode 100644 index 80af94a832173..0000000000000 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodeProcedure.java +++ /dev/null @@ -1,253 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.confignode.procedure.impl.node; - -import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; -import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; -import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; -import org.apache.iotdb.commons.exception.runtime.ThriftSerDeException; -import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils; -import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; -import org.apache.iotdb.confignode.procedure.env.RegionMaintainHandler; -import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.impl.region.RegionMigrateProcedure; -import org.apache.iotdb.confignode.procedure.state.RemoveDataNodeState; -import org.apache.iotdb.confignode.procedure.store.ProcedureType; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; - -import static org.apache.iotdb.confignode.conf.ConfigNodeConstant.REMOVE_DATANODE_PROCESS; - -/** remove data node procedure */ -public class RemoveDataNodeProcedure extends AbstractNodeProcedure { - private static final Logger LOG = LoggerFactory.getLogger(RemoveDataNodeProcedure.class); - private static final int RETRY_THRESHOLD = 5; - - private TDataNodeLocation removedDataNode; - - private List migratedDataNodeRegions = new ArrayList<>(); - - public RemoveDataNodeProcedure() { - super(); - } - - public RemoveDataNodeProcedure(TDataNodeLocation removedDataNode) { - super(); - this.removedDataNode = removedDataNode; - } - - @Override - protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveDataNodeState state) { - if (removedDataNode == null) { - return Flow.NO_MORE_STATE; - } - - RegionMaintainHandler handler = env.getRegionMaintainHandler(); - try { - switch (state) { - case REGION_REPLICA_CHECK: - if 
(env.checkEnoughDataNodeAfterRemoving(removedDataNode)) { - setNextState(RemoveDataNodeState.REMOVE_DATA_NODE_PREPARE); - } else { - LOG.error( - "{}, Can not remove DataNode {} " - + "because the number of DataNodes is less or equal than region replica number", - REMOVE_DATANODE_PROCESS, - removedDataNode); - return Flow.NO_MORE_STATE; - } - case REMOVE_DATA_NODE_PREPARE: - // mark the datanode as removing status and broadcast region route map - env.markDataNodeAsRemovingAndBroadcast(removedDataNode); - migratedDataNodeRegions = handler.getMigratedDataNodeRegions(removedDataNode); - LOG.info( - "{}, DataNode regions to be removed is {}", - REMOVE_DATANODE_PROCESS, - migratedDataNodeRegions); - setNextState(RemoveDataNodeState.BROADCAST_DISABLE_DATA_NODE); - break; - case BROADCAST_DISABLE_DATA_NODE: - handler.broadcastDisableDataNode(removedDataNode); - setNextState(RemoveDataNodeState.SUBMIT_REGION_MIGRATE); - break; - case SUBMIT_REGION_MIGRATE: - submitChildRegionMigrate(env); - setNextState(RemoveDataNodeState.STOP_DATA_NODE); - break; - case STOP_DATA_NODE: - if (isAllRegionMigratedSuccessfully(env)) { - LOG.info("{}, Begin to stop DataNode: {}", REMOVE_DATANODE_PROCESS, removedDataNode); - handler.removeDataNodePersistence(removedDataNode); - handler.stopDataNode(removedDataNode); - } - return Flow.NO_MORE_STATE; - } - } catch (Exception e) { - if (isRollbackSupported(state)) { - setFailure(new ProcedureException("Remove Data Node failed " + state)); - } else { - LOG.error( - "Retrievable error trying to remove data node {}, state {}", removedDataNode, state, e); - if (getCycles() > RETRY_THRESHOLD) { - setFailure(new ProcedureException("State stuck at " + state)); - } - } - } - return Flow.HAS_MORE_STATE; - } - - private void submitChildRegionMigrate(ConfigNodeProcedureEnv env) { - migratedDataNodeRegions.forEach( - regionId -> { - TDataNodeLocation destDataNode = - env.getRegionMaintainHandler().findDestDataNode(regionId); - // TODO: need to improve the coordinator selection method here, maybe through load - // balancing and other means. 
- final TDataNodeLocation coordinatorForAddPeer = - env.getRegionMaintainHandler() - .filterDataNodeWithOtherRegionReplica(regionId, destDataNode) - .orElse(removedDataNode); - final TDataNodeLocation coordinatorForRemovePeer = destDataNode; - if (destDataNode != null) { - RegionMigrateProcedure regionMigrateProcedure = - new RegionMigrateProcedure( - regionId, - removedDataNode, - destDataNode, - coordinatorForAddPeer, - coordinatorForRemovePeer); - addChildProcedure(regionMigrateProcedure); - LOG.info("Submit child procedure {} for regionId {}", regionMigrateProcedure, regionId); - } else { - LOG.error( - "{}, Cannot find target DataNode to migrate the region: {}", - REMOVE_DATANODE_PROCESS, - regionId); - // TODO terminate all the uncompleted remove datanode process - } - }); - } - - private boolean isAllRegionMigratedSuccessfully(ConfigNodeProcedureEnv env) { - List replicaSets = - env.getConfigManager().getPartitionManager().getAllReplicaSets(); - - List migratedFailedRegions = - replicaSets.stream() - .filter(replica -> replica.getDataNodeLocations().contains(removedDataNode)) - .map(TRegionReplicaSet::getRegionId) - .collect(Collectors.toList()); - if (!migratedFailedRegions.isEmpty()) { - LOG.warn( - "{}, Some regions are migrated failed, the StopDataNode process should not be executed, migratedFailedRegions: {}", - REMOVE_DATANODE_PROCESS, - migratedFailedRegions); - return false; - } - - return true; - } - - @Override - protected void rollbackState(ConfigNodeProcedureEnv env, RemoveDataNodeState state) - throws IOException, InterruptedException, ProcedureException {} - - @Override - protected boolean isRollbackSupported(RemoveDataNodeState state) { - return false; - } - - /** - * Used to keep procedure lock even when the procedure is yielded or suspended. 
- * - * @param env env - * @return true if hold the lock - */ - protected boolean holdLock(ConfigNodeProcedureEnv env) { - return true; - } - - @Override - protected RemoveDataNodeState getState(int stateId) { - return RemoveDataNodeState.values()[stateId]; - } - - @Override - protected int getStateId(RemoveDataNodeState removeDataNodeState) { - return removeDataNodeState.ordinal(); - } - - @Override - protected RemoveDataNodeState getInitialState() { - return RemoveDataNodeState.REGION_REPLICA_CHECK; - } - - @Override - public void serialize(DataOutputStream stream) throws IOException { - stream.writeShort(ProcedureType.REMOVE_DATA_NODE_PROCEDURE.getTypeCode()); - super.serialize(stream); - ThriftCommonsSerDeUtils.serializeTDataNodeLocation(removedDataNode, stream); - stream.writeInt(migratedDataNodeRegions.size()); - migratedDataNodeRegions.forEach( - tid -> ThriftCommonsSerDeUtils.serializeTConsensusGroupId(tid, stream)); - } - - @Override - public void deserialize(ByteBuffer byteBuffer) { - super.deserialize(byteBuffer); - try { - removedDataNode = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer); - int regionSize = byteBuffer.getInt(); - migratedDataNodeRegions = new ArrayList<>(regionSize); - for (int i = 0; i < regionSize; i++) { - migratedDataNodeRegions.add( - ThriftCommonsSerDeUtils.deserializeTConsensusGroupId(byteBuffer)); - } - } catch (ThriftSerDeException e) { - LOG.error("Error in deserialize RemoveConfigNodeProcedure", e); - } - } - - @Override - public boolean equals(Object that) { - if (that instanceof RemoveDataNodeProcedure) { - RemoveDataNodeProcedure thatProc = (RemoveDataNodeProcedure) that; - return thatProc.getProcId() == this.getProcId() - && thatProc.getState() == this.getState() - && thatProc.removedDataNode.equals(this.removedDataNode) - && thatProc.migratedDataNodeRegions.equals(this.migratedDataNodeRegions); - } - return false; - } - - @Override - public int hashCode() { - return Objects.hash(this.removedDataNode, this.migratedDataNodeRegions); - } -} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodesProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodesProcedure.java new file mode 100644 index 0000000000000..68b0550e0dfd0 --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodesProcedure.java @@ -0,0 +1,355 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.iotdb.confignode.procedure.impl.node;
+
+import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId;
+import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
+import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet;
+import org.apache.iotdb.commons.cluster.NodeStatus;
+import org.apache.iotdb.commons.exception.runtime.ThriftSerDeException;
+import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils;
+import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
+import org.apache.iotdb.confignode.procedure.env.RemoveDataNodeHandler;
+import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
+import org.apache.iotdb.confignode.procedure.impl.region.RegionMigrateProcedure;
+import org.apache.iotdb.confignode.procedure.impl.region.RegionMigrationPlan;
+import org.apache.iotdb.confignode.procedure.state.ProcedureLockState;
+import org.apache.iotdb.confignode.procedure.state.RemoveDataNodeState;
+import org.apache.iotdb.confignode.procedure.store.ProcedureType;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+import static org.apache.iotdb.confignode.conf.ConfigNodeConstant.REMOVE_DATANODE_PROCESS;
+
+/** Remove DataNode procedure */
+public class RemoveDataNodesProcedure extends AbstractNodeProcedure<RemoveDataNodeState> {
+  private static final Logger LOG = LoggerFactory.getLogger(RemoveDataNodesProcedure.class);
+  private static final int RETRY_THRESHOLD = 5;
+
+  private List<TDataNodeLocation> removedDataNodes;
+
+  private List<RegionMigrationPlan> regionMigrationPlans = new ArrayList<>();
+
+  private Map<Integer, NodeStatus> nodeStatusMap;
+
+  public RemoveDataNodesProcedure() {
+    super();
+  }
+
+  public RemoveDataNodesProcedure(
+      List<TDataNodeLocation> removedDataNodes, Map<Integer, NodeStatus> nodeStatusMap) {
+    super();
+    this.removedDataNodes = removedDataNodes;
+    this.nodeStatusMap = nodeStatusMap;
+  }
+
+  @Override
+  protected ProcedureLockState acquireLock(ConfigNodeProcedureEnv configNodeProcedureEnv) {
+    configNodeProcedureEnv.getSchedulerLock().lock();
+    try {
+      LOG.info(
+          "procedureId {}-RemoveDataNodes skips acquiring the lock, since the upper layer ensures serial execution.",
+          getProcId());
+      return ProcedureLockState.LOCK_ACQUIRED;
+    } finally {
+      configNodeProcedureEnv.getSchedulerLock().unlock();
+    }
+  }
+
+  @Override
+  protected void releaseLock(ConfigNodeProcedureEnv configNodeProcedureEnv) {
+    configNodeProcedureEnv.getSchedulerLock().lock();
+    try {
+      LOG.info(
+          "procedureId {}-RemoveDataNodes skips releasing the lock, since it hasn't acquired any lock.",
+          getProcId());
+    } finally {
+      configNodeProcedureEnv.getSchedulerLock().unlock();
+    }
+  }
+
+  @Override
+  protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveDataNodeState state) {
+    if (removedDataNodes.isEmpty()) {
+      return Flow.NO_MORE_STATE;
+    }
+
+    RemoveDataNodeHandler removeDataNodeHandler = env.getRemoveDataNodeHandler();
+    try {
+      switch (state) {
+        case REGION_REPLICA_CHECK:
+          if (removeDataNodeHandler.checkEnoughDataNodeAfterRemoving(removedDataNodes)) {
+            setNextState(RemoveDataNodeState.REMOVE_DATA_NODE_PREPARE);
+          } else {
+            LOG.error(
+                "{}, Cannot remove DataNode {} "
+                    + "because the number of DataNodes is less than or equal to the region replica number",
+                REMOVE_DATANODE_PROCESS,
+                removedDataNodes);
+            return Flow.NO_MORE_STATE;
+          }
+        case REMOVE_DATA_NODE_PREPARE:
+          Map<Integer, NodeStatus>
removedNodeStatusMap = new HashMap<>(); + removedDataNodes.forEach( + dataNode -> removedNodeStatusMap.put(dataNode.getDataNodeId(), NodeStatus.Removing)); + removeDataNodeHandler.changeDataNodeStatus(removedDataNodes, removedNodeStatusMap); + regionMigrationPlans = removeDataNodeHandler.getRegionMigrationPlans(removedDataNodes); + LOG.info( + "{}, DataNode regions to be removed is {}", + REMOVE_DATANODE_PROCESS, + regionMigrationPlans); + setNextState(RemoveDataNodeState.BROADCAST_DISABLE_DATA_NODE); + break; + case BROADCAST_DISABLE_DATA_NODE: + removeDataNodeHandler.broadcastDataNodeStatusChange(removedDataNodes); + setNextState(RemoveDataNodeState.SUBMIT_REGION_MIGRATE); + break; + case SUBMIT_REGION_MIGRATE: + // Avoid re-submit region-migration when leader change or ConfigNode reboot + if (!isStateDeserialized()) { + submitChildRegionMigrate(env); + } + setNextState(RemoveDataNodeState.STOP_DATA_NODE); + break; + case STOP_DATA_NODE: + checkRegionStatusAndStopDataNode(env); + return Flow.NO_MORE_STATE; + } + } catch (Exception e) { + if (isRollbackSupported(state)) { + setFailure(new ProcedureException("Remove Data Node failed " + state)); + } else { + LOG.error( + "Retrievable error trying to remove data node {}, state {}", + removedDataNodes, + state, + e); + if (getCycles() > RETRY_THRESHOLD) { + setFailure(new ProcedureException("State stuck at " + state)); + } + } + } + return Flow.HAS_MORE_STATE; + } + + private void submitChildRegionMigrate(ConfigNodeProcedureEnv env) { + regionMigrationPlans.forEach( + regionMigrationPlan -> { + TConsensusGroupId regionId = regionMigrationPlan.getRegionId(); + TDataNodeLocation removedDataNode = regionMigrationPlan.getFromDataNode(); + TDataNodeLocation destDataNode = + env.getRegionMaintainHandler().findDestDataNode(regionId); + // TODO: need to improve the coordinator selection method here, maybe through load + // balancing and other means. 
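+          // Prefer a DataNode that already holds another replica of this region as the
+          // coordinator for the add-peer step, falling back to the node being removed if no
+          // other replica holder exists; the destination node coordinates the remove-peer step.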
+ final TDataNodeLocation coordinatorForAddPeer = + env.getRegionMaintainHandler() + .filterDataNodeWithOtherRegionReplica(regionId, destDataNode) + .orElse(removedDataNode); + final TDataNodeLocation coordinatorForRemovePeer = destDataNode; + if (destDataNode != null) { + RegionMigrateProcedure regionMigrateProcedure = + new RegionMigrateProcedure( + regionId, + removedDataNode, + destDataNode, + coordinatorForAddPeer, + coordinatorForRemovePeer); + addChildProcedure(regionMigrateProcedure); + LOG.info( + "Submit RegionMigrateProcedure for regionId {}: removedDataNode={}, destDataNode={}, coordinatorForAddPeer={}, coordinatorForRemovePeer={}", + regionId, + simplifyTDataNodeLocation(removedDataNode), + simplifyTDataNodeLocation(destDataNode), + simplifyTDataNodeLocation(coordinatorForAddPeer), + simplifyTDataNodeLocation(coordinatorForRemovePeer)); + } else { + LOG.error( + "{}, Cannot find target DataNode to migrate the region: {}", + REMOVE_DATANODE_PROCESS, + regionId); + // TODO terminate all the uncompleted remove datanode process + } + }); + } + + private String simplifyTDataNodeLocation(TDataNodeLocation dataNodeLocation) { + return String.format( + "DataNode(id:%d, address:%s)", + dataNodeLocation.getDataNodeId(), dataNodeLocation.getInternalEndPoint().getIp()); + } + + private void checkRegionStatusAndStopDataNode(ConfigNodeProcedureEnv env) { + List replicaSets = + env.getConfigManager().getPartitionManager().getAllReplicaSets(); + List rollBackDataNodes = new ArrayList<>(); + List successDataNodes = new ArrayList<>(); + for (TDataNodeLocation dataNode : removedDataNodes) { + List migratedFailedRegions = + replicaSets.stream() + .filter( + replica -> + replica.getDataNodeLocations().stream() + .anyMatch(loc -> loc.getDataNodeId() == dataNode.dataNodeId)) + .map(TRegionReplicaSet::getRegionId) + .collect(Collectors.toList()); + if (!migratedFailedRegions.isEmpty()) { + LOG.warn( + "{}, Some regions are migrated failed in DataNode: {}, migratedFailedRegions: {}." + + "Regions that have been successfully migrated will not roll back, you can submit the RemoveDataNodes task again later.", + REMOVE_DATANODE_PROCESS, + dataNode, + migratedFailedRegions); + rollBackDataNodes.add(dataNode); + } else { + successDataNodes.add(dataNode); + } + } + if (!successDataNodes.isEmpty()) { + LOG.info( + "{}, DataNodes: {} all regions migrated successfully, start to stop them.", + REMOVE_DATANODE_PROCESS, + successDataNodes); + env.getRemoveDataNodeHandler().removeDataNodePersistence(successDataNodes); + env.getRemoveDataNodeHandler().stopDataNodes(successDataNodes); + } + if (!rollBackDataNodes.isEmpty()) { + LOG.info( + "{}, Start to roll back the DataNodes status: {}", + REMOVE_DATANODE_PROCESS, + rollBackDataNodes); + env.getRemoveDataNodeHandler().changeDataNodeStatus(rollBackDataNodes, nodeStatusMap); + env.getRemoveDataNodeHandler().broadcastDataNodeStatusChange(rollBackDataNodes); + LOG.info( + "{}, Roll back the DataNodes status successfully: {}", + REMOVE_DATANODE_PROCESS, + rollBackDataNodes); + } + } + + @Override + protected void rollbackState(ConfigNodeProcedureEnv env, RemoveDataNodeState state) + throws IOException, InterruptedException, ProcedureException {} + + @Override + protected boolean isRollbackSupported(RemoveDataNodeState state) { + return false; + } + + /** + * Used to keep procedure lock even when the procedure is yielded or suspended. 
+ * + * @param env env + * @return true if hold the lock + */ + protected boolean holdLock(ConfigNodeProcedureEnv env) { + return true; + } + + @Override + protected RemoveDataNodeState getState(int stateId) { + return RemoveDataNodeState.values()[stateId]; + } + + @Override + protected int getStateId(RemoveDataNodeState removeDataNodeState) { + return removeDataNodeState.ordinal(); + } + + @Override + protected RemoveDataNodeState getInitialState() { + return RemoveDataNodeState.REGION_REPLICA_CHECK; + } + + @Override + public void serialize(DataOutputStream stream) throws IOException { + stream.writeShort(ProcedureType.REMOVE_DATA_NODE_PROCEDURE.getTypeCode()); + super.serialize(stream); + stream.writeInt(removedDataNodes.size()); + removedDataNodes.forEach( + dataNode -> ThriftCommonsSerDeUtils.serializeTDataNodeLocation(dataNode, stream)); + stream.writeInt(regionMigrationPlans.size()); + for (RegionMigrationPlan regionMigrationPlan : regionMigrationPlans) { + regionMigrationPlan.serialize(stream); + } + stream.writeInt(nodeStatusMap.size()); + for (Map.Entry entry : nodeStatusMap.entrySet()) { + stream.writeInt(entry.getKey()); + stream.writeByte(entry.getValue().ordinal()); + } + } + + @Override + public void deserialize(ByteBuffer byteBuffer) { + super.deserialize(byteBuffer); + try { + int removedDataNodeSize = byteBuffer.getInt(); + removedDataNodes = new ArrayList<>(removedDataNodeSize); + for (int i = 0; i < removedDataNodeSize; i++) { + removedDataNodes.add(ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer)); + } + int regionMigrationPlanSize = byteBuffer.getInt(); + regionMigrationPlans = new ArrayList<>(regionMigrationPlanSize); + for (int i = 0; i < regionMigrationPlanSize; i++) { + regionMigrationPlans.add(RegionMigrationPlan.deserialize(byteBuffer)); + } + int nodeStatusMapSize = byteBuffer.getInt(); + nodeStatusMap = new HashMap<>(nodeStatusMapSize); + for (int i = 0; i < nodeStatusMapSize; i++) { + int dataNodeId = byteBuffer.getInt(); + NodeStatus nodeStatus = NodeStatus.values()[byteBuffer.get()]; + nodeStatusMap.put(dataNodeId, nodeStatus); + } + } catch (ThriftSerDeException e) { + LOG.error("Error in deserialize RemoveConfigNodeProcedure", e); + } + } + + @Override + public boolean equals(Object that) { + if (that instanceof RemoveDataNodesProcedure) { + RemoveDataNodesProcedure thatProc = (RemoveDataNodesProcedure) that; + return thatProc.getProcId() == this.getProcId() + && thatProc.getState() == this.getState() + && thatProc.removedDataNodes.equals(this.removedDataNodes) + && thatProc.regionMigrationPlans.equals(this.regionMigrationPlans); + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(this.removedDataNodes, this.regionMigrationPlans); + } + + public List getRemovedDataNodes() { + return removedDataNodes; + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/AbstractOperatePipeProcedureV2.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/AbstractOperatePipeProcedureV2.java index 325832a3869fc..07059d6a0d985 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/AbstractOperatePipeProcedureV2.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/AbstractOperatePipeProcedureV2.java @@ -19,13 +19,11 @@ package org.apache.iotdb.confignode.procedure.impl.pipe; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; -import 
org.apache.iotdb.confignode.manager.pipe.metric.PipeProcedureMetrics; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; +import org.apache.iotdb.confignode.manager.pipe.metric.overview.PipeProcedureMetrics; import org.apache.iotdb.confignode.persistence.pipe.PipeTaskInfo; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.node.AbstractNodeProcedure; import org.apache.iotdb.confignode.procedure.impl.pipe.runtime.PipeMetaSyncProcedure; import org.apache.iotdb.confignode.procedure.state.ProcedureLockState; @@ -83,6 +81,11 @@ public abstract class AbstractOperatePipeProcedureV2 // Only used in rollback to reduce the number of network calls protected boolean isRollbackFromOperateOnDataNodesSuccessful = false; + // Only used in rollback to avoid executing rollbackFromValidateTask multiple times + // Pure in-memory object, not involved in snapshot serialization and deserialization. + // TODO: consider serializing this variable later + protected boolean isRollbackFromValidateTaskSuccessful = false; + // This variable should not be serialized into procedure store, // putting it here is just for convenience protected AtomicReference pipeTaskInfo; @@ -213,7 +216,7 @@ public abstract void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) @Override protected Flow executeFromState(ConfigNodeProcedureEnv env, OperatePipeTaskState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws InterruptedException { if (pipeTaskInfo == null) { LOGGER.warn( "ProcedureId {}: Pipe lock is not acquired, executeFromState's execution will be skipped.", @@ -260,6 +263,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, OperatePipeTaskState getCycles() + 1, RETRY_THRESHOLD, e); + setNextState(getCurrentState()); // Wait 3s for next retry TimeUnit.MILLISECONDS.sleep(3000L); } else { @@ -275,6 +279,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, OperatePipeTaskState String.format( "ProcedureId %s: Fail to %s because %s", getProcId(), getOperation().name(), e.getMessage()))); + return Flow.NO_MORE_STATE; } } return Flow.HAS_MORE_STATE; @@ -298,10 +303,13 @@ protected void rollbackState(ConfigNodeProcedureEnv env, OperatePipeTaskState st switch (state) { case VALIDATE_TASK: - try { - rollbackFromValidateTask(env); - } catch (Exception e) { - LOGGER.warn("ProcedureId {}: Failed to rollback from validate task.", getProcId(), e); + if (!isRollbackFromValidateTaskSuccessful) { + try { + rollbackFromValidateTask(env); + isRollbackFromValidateTaskSuccessful = true; + } catch (Exception e) { + LOGGER.warn("ProcedureId {}: Failed to rollback from validate task.", getProcId(), e); + } } break; case CALCULATE_INFO_FOR_TASK: @@ -330,7 +338,7 @@ protected void rollbackState(ConfigNodeProcedureEnv env, OperatePipeTaskState st break; case OPERATE_ON_DATA_NODES: try { - // We have to make sure that rollbackFromOperateOnDataNodes is executed before + // We have to make sure that rollbackFromOperateOnDataNodes is executed after // rollbackFromWriteConfigNodeConsensus, because rollbackFromOperateOnDataNodes is // executed based on the consensus of config nodes that is written by // rollbackFromWriteConfigNodeConsensus @@ -414,12 +422,42 @@ 
public static Map pushPipeMetaToDataNodes( * @return Error messages for the given pipe after pushing pipe meta */ public static String parsePushPipeMetaExceptionForPipe( - String pipeName, Map respMap) { + final String pipeName, final Map respMap) { final StringBuilder exceptionMessageBuilder = new StringBuilder(); + final StringBuilder enoughMemoryMessageBuilder = new StringBuilder(); - for (Map.Entry respEntry : respMap.entrySet()) { - int dataNodeId = respEntry.getKey(); - TPushPipeMetaResp resp = respEntry.getValue(); + for (final Map.Entry respEntry : respMap.entrySet()) { + final int dataNodeId = respEntry.getKey(); + final TPushPipeMetaResp resp = respEntry.getValue(); + + if (resp.getStatus().getCode() + == TSStatusCode.PIPE_PUSH_META_NOT_ENOUGH_MEMORY.getStatusCode()) { + exceptionMessageBuilder.append(String.format("DataNodeId: %s,", dataNodeId)); + resp.getExceptionMessages() + .forEach( + message -> { + // Ignore the timeStamp for simplicity + if (pipeName == null) { + enoughMemoryMessageBuilder.append( + String.format( + "PipeName: %s, Message: %s", + message.getPipeName(), message.getMessage())); + } else if (pipeName.equals(message.getPipeName())) { + enoughMemoryMessageBuilder.append( + String.format("Message: %s", message.getMessage())); + } + }); + enoughMemoryMessageBuilder.append("."); + continue; + } + + if (resp.getStatus().getCode() == TSStatusCode.PIPE_PUSH_META_TIMEOUT.getStatusCode()) { + exceptionMessageBuilder.append( + String.format( + "DataNodeId: %s, Message: Timeout to wait for lock while processing pushPipeMeta on dataNodes.", + dataNodeId)); + continue; + } if (resp.getStatus().getCode() == TSStatusCode.PIPE_PUSH_META_ERROR.getStatusCode()) { if (!resp.isSetExceptionMessages()) { @@ -430,7 +468,7 @@ public static String parsePushPipeMetaExceptionForPipe( continue; } - AtomicBoolean hasException = new AtomicBoolean(false); + final AtomicBoolean hasException = new AtomicBoolean(false); resp.getExceptionMessages() .forEach( @@ -456,6 +494,12 @@ public static String parsePushPipeMetaExceptionForPipe( } } } + + final String enoughMemoryMessage = enoughMemoryMessageBuilder.toString(); + if (!enoughMemoryMessage.isEmpty()) { + throw new PipeException(enoughMemoryMessage); + } + return exceptionMessageBuilder.toString(); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/CreatePipePluginProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/CreatePipePluginProcedure.java index fd48507989034..e3a4719a71925 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/CreatePipePluginProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/CreatePipePluginProcedure.java @@ -20,7 +20,7 @@ package org.apache.iotdb.confignode.procedure.impl.pipe.plugin; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.pipe.plugin.meta.PipePluginMeta; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.PipePluginMeta; import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.confignode.consensus.request.write.pipe.plugin.CreatePipePluginPlan; import org.apache.iotdb.confignode.consensus.request.write.pipe.plugin.DropPipePluginPlan; @@ -28,12 +28,10 @@ import org.apache.iotdb.confignode.manager.pipe.coordinator.plugin.PipePluginCoordinator; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import 
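The reworked parsePushPipeMetaExceptionForPipe above splits responses into two buffers: not-enough-memory results are collected separately and thrown as a single PipeException, while ordinary push errors are concatenated and returned for the caller to attach to the pipe's status. A hedged sketch of that aggregation shape, with plain status codes and IllegalStateException standing in for the thrift types and PipeException:

```java
import java.util.LinkedHashMap;
import java.util.Map;

public final class PushRespAggregator {
  static final int NOT_ENOUGH_MEMORY = 1;
  static final int PUSH_ERROR = 2;

  // Fatal conditions are thrown; ordinary failures are returned as text.
  static String aggregate(Map<Integer, Integer> codeByDataNode) {
    final StringBuilder errors = new StringBuilder();
    final StringBuilder memory = new StringBuilder();
    for (Map.Entry<Integer, Integer> e : codeByDataNode.entrySet()) {
      if (e.getValue() == NOT_ENOUGH_MEMORY) {
        memory.append(String.format("DataNodeId: %s, not enough memory. ", e.getKey()));
      } else if (e.getValue() == PUSH_ERROR) {
        errors.append(String.format("DataNodeId: %s, push meta error. ", e.getKey()));
      }
    }
    if (memory.length() > 0) {
      throw new IllegalStateException(memory.toString()); // PipeException in the real code
    }
    return errors.toString();
  }

  public static void main(String[] args) {
    Map<Integer, Integer> resp = new LinkedHashMap<>();
    resp.put(1, PUSH_ERROR);
    System.out.println(aggregate(resp)); // DataNodeId: 1, push meta error.
  }
}
```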
org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.node.AbstractNodeProcedure; import org.apache.iotdb.confignode.procedure.impl.node.AddConfigNodeProcedure; import org.apache.iotdb.confignode.procedure.impl.node.RemoveConfigNodeProcedure; -import org.apache.iotdb.confignode.procedure.impl.node.RemoveDataNodeProcedure; +import org.apache.iotdb.confignode.procedure.impl.node.RemoveDataNodesProcedure; import org.apache.iotdb.confignode.procedure.state.pipe.plugin.CreatePipePluginState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; import org.apache.iotdb.consensus.exception.ConsensusException; @@ -54,7 +52,7 @@ /** * This class extends {@link AbstractNodeProcedure} to make sure that when a {@link * CreatePipePluginProcedure} is executed, the {@link AddConfigNodeProcedure}, {@link - * RemoveConfigNodeProcedure} or {@link RemoveDataNodeProcedure} will not be executed at the same + * RemoveConfigNodeProcedure} or {@link RemoveDataNodesProcedure} will not be executed at the same * time. */ public class CreatePipePluginProcedure extends AbstractNodeProcedure { @@ -85,7 +83,7 @@ public CreatePipePluginProcedure( @Override protected Flow executeFromState(ConfigNodeProcedureEnv env, CreatePipePluginState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws InterruptedException { if (pipePluginMeta == null) { return Flow.NO_MORE_STATE; } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/DropPipePluginProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/DropPipePluginProcedure.java index dc9d4ce4f8724..665a3782a91d3 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/DropPipePluginProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/DropPipePluginProcedure.java @@ -22,15 +22,15 @@ import org.apache.iotdb.confignode.consensus.request.write.pipe.plugin.DropPipePluginPlan; import org.apache.iotdb.confignode.manager.pipe.coordinator.plugin.PipePluginCoordinator; import org.apache.iotdb.confignode.manager.pipe.coordinator.task.PipeTaskCoordinator; +import org.apache.iotdb.confignode.manager.subscription.SubscriptionCoordinator; import org.apache.iotdb.confignode.persistence.pipe.PipeTaskInfo; +import org.apache.iotdb.confignode.persistence.subscription.SubscriptionInfo; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.node.AbstractNodeProcedure; import org.apache.iotdb.confignode.procedure.impl.node.AddConfigNodeProcedure; import org.apache.iotdb.confignode.procedure.impl.node.RemoveConfigNodeProcedure; -import org.apache.iotdb.confignode.procedure.impl.node.RemoveDataNodeProcedure; +import org.apache.iotdb.confignode.procedure.impl.node.RemoveDataNodesProcedure; import org.apache.iotdb.confignode.procedure.state.pipe.plugin.DropPipePluginState; import 
org.apache.iotdb.confignode.procedure.store.ProcedureType; import org.apache.iotdb.consensus.exception.ConsensusException; @@ -51,7 +51,7 @@ /** * This class extends {@link AbstractNodeProcedure} to make sure that when a {@link * DropPipePluginProcedure} is executed, the {@link AddConfigNodeProcedure}, {@link - * RemoveConfigNodeProcedure} or {@link RemoveDataNodeProcedure} will not be executed at the same + * RemoveConfigNodeProcedure} or {@link RemoveDataNodesProcedure} will not be executed at the same * time. */ public class DropPipePluginProcedure extends AbstractNodeProcedure { @@ -80,7 +80,7 @@ public DropPipePluginProcedure(String pluginName, boolean isSetIfExistsCondition @Override protected Flow executeFromState(ConfigNodeProcedureEnv env, DropPipePluginState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws InterruptedException { if (pluginName == null) { return Flow.NO_MORE_STATE; } @@ -119,9 +119,12 @@ private Flow executeFromLock(ConfigNodeProcedureEnv env) { env.getConfigManager().getPipeManager().getPipeTaskCoordinator(); final PipePluginCoordinator pipePluginCoordinator = env.getConfigManager().getPipeManager().getPipePluginCoordinator(); + final SubscriptionCoordinator subscriptionCoordinator = + env.getConfigManager().getSubscriptionManager().getSubscriptionCoordinator(); final AtomicReference pipeTaskInfo = pipeTaskCoordinator.lock(); pipePluginCoordinator.lock(); + SubscriptionInfo subscriptionInfo = subscriptionCoordinator.getSubscriptionInfo(); try { if (pipePluginCoordinator @@ -137,6 +140,7 @@ private Flow executeFromLock(ConfigNodeProcedureEnv env) { } pipeTaskInfo.get().validatePipePluginUsageByPipe(pluginName); + subscriptionInfo.validatePipePluginUsageByTopic(pluginName); } catch (PipeException e) { // if the pipe plugin is a built-in plugin, we should not drop it LOGGER.warn(e.getMessage()); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeHandleMetaChangeProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeHandleMetaChangeProcedure.java index e91a6ba974ddc..401859f0a7e0a 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeHandleMetaChangeProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeHandleMetaChangeProcedure.java @@ -20,7 +20,7 @@ package org.apache.iotdb.confignode.procedure.impl.pipe.runtime; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; import org.apache.iotdb.confignode.consensus.request.write.pipe.runtime.PipeHandleMetaChangePlan; import org.apache.iotdb.confignode.persistence.pipe.PipeTaskInfo; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeMetaSyncProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeMetaSyncProcedure.java index c4907d1f9e7e6..c8a734a9e35a0 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeMetaSyncProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeMetaSyncProcedure.java @@ -20,8 +20,8 @@ package 
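With the SubscriptionCoordinator wired into DropPipePluginProcedure above, a plugin referenced by a subscription topic can no longer be dropped, mirroring the existing check against pipes. A hedged sketch of the check-before-drop shape, with plain Sets standing in for the PipeTaskInfo and SubscriptionInfo lookups:

```java
import java.util.Set;
import java.util.concurrent.locks.ReentrantLock;

final class PluginDropGuard {
  private final ReentrantLock lock = new ReentrantLock();

  // A plugin may be dropped only when neither pipes nor topics reference it.
  void drop(String plugin, Set<String> usedByPipes, Set<String> usedByTopics) {
    lock.lock();
    try {
      if (usedByPipes.contains(plugin)) {
        throw new IllegalStateException("Plugin " + plugin + " is used by a pipe");
      }
      if (usedByTopics.contains(plugin)) {
        throw new IllegalStateException("Plugin " + plugin + " is used by a topic");
      }
      // safe point: the real procedure writes DropPipePluginPlan via consensus here
    } finally {
      lock.unlock();
    }
  }
}
```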
org.apache.iotdb.confignode.procedure.impl.pipe.runtime; import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; import org.apache.iotdb.confignode.consensus.request.write.pipe.runtime.PipeHandleMetaChangePlan; import org.apache.iotdb.confignode.persistence.pipe.PipeTaskInfo; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; @@ -130,7 +130,8 @@ public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) { } @Override - public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) throws IOException { + public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) + throws PipeException, IOException { LOGGER.info("PipeMetaSyncProcedure: executeFromOperateOnDataNodes"); Map respMap = pushPipeMetaToDataNodes(env); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/AlterPipeProcedureV2.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/AlterPipeProcedureV2.java index 59a04f9a806e2..b11c74408a603 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/AlterPipeProcedureV2.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/AlterPipeProcedureV2.java @@ -20,11 +20,11 @@ package org.apache.iotdb.confignode.procedure.impl.pipe.task; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeRuntimeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStatus; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeRuntimeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; import org.apache.iotdb.commons.schema.SchemaConstant; import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; @@ -63,7 +63,7 @@ public class AlterPipeProcedureV2 extends AbstractOperatePipeProcedureV2 { private PipeRuntimeMeta currentPipeRuntimeMeta; private PipeRuntimeMeta updatedPipeRuntimeMeta; - private ProcedureType procedureType; + private final ProcedureType procedureType; public AlterPipeProcedureV2(ProcedureType procedureType) { super(); @@ -144,12 +144,13 @@ public void executeFromCalculateInfoForTask(ConfigNodeProcedureEnv env) { .forEach( (regionGroupId, regionLeaderNodeId) -> { final String databaseName = - env.getConfigManager().getPartitionManager().getRegionStorageGroup(regionGroupId); + env.getConfigManager().getPartitionManager().getRegionDatabase(regionGroupId); final PipeTaskMeta currentPipeTaskMeta = currentConsensusGroupId2PipeTaskMeta.get(regionGroupId.getId()); if (databaseName != null && !databaseName.equals(SchemaConstant.SYSTEM_DATABASE) && !databaseName.startsWith(SchemaConstant.SYSTEM_DATABASE + ".") + && currentPipeTaskMeta != null && currentPipeTaskMeta.getLeaderNodeId() == regionLeaderNodeId) { // Pipe only collect user's data, filter metric database here. 
updatedConsensusGroupIdToTaskMetaMap.put( diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/CreatePipeProcedureV2.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/CreatePipeProcedureV2.java index cd581d013329e..26e9eb9bba145 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/CreatePipeProcedureV2.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/CreatePipeProcedureV2.java @@ -25,11 +25,11 @@ import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex; import org.apache.iotdb.commons.consensus.index.impl.RecoverProgressIndex; import org.apache.iotdb.commons.consensus.index.impl.SimpleProgressIndex; -import org.apache.iotdb.commons.pipe.task.meta.PipeRuntimeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStatus; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeType; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeRuntimeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeType; import org.apache.iotdb.commons.schema.SchemaConstant; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.iotdb.confignode.consensus.request.write.pipe.task.CreatePipePlanV2; @@ -60,8 +60,8 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicReference; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_CONSENSUS_GROUP_ID_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_CONSENSUS_SENDER_DATANODE_ID_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_CONSENSUS_GROUP_ID_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_CONSENSUS_SENDER_DATANODE_ID_KEY; public class CreatePipeProcedureV2 extends AbstractOperatePipeProcedureV2 { @@ -180,9 +180,7 @@ public void executeFromCalculateInfoForTask(ConfigNodeProcedureEnv env) { .forEach( (regionGroupId, regionLeaderNodeId) -> { final String databaseName = - env.getConfigManager() - .getPartitionManager() - .getRegionStorageGroup(regionGroupId); + env.getConfigManager().getPartitionManager().getRegionDatabase(regionGroupId); if (databaseName != null && !databaseName.equals(SchemaConstant.SYSTEM_DATABASE) && !databaseName.startsWith(SchemaConstant.SYSTEM_DATABASE + ".")) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/StartPipeProcedureV2.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/StartPipeProcedureV2.java index 58254dc1ab008..fe36137b35f47 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/StartPipeProcedureV2.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/StartPipeProcedureV2.java @@ -20,7 +20,7 @@ package org.apache.iotdb.confignode.procedure.impl.pipe.task; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import 
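Both AlterPipeProcedureV2 and CreatePipeProcedureV2 above assign pipe tasks only for user databases, skipping the system database and anything nested under it (the alter path additionally null-checks the current task meta now). The predicate, extracted as a standalone sketch:

```java
final class DatabaseFilter {
  // Only user databases qualify; the system database and any database whose
  // name is prefixed by it are skipped. systemDatabase stands in for
  // SchemaConstant.SYSTEM_DATABASE.
  static boolean isUserDatabase(String databaseName, String systemDatabase) {
    return databaseName != null
        && !databaseName.equals(systemDatabase)
        && !databaseName.startsWith(systemDatabase + ".");
  }
}
```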
org.apache.iotdb.commons.pipe.task.meta.PipeStatus; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus; import org.apache.iotdb.confignode.consensus.request.write.pipe.task.SetPipeStatusPlanV2; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.impl.pipe.AbstractOperatePipeProcedureV2; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/StopPipeProcedureV2.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/StopPipeProcedureV2.java index a817e16c7aee2..5349cc65640d0 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/StopPipeProcedureV2.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/StopPipeProcedureV2.java @@ -20,7 +20,7 @@ package org.apache.iotdb.confignode.procedure.impl.pipe.task; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.pipe.task.meta.PipeStatus; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus; import org.apache.iotdb.confignode.consensus.request.write.pipe.task.SetPipeStatusPlanV2; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.impl.pipe.AbstractOperatePipeProcedureV2; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/AddRegionPeerProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/AddRegionPeerProcedure.java index d06f40d333127..307fcc8f99a97 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/AddRegionPeerProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/AddRegionPeerProcedure.java @@ -29,9 +29,6 @@ import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.env.RegionMaintainHandler; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; -import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; import org.apache.iotdb.confignode.procedure.state.AddRegionPeerState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; import org.apache.iotdb.db.utils.DateTimeUtils; @@ -50,17 +47,16 @@ import java.util.stream.Collectors; import static org.apache.iotdb.commons.utils.KillPoint.KillPoint.setKillPoint; +import static org.apache.iotdb.confignode.procedure.env.RegionMaintainHandler.simplifiedLocation; import static org.apache.iotdb.confignode.procedure.state.AddRegionPeerState.UPDATE_REGION_LOCATION_CACHE; import static org.apache.iotdb.rpc.TSStatusCode.SUCCESS_STATUS; -public class AddRegionPeerProcedure - extends StateMachineProcedure { +public class AddRegionPeerProcedure extends RegionOperationProcedure { private static final Logger LOGGER = LoggerFactory.getLogger(AddRegionPeerProcedure.class); - private TConsensusGroupId consensusGroupId; private TDataNodeLocation coordinator; - private TDataNodeLocation destDataNode; + private TDataNodeLocation targetDataNode; public AddRegionPeerProcedure() { super(); @@ -69,17 +65,16 @@ public AddRegionPeerProcedure() { public AddRegionPeerProcedure( TConsensusGroupId 
consensusGroupId, TDataNodeLocation coordinator, - TDataNodeLocation destDataNode) { - super(); - this.consensusGroupId = consensusGroupId; + TDataNodeLocation targetDataNode) { + super(consensusGroupId); this.coordinator = coordinator; - this.destDataNode = destDataNode; + this.targetDataNode = targetDataNode; } @Override protected Flow executeFromState(ConfigNodeProcedureEnv env, AddRegionPeerState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { - if (consensusGroupId == null) { + throws InterruptedException { + if (regionId == null) { return Flow.NO_MORE_STATE; } RegionMaintainHandler handler = env.getRegionMaintainHandler(); @@ -88,13 +83,13 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, AddRegionPeerState s switch (state) { case CREATE_NEW_REGION_PEER: LOGGER.info( - "[pid{}][AddRegion] started, region {} will be added to DataNode {}.", + "[pid{}][AddRegion] started, {} will be added to DataNode {}.", getProcId(), - consensusGroupId.getId(), - destDataNode.getDataNodeId()); - handler.addRegionLocation(consensusGroupId, destDataNode); - handler.forceUpdateRegionCache(consensusGroupId, destDataNode, RegionStatus.Adding); - TSStatus status = handler.createNewRegionPeer(consensusGroupId, destDataNode); + regionId, + simplifiedLocation(targetDataNode)); + handler.addRegionLocation(regionId, targetDataNode); + handler.forceUpdateRegionCache(regionId, targetDataNode, RegionStatus.Adding); + TSStatus status = handler.createNewRegionPeer(regionId, targetDataNode); setKillPoint(state); if (status.getCode() != SUCCESS_STATUS.getStatusCode()) { return warnAndRollBackAndNoMoreState(env, handler, "CREATE_NEW_REGION_PEER fail"); @@ -102,12 +97,12 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, AddRegionPeerState s setNextState(AddRegionPeerState.DO_ADD_REGION_PEER); break; case DO_ADD_REGION_PEER: - handler.forceUpdateRegionCache(consensusGroupId, destDataNode, RegionStatus.Adding); + handler.forceUpdateRegionCache(regionId, targetDataNode, RegionStatus.Adding); // We don't want to re-submit AddRegionPeerTask when leader change or ConfigNode reboot if (!this.isStateDeserialized()) { TSStatus tsStatus = handler.submitAddRegionPeerTask( - this.getProcId(), destDataNode, consensusGroupId, coordinator); + this.getProcId(), targetDataNode, regionId, coordinator); setKillPoint(state); if (tsStatus.getCode() != SUCCESS_STATUS.getStatusCode()) { return warnAndRollBackAndNoMoreState( @@ -123,8 +118,9 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, AddRegionPeerState s return warnAndRollBackAndNoMoreState( env, handler, String.format("%s result is %s", state, result.getTaskStatus())); case PROCESSING: - // should never happen - return warnAndRollBackAndNoMoreState(env, handler, "should never return PROCESSING"); + LOGGER.info( + "waitTaskFinish() returns PROCESSING, which means the waiting has been interrupted, this procedure will end without rollback"); + return Flow.NO_MORE_STATE; case SUCCESS: setNextState(UPDATE_REGION_LOCATION_CACHE); break outerSwitch; @@ -133,14 +129,14 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, AddRegionPeerState s env, handler, String.format("status %s is unsupported", result.getTaskStatus())); } case UPDATE_REGION_LOCATION_CACHE: - handler.forceUpdateRegionCache(consensusGroupId, destDataNode, RegionStatus.Running); + handler.forceUpdateRegionCache(regionId, targetDataNode, RegionStatus.Running); setKillPoint(state); LOGGER.info("[pid{}][AddRegion] state {} complete", 
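A behavioral note on the DO_ADD_REGION_PEER handling above: waitTaskFinish returning PROCESSING used to be treated as impossible and triggered a rollback; it now means the wait was interrupted, so the procedure ends without rollback. A simplified sketch of the result dispatch, with an illustrative status enum:

```java
final class AddPeerWaitDispatch {
  enum TaskStatus { SUCCESS, FAIL, TASK_NOT_EXIST, PROCESSING }

  // SUCCESS advances the state machine, TASK_NOT_EXIST/FAIL roll back, and
  // PROCESSING (interrupted wait) now ends the procedure without rollback.
  static String onWaitResult(TaskStatus result) {
    switch (result) {
      case SUCCESS:
        return "advance to UPDATE_REGION_LOCATION_CACHE";
      case PROCESSING:
        return "end without rollback: waiting was interrupted";
      default:
        return "warn, roll back, and end";
    }
  }
}
```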
getProcId(), state); LOGGER.info( - "[pid{}][AddRegion] success, region {} has been added to DataNode {}. Procedure took {} (start at {}).", + "[pid{}][AddRegion] success, {} has been added to DataNode {}. Procedure took {} (start at {}).", getProcId(), - consensusGroupId.getId(), - destDataNode.getDataNodeId(), + regionId, + simplifiedLocation(targetDataNode), CommonDateTimeUtils.convertMillisecondToDurationStr( System.currentTimeMillis() - getSubmittedTime()), DateTimeUtils.convertLongToDate(getSubmittedTime(), "ms")); @@ -170,18 +166,18 @@ private Flow warnAndRollBackAndNoMoreState( } else { LOGGER.warn("[pid{}][AddRegion] Start to roll back, because: {}", getProcId(), reason); } - handler.removeRegionLocation(consensusGroupId, destDataNode); + handler.removeRegionLocation(regionId, targetDataNode); List correctDataNodeLocations = env.getConfigManager().getPartitionManager().getAllReplicaSets().stream() - .filter(tRegionReplicaSet -> tRegionReplicaSet.getRegionId().equals(consensusGroupId)) + .filter(tRegionReplicaSet -> tRegionReplicaSet.getRegionId().equals(regionId)) .findAny() .orElseThrow( () -> new ProcedureException( "[pid{}][AddRegion] Cannot roll back, because cannot find the correct locations")) .getDataNodeLocations(); - if (correctDataNodeLocations.remove(destDataNode)) { + if (correctDataNodeLocations.remove(targetDataNode)) { LOGGER.warn( "[pid{}][AddRegion] It appears that consensus write has not modified the local partition table. " + "Please verify whether a leader change has occurred during this stage. " @@ -194,7 +190,7 @@ private Flow warnAndRollBackAndNoMoreState( .collect(Collectors.toList()) .toString(); List relatedDataNodeLocations = new ArrayList<>(correctDataNodeLocations); - relatedDataNodeLocations.add(destDataNode); + relatedDataNodeLocations.add(targetDataNode); Map relatedDataNodeLocationMap = relatedDataNodeLocations.stream() .collect( @@ -203,15 +199,14 @@ private Flow warnAndRollBackAndNoMoreState( LOGGER.info( "[pid{}][AddRegion] reset peer list: peer list of consensus group {} on DataNode {} will be reset to {}", getProcId(), - consensusGroupId, + regionId, relatedDataNodeLocationMap.values().stream() .map(TDataNodeLocation::getDataNodeId) .collect(Collectors.toList()), correctStr); Map resultMap = - handler.resetPeerList( - consensusGroupId, correctDataNodeLocations, relatedDataNodeLocationMap); + handler.resetPeerList(regionId, correctDataNodeLocations, relatedDataNodeLocationMap); resultMap.forEach( (dataNodeId, resetResult) -> { @@ -219,7 +214,7 @@ private Flow warnAndRollBackAndNoMoreState( LOGGER.info( "[pid{}][AddRegion] reset peer list: peer list of consensus group {} on DataNode {} has been successfully reset to {}", getProcId(), - consensusGroupId, + regionId, dataNodeId, correctStr); } else { @@ -227,7 +222,7 @@ private Flow warnAndRollBackAndNoMoreState( LOGGER.warn( "[pid{}][AddRegion] reset peer list: peer list of consensus group {} on DataNode {} failed to reset to {}, you may manually reset it", getProcId(), - consensusGroupId, + regionId, dataNodeId, correctStr); } @@ -259,8 +254,8 @@ protected AddRegionPeerState getInitialState() { public void serialize(DataOutputStream stream) throws IOException { stream.writeShort(ProcedureType.ADD_REGION_PEER_PROCEDURE.getTypeCode()); super.serialize(stream); - ThriftCommonsSerDeUtils.serializeTConsensusGroupId(consensusGroupId, stream); - ThriftCommonsSerDeUtils.serializeTDataNodeLocation(destDataNode, stream); + ThriftCommonsSerDeUtils.serializeTConsensusGroupId(regionId, stream); + 
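The rollback path above derives the correct peer list by removing the half-added target node from the current replica set, then asks every related DataNode to reset its peers to that list. A minimal sketch with node ids in place of TDataNodeLocation:

```java
import java.util.ArrayList;
import java.util.List;

final class AddPeerRollback {
  // If the target is still listed here, the earlier consensus removal never
  // reached the local partition table; the real code logs this as a possible
  // leader-change symptom.
  static List<Integer> correctPeerList(List<Integer> replicaSet, int targetNodeId) {
    List<Integer> correct = new ArrayList<>(replicaSet);
    if (correct.remove(Integer.valueOf(targetNodeId))) {
      System.err.println("Possible leader change: table still listed the new peer");
    }
    return correct;
  }
}
```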
ThriftCommonsSerDeUtils.serializeTDataNodeLocation(targetDataNode, stream); ThriftCommonsSerDeUtils.serializeTDataNodeLocation(coordinator, stream); } @@ -268,18 +263,14 @@ public void serialize(DataOutputStream stream) throws IOException { public void deserialize(ByteBuffer byteBuffer) { super.deserialize(byteBuffer); try { - consensusGroupId = ThriftCommonsSerDeUtils.deserializeTConsensusGroupId(byteBuffer); - destDataNode = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer); + regionId = ThriftCommonsSerDeUtils.deserializeTConsensusGroupId(byteBuffer); + targetDataNode = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer); coordinator = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer); } catch (ThriftSerDeException e) { LOGGER.error("Error in deserialize {}", this.getClass(), e); } } - public TConsensusGroupId getConsensusGroupId() { - return consensusGroupId; - } - public TDataNodeLocation getCoordinator() { return coordinator; } @@ -290,13 +281,25 @@ public boolean equals(Object obj) { return false; } AddRegionPeerProcedure procedure = (AddRegionPeerProcedure) obj; - return this.consensusGroupId.equals(procedure.consensusGroupId) - && this.destDataNode.equals(procedure.destDataNode) + return this.regionId.equals(procedure.regionId) + && this.targetDataNode.equals(procedure.targetDataNode) && this.coordinator.equals(procedure.coordinator); } @Override public int hashCode() { - return Objects.hash(consensusGroupId, destDataNode, coordinator); + return Objects.hash(regionId, targetDataNode, coordinator); + } + + @Override + public String toString() { + return "AddRegionPeerProcedure{" + + "regionId=" + + regionId + + ", coordinator=" + + simplifiedLocation(coordinator) + + ", targetDataNode=" + + simplifiedLocation(targetDataNode) + + '}'; } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/CreateRegionGroupsProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/CreateRegionGroupsProcedure.java index e573a35c2312e..e5c2302b5b394 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/CreateRegionGroupsProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/CreateRegionGroupsProcedure.java @@ -26,6 +26,8 @@ import org.apache.iotdb.commons.cluster.RegionStatus; import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils; +import org.apache.iotdb.confignode.conf.ConfigNodeConfig; +import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.iotdb.confignode.consensus.request.write.region.CreateRegionGroupsPlan; import org.apache.iotdb.confignode.consensus.request.write.region.OfferRegionMaintainTasksPlan; import org.apache.iotdb.confignode.manager.load.cache.region.RegionHeartbeatSample; @@ -60,6 +62,7 @@ public class CreateRegionGroupsProcedure private CreateRegionGroupsPlan createRegionGroupsPlan = new CreateRegionGroupsPlan(); private CreateRegionGroupsPlan persistPlan = new CreateRegionGroupsPlan(); + private static final ConfigNodeConfig CONF = ConfigNodeDescriptor.getInstance().getConf(); /** key: TConsensusGroupId value: Failed RegionReplicas */ private Map failedRegionReplicaSets = new HashMap<>(); @@ -115,8 +118,13 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, CreateRegionGroupsSt TRegionReplicaSet failedRegionReplicas = 
failedRegionReplicaSets.get(regionReplicaSet.getRegionId()); - if (failedRegionReplicas.getDataNodeLocationsSize() - <= (regionReplicaSet.getDataNodeLocationsSize() - 1) / 2) { + boolean canProvideService = + canRegionGroupProvideService( + regionReplicaSet.getDataNodeLocationsSize(), + failedRegionReplicas.getDataNodeLocationsSize(), + failedRegionReplicas.getRegionId()); + + if (canProvideService) { // A RegionGroup can provide service as long as there are more than // half of the RegionReplicas created successfully persistPlan.addRegionGroup(database, regionReplicaSet); @@ -179,9 +187,15 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, CreateRegionGroupsSt regionReplicaSet -> { TRegionReplicaSet failedRegionReplicas = failedRegionReplicaSets.get(regionReplicaSet.getRegionId()); - if (failedRegionReplicas == null - || failedRegionReplicas.getDataNodeLocationsSize() - <= (regionReplicaSet.getDataNodeLocationsSize() - 1) / 2) { + + boolean canProvideService = + failedRegionReplicas == null + || canRegionGroupProvideService( + regionReplicaSet.getDataNodeLocationsSize(), + failedRegionReplicas.getDataNodeLocationsSize(), + failedRegionReplicas.getRegionId()); + + if (canProvideService) { Set failedDataNodeIds = failedRegionReplicas == null ? new TreeSet<>() @@ -310,4 +324,15 @@ public int hashCode() { return Objects.hash( consensusGroupType, createRegionGroupsPlan, persistPlan, failedRegionReplicaSets); } + + public boolean canRegionGroupProvideService( + int regionGroupNodeNumber, int failedNodeNumber, TConsensusGroupId regionId) { + boolean isStrongConsistency = CONF.isConsensusGroupStrongConsistency(regionId); + int successNodeNumber = regionGroupNodeNumber - failedNodeNumber; + if (isStrongConsistency) { + return successNodeNumber > (regionGroupNodeNumber / 2); + } else { + return successNodeNumber >= 1; + } + } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/NotifyRegionMigrationProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/NotifyRegionMigrationProcedure.java new file mode 100644 index 0000000000000..a02f60c04761f --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/NotifyRegionMigrationProcedure.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
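The canRegionGroupProvideService method above encodes two availability rules: a strong-consistency group needs a strict majority of replicas alive, while a weak-consistency group needs only one. A worked check of the arithmetic:

```java
public final class QuorumDemo {
  // success = total - failed; strong consistency requires a strict majority,
  // weak consistency requires at least one live replica.
  static boolean canProvideService(int total, int failed, boolean strong) {
    int success = total - failed;
    return strong ? success > total / 2 : success >= 1;
  }

  public static void main(String[] args) {
    System.out.println(canProvideService(3, 1, true));  // true:  2 > 1
    System.out.println(canProvideService(3, 2, true));  // false: 1 > 1 fails
    System.out.println(canProvideService(3, 2, false)); // true:  1 >= 1
  }
}
```

Note that integer division makes `success > total / 2` a strict-majority test for both odd and even group sizes (for total = 4, it requires 3 live replicas).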
+ */ + +package org.apache.iotdb.confignode.procedure.impl.region; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.commons.exception.runtime.ThriftSerDeException; +import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils; +import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; +import org.apache.iotdb.confignode.procedure.exception.ProcedureException; +import org.apache.iotdb.confignode.procedure.state.NotifyRegionMigrationState; +import org.apache.iotdb.confignode.procedure.store.ProcedureType; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Objects; + +/** A procedure that notifies all DNs of the ongoing region migration procedure. */ +public class NotifyRegionMigrationProcedure + extends RegionOperationProcedure { + private static final Logger LOGGER = + LoggerFactory.getLogger(NotifyRegionMigrationProcedure.class); + + private boolean isStart; + + public NotifyRegionMigrationProcedure() { + super(); + } + + public NotifyRegionMigrationProcedure(TConsensusGroupId consensusGroupId, boolean isStart) { + super(consensusGroupId); + this.isStart = isStart; + } + + @Override + protected Flow executeFromState(ConfigNodeProcedureEnv env, NotifyRegionMigrationState state) + throws InterruptedException { + if (regionId == null) { + return Flow.NO_MORE_STATE; + } + try { + LOGGER.info( + "[pid{}][NotifyRegionMigration] started, region id is {}.", getProcId(), regionId); + env.notifyRegionMigrationToAllDataNodes(regionId, isStart); + } catch (Exception e) { + LOGGER.error("[pid{}][NotifyRegionMigration] state {} failed", getProcId(), state, e); + return Flow.NO_MORE_STATE; + } + LOGGER.info("[pid{}][NotifyRegionMigration] state {} complete", getProcId(), state); + return Flow.NO_MORE_STATE; + } + + @Override + protected void rollbackState( + ConfigNodeProcedureEnv configNodeProcedureEnv, NotifyRegionMigrationState state) + throws IOException, InterruptedException, ProcedureException {} + + @Override + protected NotifyRegionMigrationState getState(int stateId) { + return NotifyRegionMigrationState.values()[stateId]; + } + + @Override + protected int getStateId(NotifyRegionMigrationState state) { + return state.ordinal(); + } + + @Override + protected NotifyRegionMigrationState getInitialState() { + return NotifyRegionMigrationState.INIT; + } + + @Override + public void serialize(DataOutputStream stream) throws IOException { + stream.writeShort(ProcedureType.NOTIFY_REGION_MIGRATION_PROCEDURE.getTypeCode()); + super.serialize(stream); + ThriftCommonsSerDeUtils.serializeTConsensusGroupId(regionId, stream); + stream.writeBoolean(isStart); + } + + @Override + public void deserialize(ByteBuffer byteBuffer) { + super.deserialize(byteBuffer); + try { + regionId = ThriftCommonsSerDeUtils.deserializeTConsensusGroupId(byteBuffer); + isStart = (byteBuffer.get() != (byte) 0); + } catch (ThriftSerDeException e) { + LOGGER.error("Error in deserialize {}", this.getClass(), e); + } + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof NotifyRegionMigrationProcedure)) { + return false; + } + NotifyRegionMigrationProcedure procedure = (NotifyRegionMigrationProcedure) obj; + return this.regionId.equals(procedure.regionId) && this.isStart == procedure.isStart; + } + + @Override + public int hashCode() { + return Objects.hash(regionId, isStart); + } + + @Override + public String toString() { + return 
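NotifyRegionMigrationProcedure above writes isStart with DataOutputStream.writeBoolean and reads it back as a raw byte, treating any non-zero value as true. A self-contained round trip of that encoding:

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

public final class BooleanSerDeDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try (DataOutputStream out = new DataOutputStream(bytes)) {
      out.writeBoolean(true); // writeBoolean emits a single byte: 1 or 0
    }
    ByteBuffer buffer = ByteBuffer.wrap(bytes.toByteArray());
    boolean isStart = (buffer.get() != (byte) 0); // mirrors the deserializer
    System.out.println(isStart); // true
  }
}
```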
"NotifyRegionMigrationProcedure{" + + "regionId=" + + regionId + + ", isStart=" + + isStart + + '}'; + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/ReconstructRegionProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/ReconstructRegionProcedure.java new file mode 100644 index 0000000000000..230a40098e378 --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/ReconstructRegionProcedure.java @@ -0,0 +1,174 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.procedure.impl.region; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.commons.exception.runtime.ThriftSerDeException; +import org.apache.iotdb.commons.utils.CommonDateTimeUtils; +import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils; +import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; +import org.apache.iotdb.confignode.procedure.exception.ProcedureException; +import org.apache.iotdb.confignode.procedure.state.ReconstructRegionState; +import org.apache.iotdb.confignode.procedure.store.ProcedureType; +import org.apache.iotdb.db.utils.DateTimeUtils; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; + +public class ReconstructRegionProcedure extends RegionOperationProcedure { + private static final Logger LOGGER = LoggerFactory.getLogger(ReconstructRegionProcedure.class); + + private TDataNodeLocation targetDataNode; + private TDataNodeLocation coordinator; + + public ReconstructRegionProcedure() {} + ; + + public ReconstructRegionProcedure( + TConsensusGroupId regionId, TDataNodeLocation targetDataNode, TDataNodeLocation coordinator) { + super(regionId); + this.targetDataNode = targetDataNode; + this.coordinator = coordinator; + } + + @Override + protected Flow executeFromState(ConfigNodeProcedureEnv env, ReconstructRegionState state) + throws InterruptedException { + try { + switch (state) { + case RECONSTRUCT_REGION_PREPARE: + LOGGER.info( + "[pid{}][ReconstructRegion] started, region {} on DataNode {}({}) will be reconstructed.", + getProcId(), + regionId.getId(), + targetDataNode.getDataNodeId(), + targetDataNode.getInternalEndPoint()); + setNextState(ReconstructRegionState.REMOVE_REGION_PEER); + break; + case REMOVE_REGION_PEER: + addChildProcedure(new RemoveRegionPeerProcedure(regionId, coordinator, targetDataNode)); + setNextState(ReconstructRegionState.CHECK_REMOVE_REGION_PEER); + break; + case CHECK_REMOVE_REGION_PEER: + if 
(env.getConfigManager() + .getPartitionManager() + .isDataNodeContainsRegion(targetDataNode.getDataNodeId(), regionId)) { + LOGGER.warn( + "[pid{}][ReconstructRegion] sub-procedure RemoveRegionPeerProcedure failed, ReconstructRegionProcedure will not continue", + getProcId()); + return Flow.NO_MORE_STATE; + } + setNextState(ReconstructRegionState.ADD_REGION_PEER); + break; + case ADD_REGION_PEER: + addChildProcedure(new AddRegionPeerProcedure(regionId, coordinator, targetDataNode)); + setNextState(ReconstructRegionState.CHECK_ADD_REGION_PEER); + break; + case CHECK_ADD_REGION_PEER: + if (!env.getConfigManager() + .getPartitionManager() + .isDataNodeContainsRegion(targetDataNode.getDataNodeId(), regionId)) { + LOGGER.warn( + "[pid{}][ReconstructRegion] failed, but the region {} has been removed from DataNode {}. Use 'extend region' to fix this.", + getProcId(), + regionId.getId(), + targetDataNode.getDataNodeId()); + } else { + LOGGER.info( + "[pid{}][ReconstructRegion] success, region {} has been reconstructed on DataNode {}. Procedure took {} (started at {})", + getProcId(), + regionId.getId(), + targetDataNode.getDataNodeId(), + CommonDateTimeUtils.convertMillisecondToDurationStr( + System.currentTimeMillis() - getSubmittedTime()), + DateTimeUtils.convertLongToDate(getSubmittedTime(), "ms")); + } + return Flow.NO_MORE_STATE; + default: + throw new ProcedureException("Unsupported state: " + state.name()); + } + } catch (Exception e) { + LOGGER.error("[pid{}][ReconstructRegion] state {} fail", getProcId(), state, e); + return Flow.NO_MORE_STATE; + } + LOGGER.info("[pid{}][ReconstructRegion] state {} complete", getProcId(), state); + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState( + ConfigNodeProcedureEnv configNodeProcedureEnv, ReconstructRegionState reconstructRegionState) + throws IOException, InterruptedException, ProcedureException {} + + @Override + public void serialize(DataOutputStream stream) throws IOException { + stream.writeShort(ProcedureType.RECONSTRUCT_REGION_PROCEDURE.getTypeCode()); + super.serialize(stream); + ThriftCommonsSerDeUtils.serializeTConsensusGroupId(regionId, stream); + ThriftCommonsSerDeUtils.serializeTDataNodeLocation(targetDataNode, stream); + ThriftCommonsSerDeUtils.serializeTDataNodeLocation(coordinator, stream); + } + + @Override + public void deserialize(ByteBuffer byteBuffer) { + super.deserialize(byteBuffer); + try { + regionId = ThriftCommonsSerDeUtils.deserializeTConsensusGroupId(byteBuffer); + targetDataNode = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer); + coordinator = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer); + } catch (ThriftSerDeException e) { + LOGGER.warn( + "Error in deserialize {} (procID {}). This procedure will be ignored. 
It may belong to old version and cannot be used now.", + this.getClass(), + this.getProcId(), + e); + throw e; + } + } + + @Override + protected ReconstructRegionState getState(int stateId) { + return ReconstructRegionState.values()[stateId]; + } + + @Override + protected int getStateId(ReconstructRegionState reconstructRegionState) { + return reconstructRegionState.ordinal(); + } + + @Override + protected ReconstructRegionState getInitialState() { + return ReconstructRegionState.RECONSTRUCT_REGION_PREPARE; + } + + @Override + public String toString() { + return super.toString() + + ", targetDataNode=" + + targetDataNode.getDataNodeId() + + ", coordinator=" + + coordinator.getDataNodeId(); + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RegionMigrateProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RegionMigrateProcedure.java index 62045592d5ac6..3d70f04f76d46 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RegionMigrateProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RegionMigrateProcedure.java @@ -27,8 +27,6 @@ import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.env.RegionMaintainHandler; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; -import org.apache.iotdb.confignode.procedure.state.ProcedureLockState; import org.apache.iotdb.confignode.procedure.state.RegionTransitionState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; import org.apache.iotdb.db.utils.DateTimeUtils; @@ -42,15 +40,13 @@ import java.util.Objects; /** Region migrate procedure */ -public class RegionMigrateProcedure - extends StateMachineProcedure { +public class RegionMigrateProcedure extends RegionOperationProcedure { private static final Logger LOGGER = LoggerFactory.getLogger(RegionMigrateProcedure.class); /** Wait region migrate finished */ - private TConsensusGroupId consensusGroupId; - private TDataNodeLocation originalDataNode; + private TDataNodeLocation destDataNode; private TDataNodeLocation coordinatorForAddPeer; private TDataNodeLocation coordinatorForRemovePeer; @@ -65,8 +61,7 @@ public RegionMigrateProcedure( TDataNodeLocation destDataNode, TDataNodeLocation coordinatorForAddPeer, TDataNodeLocation coordinatorForRemovePeer) { - super(); - this.consensusGroupId = consensusGroupId; + super(consensusGroupId); this.originalDataNode = originalDataNode; this.destDataNode = destDataNode; this.coordinatorForAddPeer = coordinatorForAddPeer; @@ -75,7 +70,7 @@ public RegionMigrateProcedure( @Override protected Flow executeFromState(ConfigNodeProcedureEnv env, RegionTransitionState state) { - if (consensusGroupId == null) { + if (regionId == null) { return Flow.NO_MORE_STATE; } RegionMaintainHandler handler = env.getRegionMaintainHandler(); @@ -83,24 +78,25 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RegionTransitionStat switch (state) { case REGION_MIGRATE_PREPARE: LOGGER.info( - "[pid{}][MigrateRegion] started, region {} will be migrated from DataNode {} to {}.", + "[pid{}][MigrateRegion] started, {} will be migrated from DataNode {} to {}.", getProcId(), - consensusGroupId.getId(), - originalDataNode.getDataNodeId(), - destDataNode.getDataNodeId()); + regionId, + 
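ReconstructRegionProcedure above drives a remove-then-add sequence, re-reading the partition table after each child procedure and stopping without rollback if the removal is still visible. A toy sketch of that check-then-continue shape, with a Set standing in for PartitionManager's region mapping:

```java
import java.util.HashSet;
import java.util.Set;

final class ReconstructSketch {
  // Each "node#region" key marks a region replica hosted on a node.
  private final Set<String> regionOnNode = new HashSet<>();

  // Returns true when the region was removed and then re-added on the node.
  boolean reconstruct(String node, String region) {
    String key = node + "#" + region;
    regionOnNode.remove(key);            // child 1: RemoveRegionPeerProcedure
    if (regionOnNode.contains(key)) {    // real child procedures can fail,
      return false;                      // leaving the region in place
    }
    regionOnNode.add(key);               // child 2: AddRegionPeerProcedure
    return regionOnNode.contains(key);   // success only when visible again
  }
}
```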
RegionMaintainHandler.simplifiedLocation(originalDataNode), + RegionMaintainHandler.simplifiedLocation(destDataNode)); + addChildProcedure(new NotifyRegionMigrationProcedure(regionId, true)); setNextState(RegionTransitionState.ADD_REGION_PEER); break; case ADD_REGION_PEER: addChildProcedure( - new AddRegionPeerProcedure(consensusGroupId, coordinatorForAddPeer, destDataNode)); + new AddRegionPeerProcedure(regionId, coordinatorForAddPeer, destDataNode)); setNextState(RegionTransitionState.CHECK_ADD_REGION_PEER); break; case CHECK_ADD_REGION_PEER: if (!env.getConfigManager() .getPartitionManager() - .isDataNodeContainsRegion(destDataNode.getDataNodeId(), consensusGroupId)) { + .isDataNodeContainsRegion(destDataNode.getDataNodeId(), regionId)) { LOGGER.warn( - "[pid{}][MigrateRegion] sub-procedure AddRegionPeerProcedure fail, RegionMigrateProcedure will not continue", + "[pid{}][MigrateRegion] sub-procedure AddRegionPeerProcedure failed, RegionMigrateProcedure will not continue", getProcId()); return Flow.NO_MORE_STATE; } @@ -108,28 +104,28 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RegionTransitionStat break; case REMOVE_REGION_PEER: addChildProcedure( - new RemoveRegionPeerProcedure( - consensusGroupId, coordinatorForRemovePeer, originalDataNode)); + new RemoveRegionPeerProcedure(regionId, coordinatorForRemovePeer, originalDataNode)); setNextState(RegionTransitionState.CHECK_REMOVE_REGION_PEER); break; case CHECK_REMOVE_REGION_PEER: + String cleanHint = ""; if (env.getConfigManager() .getPartitionManager() - .isDataNodeContainsRegion(originalDataNode.getDataNodeId(), consensusGroupId)) { - LOGGER.warn( - "[pid{}][MigrateRegion] success, but you may need to manually clean the old region to make everything works fine", - getProcId()); - } else { - LOGGER.info( - "[pid{}][MigrateRegion] success, region {} has been migrated from DataNode {} to {}. Procedure took {} (started at {})", - getProcId(), - consensusGroupId.getId(), - originalDataNode.getDataNodeId(), - destDataNode.getDataNodeId(), - CommonDateTimeUtils.convertMillisecondToDurationStr( - System.currentTimeMillis() - getSubmittedTime()), - DateTimeUtils.convertLongToDate(getSubmittedTime(), "ms")); + .isDataNodeContainsRegion(originalDataNode.getDataNodeId(), regionId)) { + cleanHint = + "but you may need to restart the related DataNode to make sure everything is cleaned up. "; } + LOGGER.info( + "[pid{}][MigrateRegion] success,{} {} has been migrated from DataNode {} to {}. 
Procedure took {} (started at {}).", + getProcId(), + cleanHint, + regionId, + RegionMaintainHandler.simplifiedLocation(originalDataNode), + RegionMaintainHandler.simplifiedLocation(destDataNode), + CommonDateTimeUtils.convertMillisecondToDurationStr( + System.currentTimeMillis() - getSubmittedTime()), + DateTimeUtils.convertLongToDate(getSubmittedTime(), "ms")); + addChildProcedure(new NotifyRegionMigrationProcedure(regionId, false)); return Flow.NO_MORE_STATE; default: throw new ProcedureException("Unsupported state: " + state.name()); @@ -147,38 +143,6 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RegionTransitionStat protected void rollbackState(ConfigNodeProcedureEnv env, RegionTransitionState state) throws IOException, InterruptedException, ProcedureException {} - @Override - protected ProcedureLockState acquireLock(ConfigNodeProcedureEnv configNodeProcedureEnv) { - configNodeProcedureEnv.getSchedulerLock().lock(); - try { - if (configNodeProcedureEnv.getRegionMigrateLock().tryLock(this)) { - LOGGER.info("procedureId {} acquire lock.", getProcId()); - return ProcedureLockState.LOCK_ACQUIRED; - } - configNodeProcedureEnv.getRegionMigrateLock().waitProcedure(this); - - LOGGER.info("procedureId {} wait for lock.", getProcId()); - return ProcedureLockState.LOCK_EVENT_WAIT; - } finally { - configNodeProcedureEnv.getSchedulerLock().unlock(); - } - } - - @Override - protected void releaseLock(ConfigNodeProcedureEnv configNodeProcedureEnv) { - configNodeProcedureEnv.getSchedulerLock().lock(); - try { - LOGGER.info("procedureId {} release lock.", getProcId()); - if (configNodeProcedureEnv.getRegionMigrateLock().releaseLock(this)) { - configNodeProcedureEnv - .getRegionMigrateLock() - .wakeWaitingProcedures(configNodeProcedureEnv.getScheduler()); - } - } finally { - configNodeProcedureEnv.getSchedulerLock().unlock(); - } - } - @Override protected RegionTransitionState getState(int stateId) { return RegionTransitionState.values()[stateId]; @@ -200,7 +164,7 @@ public void serialize(DataOutputStream stream) throws IOException { super.serialize(stream); ThriftCommonsSerDeUtils.serializeTDataNodeLocation(originalDataNode, stream); ThriftCommonsSerDeUtils.serializeTDataNodeLocation(destDataNode, stream); - ThriftCommonsSerDeUtils.serializeTConsensusGroupId(consensusGroupId, stream); + ThriftCommonsSerDeUtils.serializeTConsensusGroupId(regionId, stream); ThriftCommonsSerDeUtils.serializeTDataNodeLocation(coordinatorForAddPeer, stream); ThriftCommonsSerDeUtils.serializeTDataNodeLocation(coordinatorForRemovePeer, stream); } @@ -211,15 +175,16 @@ public void deserialize(ByteBuffer byteBuffer) { try { originalDataNode = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer); destDataNode = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer); - consensusGroupId = ThriftCommonsSerDeUtils.deserializeTConsensusGroupId(byteBuffer); + regionId = ThriftCommonsSerDeUtils.deserializeTConsensusGroupId(byteBuffer); coordinatorForAddPeer = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer); coordinatorForRemovePeer = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer); } catch (ThriftSerDeException e) { - LOGGER.error( - "Error in deserialize {} (procID {}), this procedure may belong to old version and already cannot be used.", + LOGGER.warn( + "Error in deserialize {} (procID {}). This procedure will be ignored. 
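RegionMigrateProcedure above also drops its bespoke acquireLock/releaseLock pair and instead brackets the peer changes with NotifyRegionMigrationProcedure children, telling every DataNode when the migration starts and ends. The ordering, as a sketch with Runnables standing in for the child procedures:

```java
final class MigrationOrdering {
  static void migrate(Runnable notifyStart, Runnable addPeer,
                      Runnable removePeer, Runnable notifyEnd) {
    notifyStart.run(); // NotifyRegionMigrationProcedure(regionId, true)
    addPeer.run();     // AddRegionPeerProcedure
    removePeer.run();  // RemoveRegionPeerProcedure
    notifyEnd.run();   // NotifyRegionMigrationProcedure(regionId, false)
  }
}
```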
It may belong to old version and cannot be used now.", this.getClass(), this.getProcId(), e); + throw e; } } @@ -231,17 +196,17 @@ public boolean equals(Object that) { && thatProc.getState() == this.getState() && thatProc.originalDataNode.equals(this.originalDataNode) && thatProc.destDataNode.equals(this.destDataNode) - && thatProc.consensusGroupId.equals(this.consensusGroupId); + && thatProc.regionId.equals(this.regionId); } return false; } @Override public int hashCode() { - return Objects.hash(this.originalDataNode, this.destDataNode, this.consensusGroupId); + return Objects.hash(this.originalDataNode, this.destDataNode, this.regionId); } - public TConsensusGroupId getConsensusGroupId() { - return consensusGroupId; + public TDataNodeLocation getDestDataNode() { + return destDataNode; } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RegionMigrationPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RegionMigrationPlan.java new file mode 100644 index 0000000000000..9e90a00e84a1d --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RegionMigrationPlan.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.procedure.impl.region; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Objects; + +public class RegionMigrationPlan { + private TConsensusGroupId regionId; + private TDataNodeLocation fromDataNode; + private TDataNodeLocation toDataNode; + + public RegionMigrationPlan(TConsensusGroupId regionId, TDataNodeLocation fromDataNode) { + this.regionId = regionId; + this.fromDataNode = fromDataNode; + // default value is fromDataNode, which means no migration + this.toDataNode = fromDataNode; + } + + public static RegionMigrationPlan create( + TConsensusGroupId regionId, TDataNodeLocation fromDataNode) { + return new RegionMigrationPlan(regionId, fromDataNode); + } + + public TConsensusGroupId getRegionId() { + return regionId; + } + + public TDataNodeLocation getFromDataNode() { + return fromDataNode; + } + + public TDataNodeLocation getToDataNode() { + return toDataNode; + } + + public void setToDataNode(TDataNodeLocation toDataNode) { + this.toDataNode = toDataNode; + } + + public void serialize(DataOutputStream stream) throws IOException { + ThriftCommonsSerDeUtils.serializeTConsensusGroupId(regionId, stream); + ThriftCommonsSerDeUtils.serializeTDataNodeLocation(fromDataNode, stream); + ThriftCommonsSerDeUtils.serializeTDataNodeLocation(toDataNode, stream); + } + + public static RegionMigrationPlan deserialize(ByteBuffer byteBuffer) { + TConsensusGroupId regionId = ThriftCommonsSerDeUtils.deserializeTConsensusGroupId(byteBuffer); + TDataNodeLocation fromDataNode = + ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer); + RegionMigrationPlan plan = RegionMigrationPlan.create(regionId, fromDataNode); + plan.setToDataNode(ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer)); + return plan; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + RegionMigrationPlan that = (RegionMigrationPlan) obj; + return regionId.equals(that.regionId) + && fromDataNode.equals(that.fromDataNode) + && toDataNode.equals(that.toDataNode); + } + + @Override + public int hashCode() { + return Objects.hash(regionId, fromDataNode, toDataNode); + } + + @Override + public String toString() { + return "RegionMigrationPlan{" + + "regionId=" + + regionId + + ", fromDataNode=" + + fromDataNode + + ", toDataNode=" + + toDataNode + + '}'; + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RegionOperationProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RegionOperationProcedure.java new file mode 100644 index 0000000000000..ad0cb38ac82db --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RegionOperationProcedure.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
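RegionMigrationPlan above initializes toDataNode to fromDataNode, so a freshly created plan means "no migration" until a target is assigned via setToDataNode. A compact demonstration of that default, with strings in place of the thrift location structs:

```java
public final class PlanDemo {
  static final class Plan {
    final String from;
    String to;

    Plan(String from) {
      this.from = from;
      this.to = from; // default: no migration
    }
  }

  public static void main(String[] args) {
    Plan plan = new Plan("dn-1");
    System.out.println(plan.from.equals(plan.to)); // true: nothing to migrate yet
    plan.to = "dn-2";                              // setToDataNode(...)
    System.out.println(plan.to);                   // dn-2
  }
}
```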
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.procedure.impl.region; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; +import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; + +public abstract class RegionOperationProcedure + extends StateMachineProcedure { + TConsensusGroupId regionId; + + public RegionOperationProcedure() {} + + public RegionOperationProcedure(TConsensusGroupId regionId) { + this.regionId = regionId; + } + + public void setRegionId(TConsensusGroupId regionId) { + this.regionId = regionId; + } + + public TConsensusGroupId getRegionId() { + return regionId; + } + + @Override + public String toString() { + return super.toString() + ", regionId=" + regionId; + } +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RemoveRegionPeerProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RemoveRegionPeerProcedure.java index 166714e90fcdc..f362a7a100800 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RemoveRegionPeerProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RemoveRegionPeerProcedure.java @@ -26,14 +26,10 @@ import org.apache.iotdb.commons.cluster.RegionStatus; import org.apache.iotdb.commons.exception.runtime.ThriftSerDeException; import org.apache.iotdb.commons.utils.CommonDateTimeUtils; -import org.apache.iotdb.commons.utils.KillPoint.KillPoint; import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.env.RegionMaintainHandler; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; -import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; import org.apache.iotdb.confignode.procedure.state.RemoveRegionPeerState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; import org.apache.iotdb.db.utils.DateTimeUtils; @@ -53,10 +49,8 @@ import static org.apache.iotdb.confignode.procedure.state.RemoveRegionPeerState.REMOVE_REGION_PEER; import static org.apache.iotdb.rpc.TSStatusCode.SUCCESS_STATUS; -public class RemoveRegionPeerProcedure - extends StateMachineProcedure { +public class RemoveRegionPeerProcedure extends RegionOperationProcedure { private static final Logger LOGGER = LoggerFactory.getLogger(RemoveRegionPeerProcedure.class); - private TConsensusGroupId consensusGroupId; private TDataNodeLocation coordinator; private TDataNodeLocation targetDataNode; @@ -68,16 +62,26 @@ public RemoveRegionPeerProcedure( TConsensusGroupId consensusGroupId, TDataNodeLocation coordinator, TDataNodeLocation targetDataNode) { - super(); - this.consensusGroupId = consensusGroupId; + super(consensusGroupId); this.coordinator = 
coordinator; this.targetDataNode = targetDataNode; } + private void handleTransferLeader(RegionMaintainHandler handler) + throws ProcedureException, InterruptedException { + LOGGER.info( + "[pid{}][RemoveRegion] started, region {} will be removed from DataNode {}.", + getProcId(), + regionId.getId(), + targetDataNode.getDataNodeId()); + handler.forceUpdateRegionCache(regionId, targetDataNode, RegionStatus.Removing); + handler.transferRegionLeader(regionId, targetDataNode, coordinator); + } + @Override protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveRegionPeerState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { - if (consensusGroupId == null) { + throws InterruptedException { + if (regionId == null) { return Flow.NO_MORE_STATE; } TSStatus tsStatus; @@ -85,27 +89,23 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveRegionPeerStat try { switch (state) { case TRANSFER_REGION_LEADER: - LOGGER.info( - "[pid{}][RemoveRegion] started, region {} will be removed from DataNode {}.", - getProcId(), - consensusGroupId.getId(), - targetDataNode.getDataNodeId()); - handler.forceUpdateRegionCache(consensusGroupId, targetDataNode, RegionStatus.Removing); - handler.transferRegionLeader(consensusGroupId, targetDataNode); - KillPoint.setKillPoint(state); + handleTransferLeader(handler); + setKillPoint(state); setNextState(REMOVE_REGION_PEER); break; case REMOVE_REGION_PEER: - handler.forceUpdateRegionCache(consensusGroupId, targetDataNode, RegionStatus.Removing); + handler.forceUpdateRegionCache(regionId, targetDataNode, RegionStatus.Removing); tsStatus = handler.submitRemoveRegionPeerTask( - this.getProcId(), targetDataNode, consensusGroupId, coordinator); + this.getProcId(), targetDataNode, regionId, coordinator); setKillPoint(state); if (tsStatus.getCode() != SUCCESS_STATUS.getStatusCode()) { LOGGER.warn( - "[pid{}][RemoveRegion] {} task submitted failed, procedure will continue. You should manually clear peer list.", + "[pid{}][RemoveRegion] {} task submission failed, ConfigNode believes the current peer list of {} is {}. The procedure will continue. You should manually clear the peer list.", getProcId(), - state); + state, + regionId, + handler.getRegionReplicaSetString(regionId)); setNextState(DELETE_OLD_REGION_PEER); return Flow.HAS_MORE_STATE; } @@ -113,24 +113,26 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveRegionPeerStat handler.waitTaskFinish(this.getProcId(), coordinator); if (removeRegionPeerResult.getTaskStatus() != TRegionMaintainTaskStatus.SUCCESS) { LOGGER.warn( - "[pid{}][RemoveRegion] {} executed failed, procedure will continue. You should manually clear peer list.", + "[pid{}][RemoveRegion] {} execution failed, ConfigNode believes the current peer list of {} is {}. The procedure will continue. 
You should manually clear the peer list.", getProcId(), - state); + state, + regionId, + handler.getRegionReplicaSetString(regionId)); setNextState(DELETE_OLD_REGION_PEER); return Flow.HAS_MORE_STATE; } setNextState(DELETE_OLD_REGION_PEER); break; case DELETE_OLD_REGION_PEER: - handler.forceUpdateRegionCache(consensusGroupId, targetDataNode, RegionStatus.Removing); + handler.forceUpdateRegionCache(regionId, targetDataNode, RegionStatus.Removing); tsStatus = - handler.submitDeleteOldRegionPeerTask( - this.getProcId(), targetDataNode, consensusGroupId); + handler.submitDeleteOldRegionPeerTask(this.getProcId(), targetDataNode, regionId); setKillPoint(state); if (tsStatus.getCode() != SUCCESS_STATUS.getStatusCode()) { LOGGER.warn( - "[pid{}][RemoveRegion] DELETE_OLD_REGION_PEER task submitted failed, procedure will continue. You should manually delete region file.", - getProcId()); + "[pid{}][RemoveRegion] DELETE_OLD_REGION_PEER task submission failed, the procedure will continue. You should manually delete the region file. {}", + getProcId(), + regionId); setNextState(REMOVE_REGION_LOCATION_CACHE); return Flow.HAS_MORE_STATE; } @@ -138,21 +140,22 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveRegionPeerStat handler.waitTaskFinish(this.getProcId(), targetDataNode); if (deleteOldRegionPeerResult.getTaskStatus() != TRegionMaintainTaskStatus.SUCCESS) { LOGGER.warn( - "[pid{}][RemoveRegion] DELETE_OLD_REGION_PEER executed failed, procedure will continue. You should manually delete region file.", - getProcId()); + "[pid{}][RemoveRegion] DELETE_OLD_REGION_PEER execution failed, the procedure will continue. You should manually delete the region file. {}", + getProcId(), + regionId); setNextState(REMOVE_REGION_LOCATION_CACHE); return Flow.HAS_MORE_STATE; } setNextState(REMOVE_REGION_LOCATION_CACHE); break; case REMOVE_REGION_LOCATION_CACHE: - handler.removeRegionLocation(consensusGroupId, targetDataNode); + handler.removeRegionLocation(regionId, targetDataNode); setKillPoint(state); LOGGER.info("RemoveRegionPeer state {} success", state); LOGGER.info( "[pid{}][RemoveRegion] success, region {} has been removed from DataNode {}. 
Procedure took {} (started at {})", getProcId(), - consensusGroupId.getId(), + regionId.getId(), targetDataNode.getDataNodeId(), CommonDateTimeUtils.convertMillisecondToDurationStr( System.currentTimeMillis() - getSubmittedTime()), @@ -192,7 +195,7 @@ protected RemoveRegionPeerState getInitialState() { public void serialize(DataOutputStream stream) throws IOException { stream.writeShort(ProcedureType.REMOVE_REGION_PEER_PROCEDURE.getTypeCode()); super.serialize(stream); - ThriftCommonsSerDeUtils.serializeTConsensusGroupId(consensusGroupId, stream); + ThriftCommonsSerDeUtils.serializeTConsensusGroupId(regionId, stream); ThriftCommonsSerDeUtils.serializeTDataNodeLocation(targetDataNode, stream); ThriftCommonsSerDeUtils.serializeTDataNodeLocation(coordinator, stream); } @@ -201,7 +204,7 @@ public void serialize(DataOutputStream stream) throws IOException { public void deserialize(ByteBuffer byteBuffer) { super.deserialize(byteBuffer); try { - consensusGroupId = ThriftCommonsSerDeUtils.deserializeTConsensusGroupId(byteBuffer); + regionId = ThriftCommonsSerDeUtils.deserializeTConsensusGroupId(byteBuffer); targetDataNode = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer); coordinator = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer); } catch (ThriftSerDeException e) { @@ -209,10 +212,6 @@ public void deserialize(ByteBuffer byteBuffer) { } } - public TConsensusGroupId getConsensusGroupId() { - return consensusGroupId; - } - public TDataNodeLocation getCoordinator() { return coordinator; } @@ -227,13 +226,13 @@ public boolean equals(Object obj) { return false; } RemoveRegionPeerProcedure procedure = (RemoveRegionPeerProcedure) obj; - return this.consensusGroupId.equals(procedure.consensusGroupId) + return this.regionId.equals(procedure.regionId) && this.targetDataNode.equals(procedure.targetDataNode) && this.coordinator.equals(procedure.coordinator); } @Override public int hashCode() { - return Objects.hash(consensusGroupId, targetDataNode, coordinator); + return Objects.hash(regionId, targetDataNode, coordinator); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterLogicalViewProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterLogicalViewProcedure.java index 865c1d55ad6af..a0537f1417583 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterLogicalViewProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterLogicalViewProcedure.java @@ -30,13 +30,11 @@ import org.apache.iotdb.commons.path.PathDeserializeUtil; import org.apache.iotdb.commons.path.PathPatternTree; import org.apache.iotdb.commons.schema.view.viewExpression.ViewExpression; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; import 
org.apache.iotdb.confignode.procedure.state.schema.AlterLogicalViewState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; @@ -74,14 +72,14 @@ public class AlterLogicalViewProcedure private transient PathPatternTree pathPatternTree; private transient ByteBuffer patternTreeBytes; - public AlterLogicalViewProcedure(boolean isGeneratedByPipe) { + public AlterLogicalViewProcedure(final boolean isGeneratedByPipe) { super(isGeneratedByPipe); } public AlterLogicalViewProcedure( - String queryId, - Map viewPathToSourceMap, - boolean isGeneratedByPipe) { + final String queryId, + final Map viewPathToSourceMap, + final boolean isGeneratedByPipe) { super(isGeneratedByPipe); this.queryId = queryId; this.viewPathToSourceMap = viewPathToSourceMap; @@ -89,9 +87,10 @@ public AlterLogicalViewProcedure( } @Override - protected Flow executeFromState(ConfigNodeProcedureEnv env, AlterLogicalViewState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { - long startTime = System.currentTimeMillis(); + protected Flow executeFromState( + final ConfigNodeProcedureEnv env, final AlterLogicalViewState state) + throws InterruptedException { + final long startTime = System.currentTimeMillis(); try { switch (state) { case CLEAN_DATANODE_SCHEMA_CACHE: @@ -103,7 +102,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, AlterLogicalViewStat LOGGER.info("Alter view {}", viewPathToSourceMap.keySet()); try { alterLogicalView(env); - } catch (ProcedureException e) { + } catch (final ProcedureException e) { setFailure(e); } return Flow.NO_MORE_STATE; @@ -117,17 +116,17 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, AlterLogicalViewStat } } - private void invalidateCache(ConfigNodeProcedureEnv env) { - Map dataNodeLocationMap = + private void invalidateCache(final ConfigNodeProcedureEnv env) { + final Map dataNodeLocationMap = env.getConfigManager().getNodeManager().getRegisteredDataNodeLocations(); - DataNodeAsyncRequestContext clientHandler = + final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.INVALIDATE_MATCHED_SCHEMA_CACHE, + CnToDnAsyncRequestType.INVALIDATE_MATCHED_SCHEMA_CACHE, new TInvalidateMatchedSchemaCacheReq(patternTreeBytes), dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); - Map statusMap = clientHandler.getResponseMap(); - for (TSStatus status : statusMap.values()) { + final Map statusMap = clientHandler.getResponseMap(); + for (final TSStatus status : statusMap.values()) { // all dataNodes must clear the related schemaengine cache if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { LOGGER.error( @@ -140,22 +139,22 @@ private void invalidateCache(ConfigNodeProcedureEnv env) { } } - private void alterLogicalView(ConfigNodeProcedureEnv env) throws ProcedureException { - Map targetSchemaRegionGroup = + private void alterLogicalView(final ConfigNodeProcedureEnv env) throws ProcedureException { + final Map targetSchemaRegionGroup = env.getConfigManager().getRelatedSchemaRegionGroup(pathPatternTree); - Map> schemaRegionRequestMap = + final Map> schemaRegionRequestMap = new HashMap<>(); - for (Map.Entry entry : viewPathToSourceMap.entrySet()) { + for (final Map.Entry entry : viewPathToSourceMap.entrySet()) { schemaRegionRequestMap .computeIfAbsent(getBelongedSchemaRegion(env, entry.getKey()), k -> new HashMap<>()) .put(entry.getKey(), entry.getValue()); } - AlterLogicalViewRegionTaskExecutor 
regionTaskExecutor = + final AlterLogicalViewRegionTaskExecutor regionTaskExecutor = new AlterLogicalViewRegionTaskExecutor<>( "Alter view", env, targetSchemaRegionGroup, - CnToDnRequestType.ALTER_VIEW, + CnToDnAsyncRequestType.ALTER_VIEW, (dataNodeLocation, consensusGroupIdList) -> { TAlterViewReq req = new TAlterViewReq().setIsGeneratedByPipe(isGeneratedByPipe); req.setSchemaRegionIdList(consensusGroupIdList); @@ -187,16 +186,16 @@ private void alterLogicalView(ConfigNodeProcedureEnv env) throws ProcedureExcept } private TConsensusGroupId getBelongedSchemaRegion( - ConfigNodeProcedureEnv env, PartialPath viewPath) throws ProcedureException { - PathPatternTree patternTree = new PathPatternTree(); + final ConfigNodeProcedureEnv env, final PartialPath viewPath) throws ProcedureException { + final PathPatternTree patternTree = new PathPatternTree(); patternTree.appendFullPath(viewPath); patternTree.constructTree(); - Map> schemaPartitionTable = + final Map> schemaPartitionTable = env.getConfigManager().getSchemaPartition(patternTree).schemaPartitionTable; if (schemaPartitionTable.isEmpty()) { throw new ProcedureException(new ViewNotExistException(viewPath.getFullPath())); } else { - Map slotMap = + final Map slotMap = schemaPartitionTable.values().iterator().next(); if (slotMap.isEmpty()) { throw new ProcedureException(new ViewNotExistException(viewPath.getFullPath())); @@ -207,24 +206,26 @@ private TConsensusGroupId getBelongedSchemaRegion( } @Override - protected boolean isRollbackSupported(AlterLogicalViewState alterLogicalViewState) { + protected boolean isRollbackSupported(final AlterLogicalViewState alterLogicalViewState) { return true; } @Override protected void rollbackState( - ConfigNodeProcedureEnv env, AlterLogicalViewState alterLogicalViewState) + final ConfigNodeProcedureEnv env, final AlterLogicalViewState alterLogicalViewState) throws IOException, InterruptedException, ProcedureException { - invalidateCache(env); + if (alterLogicalViewState == AlterLogicalViewState.CLEAN_DATANODE_SCHEMA_CACHE) { + invalidateCache(env); + } } @Override - protected AlterLogicalViewState getState(int stateId) { + protected AlterLogicalViewState getState(final int stateId) { return AlterLogicalViewState.values()[stateId]; } @Override - protected int getStateId(AlterLogicalViewState alterLogicalViewState) { + protected int getStateId(final AlterLogicalViewState alterLogicalViewState) { return alterLogicalViewState.ordinal(); } @@ -238,16 +239,16 @@ public String getQueryId() { } private void generatePathPatternTree() { - PathPatternTree patternTree = new PathPatternTree(); - for (PartialPath path : viewPathToSourceMap.keySet()) { + final PathPatternTree patternTree = new PathPatternTree(); + for (final PartialPath path : viewPathToSourceMap.keySet()) { patternTree.appendFullPath(path); } patternTree.constructTree(); - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); + final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); + final DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); try { patternTree.serialize(dataOutputStream); - } catch (IOException ignored) { + } catch (final IOException ignored) { // won't reach here } @@ -256,7 +257,7 @@ private void generatePathPatternTree() { } @Override - public void serialize(DataOutputStream stream) throws IOException { + public void serialize(final DataOutputStream stream) throws IOException { 
stream.writeShort( isGeneratedByPipe ? ProcedureType.PIPE_ENRICHED_ALTER_LOGICAL_VIEW_PROCEDURE.getTypeCode() @@ -264,19 +265,19 @@ public void serialize(DataOutputStream stream) throws IOException { super.serialize(stream); ReadWriteIOUtils.write(queryId, stream); ReadWriteIOUtils.write(this.viewPathToSourceMap.size(), stream); - for (Map.Entry entry : viewPathToSourceMap.entrySet()) { + for (final Map.Entry entry : viewPathToSourceMap.entrySet()) { entry.getKey().serialize(stream); ViewExpression.serialize(entry.getValue(), stream); } } @Override - public void deserialize(ByteBuffer byteBuffer) { + public void deserialize(final ByteBuffer byteBuffer) { super.deserialize(byteBuffer); queryId = ReadWriteIOUtils.readString(byteBuffer); - Map viewPathToSourceMap = new HashMap<>(); - int size = byteBuffer.getInt(); + final Map viewPathToSourceMap = new HashMap<>(); + final int size = byteBuffer.getInt(); PartialPath path; ViewExpression viewExpression; for (int i = 0; i < size; i++) { @@ -289,10 +290,14 @@ public void deserialize(ByteBuffer byteBuffer) { } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (!(o instanceof AlterLogicalViewProcedure)) return false; - AlterLogicalViewProcedure that = (AlterLogicalViewProcedure) o; + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (!(o instanceof AlterLogicalViewProcedure)) { + return false; + } + final AlterLogicalViewProcedure that = (AlterLogicalViewProcedure) o; return Objects.equals(getProcId(), that.getProcId()) && Objects.equals(getCurrentState(), that.getCurrentState()) && Objects.equals(getCycles(), that.getCycles()) @@ -320,27 +325,27 @@ private class AlterLogicalViewRegionTaskExecutor private final List failureStatusList = new ArrayList<>(); AlterLogicalViewRegionTaskExecutor( - String taskName, - ConfigNodeProcedureEnv env, - Map targetSchemaRegionGroup, - CnToDnRequestType dataNodeRequestType, - BiFunction, Q> dataNodeRequestGenerator) { + final String taskName, + final ConfigNodeProcedureEnv env, + final Map targetSchemaRegionGroup, + final CnToDnAsyncRequestType dataNodeRequestType, + final BiFunction, Q> dataNodeRequestGenerator) { super(env, targetSchemaRegionGroup, false, dataNodeRequestType, dataNodeRequestGenerator); this.taskName = taskName; } @Override protected List processResponseOfOneDataNode( - TDataNodeLocation dataNodeLocation, - List consensusGroupIdList, - TSStatus response) { - List failedRegionList = new ArrayList<>(); + final TDataNodeLocation dataNodeLocation, + final List consensusGroupIdList, + final TSStatus response) { + final List failedRegionList = new ArrayList<>(); if (response.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { return failedRegionList; } if (response.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) { - List subStatusList = response.getSubStatus(); + final List subStatusList = response.getSubStatus(); TSStatus subStatus; for (int i = 0; i < subStatusList.size(); i++) { subStatus = subStatusList.get(i); @@ -359,7 +364,7 @@ protected List processResponseOfOneDataNode( return failedRegionList; } - private void collectFailure(TSStatus failureStatus) { + private void collectFailure(final TSStatus failureStatus) { if (failureStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) { failureStatusList.addAll(failureStatus.getSubStatus()); } else { @@ -379,7 +384,8 @@ private void collectFailure(TSStatus failureStatus) { @Override protected void onAllReplicasetFailure( - TConsensusGroupId consensusGroupId, 
Set dataNodeLocationSet) { + final TConsensusGroupId consensusGroupId, + final Set dataNodeLocationSet) { setFailure( new ProcedureException( new MetadataException( diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DataNodeRegionTaskExecutor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DataNodeRegionTaskExecutor.java index af15174daf715..64a68a59c495b 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DataNodeRegionTaskExecutor.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DataNodeRegionTaskExecutor.java @@ -22,7 +22,7 @@ import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; import org.apache.iotdb.confignode.manager.ConfigManager; @@ -45,7 +45,7 @@ public abstract class DataNodeRegionTaskExecutor { protected final Map targetRegionGroup; protected final boolean executeOnAllReplicaset; - protected final CnToDnRequestType dataNodeRequestType; + protected final CnToDnAsyncRequestType dataNodeRequestType; protected final BiFunction, Q> dataNodeRequestGenerator; @@ -55,7 +55,7 @@ protected DataNodeRegionTaskExecutor( ConfigManager configManager, Map targetRegionGroup, boolean executeOnAllReplicaset, - CnToDnRequestType dataNodeRequestType, + CnToDnAsyncRequestType dataNodeRequestType, BiFunction, Q> dataNodeRequestGenerator) { this.configManager = configManager; this.targetRegionGroup = targetRegionGroup; @@ -68,7 +68,7 @@ protected DataNodeRegionTaskExecutor( ConfigNodeProcedureEnv env, Map targetRegionGroup, boolean executeOnAllReplicaset, - CnToDnRequestType dataNodeRequestType, + CnToDnAsyncRequestType dataNodeRequestType, BiFunction, Q> dataNodeRequestGenerator) { this.configManager = env.getConfigManager(); this.targetRegionGroup = targetRegionGroup; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeactivateTemplateProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeactivateTemplateProcedure.java index dd3cb6b0073ed..e1a94cf0934d2 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeactivateTemplateProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeactivateTemplateProcedure.java @@ -28,15 +28,13 @@ import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathDeserializeUtil; import org.apache.iotdb.commons.path.PathPatternTree; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeDeactivateTemplatePlan; import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeEnrichedPlan; import 
org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; import org.apache.iotdb.confignode.procedure.state.schema.DeactivateTemplateState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; @@ -97,7 +95,7 @@ public DeactivateTemplateProcedure( @Override protected Flow executeFromState(ConfigNodeProcedureEnv env, DeactivateTemplateState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws InterruptedException { long startTime = System.currentTimeMillis(); try { switch (state) { @@ -152,7 +150,7 @@ private long constructBlackList(ConfigNodeProcedureEnv env) { "construct schema black list", env, targetSchemaRegionGroup, - CnToDnRequestType.CONSTRUCT_SCHEMA_BLACK_LIST_WITH_TEMPLATE, + CnToDnAsyncRequestType.CONSTRUCT_SCHEMA_BLACK_LIST_WITH_TEMPLATE, ((dataNodeLocation, consensusGroupIdList) -> new TConstructSchemaBlackListWithTemplateReq( consensusGroupIdList, dataNodeRequest))) { @@ -200,7 +198,7 @@ private void invalidateCache(ConfigNodeProcedureEnv env) { env.getConfigManager().getNodeManager().getRegisteredDataNodeLocations(); DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.INVALIDATE_MATCHED_SCHEMA_CACHE, + CnToDnAsyncRequestType.INVALIDATE_MATCHED_SCHEMA_CACHE, new TInvalidateMatchedSchemaCacheReq(timeSeriesPatternTreeBytes), dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance() @@ -233,7 +231,7 @@ private void deleteData(ConfigNodeProcedureEnv env) { env, relatedDataRegionGroup, true, - CnToDnRequestType.DELETE_DATA_FOR_DELETE_SCHEMA, + CnToDnAsyncRequestType.DELETE_DATA_FOR_DELETE_SCHEMA, ((dataNodeLocation, consensusGroupIdList) -> new TDeleteDataForDeleteSchemaReq( new ArrayList<>(consensusGroupIdList), timeSeriesPatternTreeBytes))); @@ -248,7 +246,7 @@ private void deactivateTemplate(ConfigNodeProcedureEnv env) { "deactivate template schema", env, env.getConfigManager().getRelatedSchemaRegionGroup(timeSeriesPatternTree), - CnToDnRequestType.DEACTIVATE_TEMPLATE, + CnToDnAsyncRequestType.DEACTIVATE_TEMPLATE, ((dataNodeLocation, consensusGroupIdList) -> new TDeactivateTemplateReq(consensusGroupIdList, dataNodeRequest) .setIsGeneratedByPipe(isGeneratedByPipe))); @@ -286,7 +284,7 @@ protected void rollbackState( "roll back schema black list", env, env.getConfigManager().getRelatedSchemaRegionGroup(timeSeriesPatternTree), - CnToDnRequestType.ROLLBACK_SCHEMA_BLACK_LIST_WITH_TEMPLATE, + CnToDnAsyncRequestType.ROLLBACK_SCHEMA_BLACK_LIST_WITH_TEMPLATE, ((dataNodeLocation, consensusGroupIdList) -> new TRollbackSchemaBlackListWithTemplateReq( consensusGroupIdList, dataNodeRequest))); @@ -439,7 +437,7 @@ private class DeactivateTemplateRegionTaskExecutor String taskName, ConfigNodeProcedureEnv env, Map targetSchemaRegionGroup, - CnToDnRequestType dataNodeRequestType, + CnToDnAsyncRequestType dataNodeRequestType, BiFunction, Q> dataNodeRequestGenerator) { super(env, targetSchemaRegionGroup, false, dataNodeRequestType, dataNodeRequestGenerator); this.taskName = taskName; @@ -450,7 +448,7 @@ private class DeactivateTemplateRegionTaskExecutor ConfigNodeProcedureEnv env, Map targetDataRegionGroup, boolean executeOnAllReplicaset, - 
CnToDnRequestType dataNodeRequestType, + CnToDnAsyncRequestType dataNodeRequestType, BiFunction, Q> dataNodeRequestGenerator) { super( env, diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteDatabaseProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteDatabaseProcedure.java index d47e575a3ad6a..7288090e81559 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteDatabaseProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteDatabaseProcedure.java @@ -27,7 +27,7 @@ import org.apache.iotdb.commons.exception.runtime.ThriftSerDeException; import org.apache.iotdb.commons.service.metric.MetricService; import org.apache.iotdb.commons.utils.ThriftConfigNodeSerDeUtils; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; import org.apache.iotdb.confignode.consensus.request.write.database.PreDeleteDatabasePlan; @@ -36,8 +36,6 @@ import org.apache.iotdb.confignode.persistence.partition.maintainer.RegionDeleteTask; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; import org.apache.iotdb.confignode.procedure.state.schema.DeleteStorageGroupState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; @@ -83,8 +81,9 @@ public void setDeleteDatabaseSchema(TDatabaseSchema deleteDatabaseSchema) { } @Override - protected Flow executeFromState(ConfigNodeProcedureEnv env, DeleteStorageGroupState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + protected Flow executeFromState( + final ConfigNodeProcedureEnv env, final DeleteStorageGroupState state) + throws InterruptedException { if (deleteDatabaseSchema == null) { return Flow.NO_MORE_STATE; } @@ -146,17 +145,9 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, DeleteStorageGroupSt env.getConfigManager().getConsensusManager().write(dataRegionDeleteTaskOfferPlan); } - // Delete DatabasePartitionTable - final TSStatus deleteConfigResult = - env.deleteDatabaseConfig(deleteDatabaseSchema.getName(), isGeneratedByPipe); - - // Delete Database metrics - PartitionMetrics.unbindDatabaseRelatedMetricsWhenUpdate( - MetricService.getInstance(), deleteDatabaseSchema.getName()); - // try sync delete schemaengine region DataNodeAsyncRequestContext asyncClientHandler = - new DataNodeAsyncRequestContext<>(CnToDnRequestType.DELETE_REGION); + new DataNodeAsyncRequestContext<>(CnToDnAsyncRequestType.DELETE_REGION); Map schemaRegionDeleteTaskMap = new HashMap<>(); int requestIndex = 0; for (TRegionReplicaSet schemaRegionReplicaSet : schemaRegionReplicaSets) { @@ -200,13 +191,23 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, DeleteStorageGroupSt } } + env.getConfigManager() + .getLoadManager() + .clearDataPartitionPolicyTable(deleteDatabaseSchema.getName()); + LOG.info("data partition policy table 
cleared."); + + // Delete Database metrics + PartitionMetrics.unbindDatabaseRelatedMetricsWhenUpdate( + MetricService.getInstance(), deleteDatabaseSchema.getName()); + + // Delete DatabasePartitionTable + final TSStatus deleteConfigResult = + env.deleteDatabaseConfig(deleteDatabaseSchema.getName(), isGeneratedByPipe); + if (deleteConfigResult.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { LOG.info( "[DeleteDatabaseProcedure] Database: {} is deleted successfully", deleteDatabaseSchema.getName()); - env.getConfigManager() - .getLoadManager() - .clearDataPartitionPolicyTable(deleteDatabaseSchema.getName()); return Flow.NO_MORE_STATE; } else if (getCycles() > RETRY_THRESHOLD) { setFailure( diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteLogicalViewProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteLogicalViewProcedure.java index 0a83770689c8e..4d92454fd22b3 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteLogicalViewProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteLogicalViewProcedure.java @@ -26,15 +26,13 @@ import org.apache.iotdb.commons.exception.MetadataException; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathPatternTree; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeDeleteLogicalViewPlan; import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeEnrichedPlan; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; import org.apache.iotdb.confignode.procedure.state.schema.DeleteLogicalViewState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; @@ -90,8 +88,9 @@ public DeleteLogicalViewProcedure( } @Override - protected Flow executeFromState(ConfigNodeProcedureEnv env, DeleteLogicalViewState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + protected Flow executeFromState( + final ConfigNodeProcedureEnv env, final DeleteLogicalViewState state) + throws InterruptedException { long startTime = System.currentTimeMillis(); try { switch (state) { @@ -142,7 +141,7 @@ private long constructBlackList(ConfigNodeProcedureEnv env) { "construct view schema engine black list", env, targetSchemaRegionGroup, - CnToDnRequestType.CONSTRUCT_VIEW_SCHEMA_BLACK_LIST, + CnToDnAsyncRequestType.CONSTRUCT_VIEW_SCHEMA_BLACK_LIST, ((dataNodeLocation, consensusGroupIdList) -> new TConstructViewSchemaBlackListReq(consensusGroupIdList, patternTreeBytes))) { @Override @@ -186,7 +185,7 @@ private void invalidateCache(ConfigNodeProcedureEnv env) { env.getConfigManager().getNodeManager().getRegisteredDataNodeLocations(); DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( 
- CnToDnRequestType.INVALIDATE_MATCHED_SCHEMA_CACHE, + CnToDnAsyncRequestType.INVALIDATE_MATCHED_SCHEMA_CACHE, new TInvalidateMatchedSchemaCacheReq(patternTreeBytes), dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); @@ -211,7 +210,7 @@ private void deleteViewSchema(ConfigNodeProcedureEnv env) { "delete view in schema engine", env, env.getConfigManager().getRelatedSchemaRegionGroup(patternTree), - CnToDnRequestType.DELETE_VIEW, + CnToDnAsyncRequestType.DELETE_VIEW, ((dataNodeLocation, consensusGroupIdList) -> new TDeleteViewSchemaReq(consensusGroupIdList, patternTreeBytes) .setIsGeneratedByPipe(isGeneratedByPipe))); @@ -247,7 +246,7 @@ protected void rollbackState( "roll back view schema engine black list", env, env.getConfigManager().getRelatedSchemaRegionGroup(patternTree), - CnToDnRequestType.ROLLBACK_VIEW_SCHEMA_BLACK_LIST, + CnToDnAsyncRequestType.ROLLBACK_VIEW_SCHEMA_BLACK_LIST, (dataNodeLocation, consensusGroupIdList) -> new TRollbackViewSchemaBlackListReq(consensusGroupIdList, patternTreeBytes)); rollbackStateTask.execute(); @@ -343,7 +342,7 @@ private class DeleteLogicalViewRegionTaskExecutor String taskName, ConfigNodeProcedureEnv env, Map targetSchemaRegionGroup, - CnToDnRequestType dataNodeRequestType, + CnToDnAsyncRequestType dataNodeRequestType, BiFunction, Q> dataNodeRequestGenerator) { super(env, targetSchemaRegionGroup, false, dataNodeRequestType, dataNodeRequestGenerator); this.taskName = taskName; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteTimeSeriesProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteTimeSeriesProcedure.java index cd9133c0cf553..c7ad4c8802108 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteTimeSeriesProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteTimeSeriesProcedure.java @@ -26,15 +26,13 @@ import org.apache.iotdb.commons.exception.MetadataException; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathPatternTree; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeDeleteTimeSeriesPlan; import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeEnrichedPlan; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; import org.apache.iotdb.confignode.procedure.state.schema.DeleteTimeSeriesState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; @@ -97,7 +95,7 @@ public DeleteTimeSeriesProcedure( @Override protected Flow executeFromState( final ConfigNodeProcedureEnv env, final DeleteTimeSeriesState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws InterruptedException { final 
long startTime = System.currentTimeMillis(); try { switch (state) { @@ -154,7 +152,7 @@ private long constructBlackList(final ConfigNodeProcedureEnv env) { "construct schema engine black list", env, targetSchemaRegionGroup, - CnToDnRequestType.CONSTRUCT_SCHEMA_BLACK_LIST, + CnToDnAsyncRequestType.CONSTRUCT_SCHEMA_BLACK_LIST, ((dataNodeLocation, consensusGroupIdList) -> new TConstructSchemaBlackListReq(consensusGroupIdList, patternTreeBytes))) { @Override @@ -200,7 +198,7 @@ private void invalidateCache(final ConfigNodeProcedureEnv env) { env.getConfigManager().getNodeManager().getRegisteredDataNodeLocations(); final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.INVALIDATE_MATCHED_SCHEMA_CACHE, + CnToDnAsyncRequestType.INVALIDATE_MATCHED_SCHEMA_CACHE, new TInvalidateMatchedSchemaCacheReq(patternTreeBytes), dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); @@ -250,7 +248,7 @@ private void executeDeleteData( env, relatedDataRegionGroup, true, - CnToDnRequestType.DELETE_DATA_FOR_DELETE_SCHEMA, + CnToDnAsyncRequestType.DELETE_DATA_FOR_DELETE_SCHEMA, ((dataNodeLocation, consensusGroupIdList) -> new TDeleteDataForDeleteSchemaReq( new ArrayList<>(consensusGroupIdList), @@ -265,7 +263,7 @@ private void deleteTimeSeriesSchema(final ConfigNodeProcedureEnv env) { "delete time series in schema engine", env, env.getConfigManager().getRelatedSchemaRegionGroup(patternTree), - CnToDnRequestType.DELETE_TIMESERIES, + CnToDnAsyncRequestType.DELETE_TIMESERIES, ((dataNodeLocation, consensusGroupIdList) -> new TDeleteTimeSeriesReq(consensusGroupIdList, patternTreeBytes) .setIsGeneratedByPipe(isGeneratedByPipe))); @@ -302,7 +300,7 @@ protected void rollbackState( "roll back schema engine black list", env, env.getConfigManager().getRelatedSchemaRegionGroup(patternTree), - CnToDnRequestType.ROLLBACK_SCHEMA_BLACK_LIST, + CnToDnAsyncRequestType.ROLLBACK_SCHEMA_BLACK_LIST, (dataNodeLocation, consensusGroupIdList) -> new TRollbackSchemaBlackListReq(consensusGroupIdList, patternTreeBytes)); rollbackStateTask.execute(); @@ -403,7 +401,7 @@ private class DeleteTimeSeriesRegionTaskExecutor final String taskName, final ConfigNodeProcedureEnv env, final Map targetSchemaRegionGroup, - final CnToDnRequestType dataNodeRequestType, + final CnToDnAsyncRequestType dataNodeRequestType, final BiFunction, Q> dataNodeRequestGenerator) { super(env, targetSchemaRegionGroup, false, dataNodeRequestType, dataNodeRequestGenerator); this.taskName = taskName; @@ -414,7 +412,7 @@ private class DeleteTimeSeriesRegionTaskExecutor final ConfigNodeProcedureEnv env, final Map targetDataRegionGroup, final boolean executeOnAllReplicaset, - final CnToDnRequestType dataNodeRequestType, + final CnToDnAsyncRequestType dataNodeRequestType, final BiFunction, Q> dataNodeRequestGenerator) { super( env, diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SchemaUtils.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SchemaUtils.java index 1df6ff508fdde..25cd4704dc6ad 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SchemaUtils.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SchemaUtils.java @@ -26,7 +26,7 @@ import org.apache.iotdb.commons.exception.MetadataException; import org.apache.iotdb.commons.path.PartialPath; import 
org.apache.iotdb.commons.path.PathPatternTree; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.manager.ConfigManager; import org.apache.iotdb.db.exception.metadata.PathNotExistException; import org.apache.iotdb.db.schemaengine.template.Template; @@ -78,7 +78,7 @@ public static boolean checkDataNodeTemplateActivation( configManager, relatedSchemaRegionGroup, false, - CnToDnRequestType.COUNT_PATHS_USING_TEMPLATE, + CnToDnAsyncRequestType.COUNT_PATHS_USING_TEMPLATE, ((dataNodeLocation, consensusGroupIdList) -> new TCountPathsUsingTemplateReq( template.getId(), patternTreeBytes, consensusGroupIdList))) { @@ -156,7 +156,7 @@ public static void checkSchemaRegionUsingTemplate( configManager, relatedSchemaRegionGroup, false, - CnToDnRequestType.CHECK_SCHEMA_REGION_USING_TEMPLATE, + CnToDnAsyncRequestType.CHECK_SCHEMA_REGION_USING_TEMPLATE, ((dataNodeLocation, consensusGroupIdList) -> new TCheckSchemaRegionUsingTemplateReq(consensusGroupIdList))) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SetTTLProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SetTTLProcedure.java index ec9b003d7eeef..9b43af2003bc0 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SetTTLProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SetTTLProcedure.java @@ -24,7 +24,7 @@ import org.apache.iotdb.common.rpc.thrift.TSetTTLReq; import org.apache.iotdb.commons.exception.IoTDBException; import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; @@ -32,8 +32,6 @@ import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeEnrichedPlan; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; import org.apache.iotdb.confignode.procedure.state.schema.SetTTLState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; @@ -67,7 +65,7 @@ public SetTTLProcedure(SetTTLPlan plan, final boolean isGeneratedByPipe) { @Override protected Flow executeFromState(ConfigNodeProcedureEnv env, SetTTLState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws InterruptedException { long startTime = System.currentTimeMillis(); try { switch (state) { @@ -110,7 +108,7 @@ private void updateDataNodeTTL(ConfigNodeProcedureEnv env) { env.getConfigManager().getNodeManager().getRegisteredDataNodeLocations(); DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.SET_TTL, + CnToDnAsyncRequestType.SET_TTL, new TSetTTLReq( Collections.singletonList(String.join(".", plan.getPathPattern())), plan.getTTL(), diff --git 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SetTemplateProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SetTemplateProcedure.java index 2bd4068ee89fc..c0de568f877fa 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SetTemplateProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SetTemplateProcedure.java @@ -28,7 +28,7 @@ import org.apache.iotdb.commons.exception.MetadataException; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathPatternTree; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; import org.apache.iotdb.confignode.consensus.request.read.template.CheckTemplateSettablePlan; @@ -39,8 +39,6 @@ import org.apache.iotdb.confignode.consensus.response.template.TemplateInfoResp; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; import org.apache.iotdb.confignode.procedure.state.schema.SetTemplateState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; @@ -96,8 +94,8 @@ public SetTemplateProcedure( } @Override - protected Flow executeFromState(ConfigNodeProcedureEnv env, SetTemplateState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + protected Flow executeFromState(final ConfigNodeProcedureEnv env, final SetTemplateState state) + throws InterruptedException { long startTime = System.currentTimeMillis(); try { switch (state) { @@ -212,7 +210,7 @@ private void preReleaseTemplate(ConfigNodeProcedureEnv env) { env.getConfigManager().getNodeManager().getRegisteredDataNodeLocations(); DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.UPDATE_TEMPLATE, req, dataNodeLocationMap); + CnToDnAsyncRequestType.UPDATE_TEMPLATE, req, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); Map statusMap = clientHandler.getResponseMap(); for (Map.Entry entry : statusMap.entrySet()) { @@ -282,7 +280,7 @@ private void validateTimeSeriesExistence(ConfigNodeProcedureEnv env) { env, relatedSchemaRegionGroup, false, - CnToDnRequestType.CHECK_TIMESERIES_EXISTENCE, + CnToDnAsyncRequestType.CHECK_TIMESERIES_EXISTENCE, ((dataNodeLocation, consensusGroupIdList) -> new TCheckTimeSeriesExistenceReq(patternTreeBytes, consensusGroupIdList))) { @@ -386,7 +384,7 @@ private void commitReleaseTemplate(ConfigNodeProcedureEnv env) { env.getConfigManager().getNodeManager().getRegisteredDataNodeLocations(); DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.UPDATE_TEMPLATE, req, dataNodeLocationMap); + CnToDnAsyncRequestType.UPDATE_TEMPLATE, req, dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); Map statusMap = 
clientHandler.getResponseMap(); for (Map.Entry entry : statusMap.entrySet()) { @@ -491,7 +489,9 @@ private void rollbackPreRelease(ConfigNodeProcedureEnv env) { DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.UPDATE_TEMPLATE, invalidateTemplateSetInfoReq, dataNodeLocationMap); + CnToDnAsyncRequestType.UPDATE_TEMPLATE, + invalidateTemplateSetInfoReq, + dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); Map statusMap = clientHandler.getResponseMap(); for (Map.Entry entry : statusMap.entrySet()) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/UnsetTemplateProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/UnsetTemplateProcedure.java index ee70f3c7a7d17..49e116c2ccd79 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/UnsetTemplateProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/UnsetTemplateProcedure.java @@ -26,13 +26,11 @@ import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathDeserializeUtil; import org.apache.iotdb.commons.path.PathPatternTree; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; import org.apache.iotdb.confignode.procedure.state.schema.UnsetTemplateState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; @@ -69,12 +67,15 @@ public class UnsetTemplateProcedure private transient ByteBuffer addTemplateSetInfo; private transient ByteBuffer invalidateTemplateSetInfo; - public UnsetTemplateProcedure(boolean isGeneratedByPipe) { + public UnsetTemplateProcedure(final boolean isGeneratedByPipe) { super(isGeneratedByPipe); } public UnsetTemplateProcedure( - String queryId, Template template, PartialPath path, boolean isGeneratedByPipe) { + final String queryId, + final Template template, + final PartialPath path, + final boolean isGeneratedByPipe) { super(isGeneratedByPipe); this.queryId = queryId; this.template = template; @@ -82,9 +83,9 @@ public UnsetTemplateProcedure( } @Override - protected Flow executeFromState(ConfigNodeProcedureEnv env, UnsetTemplateState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { - long startTime = System.currentTimeMillis(); + protected Flow executeFromState(final ConfigNodeProcedureEnv env, final UnsetTemplateState state) + throws InterruptedException { + final long startTime = System.currentTimeMillis(); try { switch (state) { case CONSTRUCT_BLACK_LIST: @@ -127,8 +128,8 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, UnsetTemplateState s } } - private void constructBlackList(ConfigNodeProcedureEnv env) { - TSStatus status = + private void constructBlackList(final 
ConfigNodeProcedureEnv env) { + final TSStatus status = env.getConfigManager() .getClusterSchemaManager() .preUnsetSchemaTemplate(template.getId(), path); @@ -139,28 +140,30 @@ private void constructBlackList(ConfigNodeProcedureEnv env) { } } - private void invalidateCache(ConfigNodeProcedureEnv env) { + private void invalidateCache(final ConfigNodeProcedureEnv env) { try { executeInvalidateCache(env); setNextState(UnsetTemplateState.CHECK_DATANODE_TEMPLATE_ACTIVATION); - } catch (ProcedureException e) { + } catch (final ProcedureException e) { setFailure(e); } } - private void executeInvalidateCache(ConfigNodeProcedureEnv env) throws ProcedureException { - Map dataNodeLocationMap = + private void executeInvalidateCache(final ConfigNodeProcedureEnv env) throws ProcedureException { + final Map dataNodeLocationMap = env.getConfigManager().getNodeManager().getRegisteredDataNodeLocations(); - TUpdateTemplateReq invalidateTemplateSetInfoReq = new TUpdateTemplateReq(); + final TUpdateTemplateReq invalidateTemplateSetInfoReq = new TUpdateTemplateReq(); invalidateTemplateSetInfoReq.setType( TemplateInternalRPCUpdateType.INVALIDATE_TEMPLATE_SET_INFO.toByte()); invalidateTemplateSetInfoReq.setTemplateInfo(getInvalidateTemplateSetInfo()); - DataNodeAsyncRequestContext clientHandler = + final DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.UPDATE_TEMPLATE, invalidateTemplateSetInfoReq, dataNodeLocationMap); + CnToDnAsyncRequestType.UPDATE_TEMPLATE, + invalidateTemplateSetInfoReq, + dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); - Map statusMap = clientHandler.getResponseMap(); - for (TSStatus status : statusMap.values()) { + final Map statusMap = clientHandler.getResponseMap(); + for (final TSStatus status : statusMap.values()) { // all dataNodes must clear the related template cache if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { LOGGER.error( @@ -172,14 +175,14 @@ private void executeInvalidateCache(ConfigNodeProcedureEnv env) throws Procedure } } - private boolean checkDataNodeTemplateActivation(ConfigNodeProcedureEnv env) { - PathPatternTree patternTree = new PathPatternTree(); + private boolean checkDataNodeTemplateActivation(final ConfigNodeProcedureEnv env) { + final PathPatternTree patternTree = new PathPatternTree(); patternTree.appendPathPattern(path); patternTree.appendPathPattern(path.concatNode(MULTI_LEVEL_PATH_WILDCARD)); try { return SchemaUtils.checkDataNodeTemplateActivation( env.getConfigManager(), patternTree, template); - } catch (MetadataException e) { + } catch (final MetadataException e) { setFailure( new ProcedureException( new MetadataException( @@ -190,20 +193,19 @@ private boolean checkDataNodeTemplateActivation(ConfigNodeProcedureEnv env) { } } - private void unsetTemplate(ConfigNodeProcedureEnv env) { - TSStatus status = + private void unsetTemplate(final ConfigNodeProcedureEnv env) { + final TSStatus status = env.getConfigManager() .getClusterSchemaManager() .unsetSchemaTemplateInBlackList(template.getId(), path, isGeneratedByPipe); - if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - setNextState(UnsetTemplateState.CLEAN_DATANODE_TEMPLATE_CACHE); - } else { + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { setFailure(new ProcedureException(new IoTDBException(status.getMessage(), status.getCode()))); } } @Override - protected void rollbackState(ConfigNodeProcedureEnv env, 
UnsetTemplateState unsetTemplateState) + protected void rollbackState( + final ConfigNodeProcedureEnv env, final UnsetTemplateState unsetTemplateState) throws IOException, InterruptedException, ProcedureException { if (alreadyRollback) { return; @@ -212,7 +214,7 @@ protected void rollbackState(ConfigNodeProcedureEnv env, UnsetTemplateState unse ProcedureException rollbackException; try { executeRollbackInvalidateCache(env); - TSStatus status = + final TSStatus status = env.getConfigManager() .getClusterSchemaManager() .rollbackPreUnsetSchemaTemplate(template.getId(), path); @@ -228,13 +230,13 @@ protected void rollbackState(ConfigNodeProcedureEnv env, UnsetTemplateState unse new MetadataException( "Rollback template pre unset failed because of" + status.getMessage())); } - } catch (ProcedureException e) { + } catch (final ProcedureException e) { rollbackException = e; } try { executeInvalidateCache(env); setFailure(rollbackException); - } catch (ProcedureException exception) { + } catch (final ProcedureException exception) { setFailure( new ProcedureException( new MetadataException( @@ -248,11 +250,13 @@ private void executeRollbackInvalidateCache(ConfigNodeProcedureEnv env) env.getConfigManager().getNodeManager().getRegisteredDataNodeLocations(); TUpdateTemplateReq rollbackTemplateSetInfoReq = new TUpdateTemplateReq(); rollbackTemplateSetInfoReq.setType( - TemplateInternalRPCUpdateType.ADD_TEMPLATE_SET_INFO.toByte()); + TemplateInternalRPCUpdateType.ROLLBACK_INVALIDATE_TEMPLATE_SET_INFO.toByte()); rollbackTemplateSetInfoReq.setTemplateInfo(getAddTemplateSetInfo()); DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( - CnToDnRequestType.UPDATE_TEMPLATE, rollbackTemplateSetInfoReq, dataNodeLocationMap); + CnToDnAsyncRequestType.UPDATE_TEMPLATE, + rollbackTemplateSetInfoReq, + dataNodeLocationMap); CnToDnInternalServiceAsyncRequestManager.getInstance().sendAsyncRequestWithRetry(clientHandler); Map statusMap = clientHandler.getResponseMap(); for (TSStatus status : statusMap.values()) { @@ -265,18 +269,29 @@ private void executeRollbackInvalidateCache(ConfigNodeProcedureEnv env) } } + private ByteBuffer getAddTemplateSetInfo() { + if (this.addTemplateSetInfo == null) { + this.addTemplateSetInfo = + ByteBuffer.wrap( + TemplateInternalRPCUtil.generateAddTemplateSetInfoBytes( + template, path.getFullPath())); + } + + return addTemplateSetInfo; + } + @Override - protected boolean isRollbackSupported(UnsetTemplateState unsetTemplateState) { + protected boolean isRollbackSupported(final UnsetTemplateState unsetTemplateState) { return true; } @Override - protected UnsetTemplateState getState(int stateId) { + protected UnsetTemplateState getState(final int stateId) { return UnsetTemplateState.values()[stateId]; } @Override - protected int getStateId(UnsetTemplateState unsetTemplateState) { + protected int getStateId(final UnsetTemplateState unsetTemplateState) { return unsetTemplateState.ordinal(); } @@ -305,17 +320,6 @@ public PartialPath getPath() { return path; } - private ByteBuffer getAddTemplateSetInfo() { - if (this.addTemplateSetInfo == null) { - this.addTemplateSetInfo = - ByteBuffer.wrap( - TemplateInternalRPCUtil.generateAddTemplateSetInfoBytes( - template, path.getFullPath())); - } - - return addTemplateSetInfo; - } - private ByteBuffer getInvalidateTemplateSetInfo() { if (this.invalidateTemplateSetInfo == null) { this.invalidateTemplateSetInfo = @@ -327,7 +331,7 @@ private ByteBuffer getInvalidateTemplateSetInfo() { } @Override - public void 
serialize(DataOutputStream stream) throws IOException { + public void serialize(final DataOutputStream stream) throws IOException { stream.writeShort( isGeneratedByPipe ? ProcedureType.PIPE_ENRICHED_UNSET_TEMPLATE_PROCEDURE.getTypeCode() @@ -340,7 +344,7 @@ public void serialize(DataOutputStream stream) throws IOException { } @Override - public void deserialize(ByteBuffer byteBuffer) { + public void deserialize(final ByteBuffer byteBuffer) { super.deserialize(byteBuffer); queryId = ReadWriteIOUtils.readString(byteBuffer); template = new Template(); @@ -350,10 +354,14 @@ public void deserialize(ByteBuffer byteBuffer) { } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - UnsetTemplateProcedure that = (UnsetTemplateProcedure) o; + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final UnsetTemplateProcedure that = (UnsetTemplateProcedure) o; return Objects.equals(getProcId(), that.getProcId()) && Objects.equals(getCurrentState(), that.getCurrentState()) && Objects.equals(getCycles(), that.getCycles()) diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/AbstractOperateSubscriptionProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/AbstractOperateSubscriptionProcedure.java index 4566738cd8d3e..07bbe2c014c42 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/AbstractOperateSubscriptionProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/AbstractOperateSubscriptionProcedure.java @@ -24,8 +24,6 @@ import org.apache.iotdb.confignode.persistence.subscription.SubscriptionInfo; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.node.AbstractNodeProcedure; import org.apache.iotdb.confignode.procedure.impl.subscription.consumer.runtime.ConsumerGroupMetaSyncProcedure; import org.apache.iotdb.confignode.procedure.impl.subscription.topic.runtime.TopicMetaSyncProcedure; @@ -59,6 +57,16 @@ public abstract class AbstractOperateSubscriptionProcedure private static final int RETRY_THRESHOLD = 1; + // Only used in rollback to reduce the number of network calls + // Pure in-memory object, not involved in snapshot serialization and deserialization. + // TODO: consider serializing this variable later + protected boolean isRollbackFromOperateOnDataNodesSuccessful = false; + + // Only used in rollback to avoid executing rollbackFromValidate multiple times + // Pure in-memory object, not involved in snapshot serialization and deserialization. 
+ // TODO: consider serializing this variable later + protected boolean isRollbackFromValidateSuccessful = false; + protected AtomicReference subscriptionInfo; protected AtomicReference acquireLockInternal( @@ -171,7 +179,7 @@ protected abstract void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env @Override protected Flow executeFromState(ConfigNodeProcedureEnv env, OperateSubscriptionState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws InterruptedException { if (subscriptionInfo == null) { LOGGER.warn( "ProcedureId {}: Subscription lock is not acquired, executeFromState({})'s execution will be skipped.", @@ -250,20 +258,25 @@ protected void rollbackState(ConfigNodeProcedureEnv env, OperateSubscriptionStat switch (state) { case VALIDATE: - try { - rollbackFromValidate(env); - } catch (Exception e) { - LOGGER.warn( - "ProcedureId {}: Failed to rollback from state [{}], because {}", - getProcId(), - state, - e.getMessage(), - e); + if (!isRollbackFromValidateSuccessful) { + try { + rollbackFromValidate(env); + isRollbackFromValidateSuccessful = true; + } catch (Exception e) { + LOGGER.warn( + "ProcedureId {}: Failed to rollback from state [{}], because {}", + getProcId(), + state, + e.getMessage(), + e); + } } break; case OPERATE_ON_CONFIG_NODES: try { - rollbackFromOperateOnConfigNodes(env); + if (!isRollbackFromOperateOnDataNodesSuccessful) { + rollbackFromOperateOnConfigNodes(env); + } } catch (Exception e) { LOGGER.warn( "ProcedureId {}: Failed to rollback from state [{}], because {}", @@ -275,7 +288,9 @@ protected void rollbackState(ConfigNodeProcedureEnv env, OperateSubscriptionStat break; case OPERATE_ON_DATA_NODES: try { + rollbackFromOperateOnConfigNodes(env); rollbackFromOperateOnDataNodes(env); + isRollbackFromOperateOnDataNodesSuccessful = true; } catch (Exception e) { LOGGER.warn( "ProcedureId {}: Failed to rollback from state [{}], because {}", @@ -293,10 +308,11 @@ protected void rollbackState(ConfigNodeProcedureEnv env, OperateSubscriptionStat protected abstract void rollbackFromValidate(ConfigNodeProcedureEnv env); - protected abstract void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env); + protected abstract void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) + throws SubscriptionException; protected abstract void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env) - throws IOException; + throws SubscriptionException, IOException; /** * Pushing all the topicMeta's to all the dataNodes. 
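The two guard flags above address re-entrancy: the procedure framework may invoke rollbackState again after a partial failure, and without the flags rollbackFromValidate and the config-node rollback would repeat their work, and their network calls, on every retry. A minimal sketch of the guard pattern, assuming only java.util; the RollbackPhase enum and rollbackOnce helper are illustrative names, not IoTDB API:

import java.util.EnumSet;
import java.util.Set;

public class GuardedRollback {
  public enum RollbackPhase { VALIDATE, OPERATE_ON_CONFIG_NODES, OPERATE_ON_DATA_NODES }

  // In-memory only, like isRollbackFromValidateSuccessful above: the flags are lost
  // on restart, so each rollback action must still tolerate running twice.
  private final Set<RollbackPhase> completed = EnumSet.noneOf(RollbackPhase.class);

  public void rollbackOnce(final RollbackPhase phase, final Runnable action) {
    if (completed.contains(phase)) {
      return; // already rolled back during an earlier retry; skip the network calls
    }
    action.run();         // may throw: the flag stays unset and the phase is retried
    completed.add(phase); // mark success only after the action completed
  }
}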
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/consumer/AlterConsumerGroupProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/consumer/AlterConsumerGroupProcedure.java index 6e6031b8102ee..69017422505cf 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/consumer/AlterConsumerGroupProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/consumer/AlterConsumerGroupProcedure.java @@ -108,7 +108,6 @@ public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) new TSStatus(TSStatusCode.ALTER_CONSUMER_ERROR.getStatusCode()) .setMessage(e.getMessage()); } - if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { throw new SubscriptionException( String.format( @@ -119,27 +118,20 @@ public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) @Override public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) - throws SubscriptionException { + throws SubscriptionException, IOException { LOGGER.info( "AlterConsumerGroupProcedure: executeFromOperateOnDataNodes({})", updatedConsumerGroupMeta.getConsumerGroupId()); - try { - final List statuses = - env.pushSingleConsumerGroupOnDataNode(updatedConsumerGroupMeta.serialize()); - if (RpcUtils.squashResponseStatusList(statuses).getCode() - != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - throw new SubscriptionException( - String.format( - "Failed to alter consumer group %s on data nodes, because %s", - updatedConsumerGroupMeta.getConsumerGroupId(), statuses)); - } - } catch (IOException e) { - LOGGER.warn("Failed to serialize the consumer group meta due to: ", e); + final List statuses = + env.pushSingleConsumerGroupOnDataNode(updatedConsumerGroupMeta.serialize()); + if (RpcUtils.squashResponseStatusList(statuses).getCode() + != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + // throw exception instead of logging warn, do not rely on metadata synchronization throw new SubscriptionException( String.format( - "Failed to alter consumer group %s on data nodes, because %s", - updatedConsumerGroupMeta.getConsumerGroupId(), e.getMessage())); + "Failed to alter consumer group (%s -> %s) on data nodes, because %s", + existingConsumerGroupMeta, updatedConsumerGroupMeta, statuses)); } } @@ -149,7 +141,8 @@ public void rollbackFromValidate(ConfigNodeProcedureEnv env) { } @Override - public void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) { + public void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) + throws SubscriptionException { LOGGER.info( "AlterConsumerGroupProcedure: rollbackFromOperateOnConfigNodes({})", updatedConsumerGroupMeta.getConsumerGroupId()); @@ -166,35 +159,28 @@ public void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) { new TSStatus(TSStatusCode.ALTER_CONSUMER_ERROR.getStatusCode()) .setMessage(e.getMessage()); } - if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - LOGGER.warn( - "Failed to rollback from altering consumer group {} on config nodes, because {}", - updatedConsumerGroupMeta.getConsumerGroupId(), - response); + throw new SubscriptionException( + String.format( + "Failed to rollback from altering consumer group (%s -> %s) on config nodes, because %s", + existingConsumerGroupMeta, updatedConsumerGroupMeta, response)); } } @Override - public void 
rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env) { + public void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env) + throws SubscriptionException, IOException { LOGGER.info("AlterConsumerGroupProcedure: rollbackFromOperateOnDataNodes"); - try { - final List statuses = - env.pushSingleConsumerGroupOnDataNode(existingConsumerGroupMeta.serialize()); - if (RpcUtils.squashResponseStatusList(statuses).getCode() - != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - throw new SubscriptionException( - String.format( - "Failed to rollback from altering consumer group %s on data nodes, because %s", - updatedConsumerGroupMeta.getConsumerGroupId(), statuses)); - } - } catch (IOException e) { - LOGGER.warn("Failed to serialize the consumer group meta due to: ", e); + final List statuses = + env.pushSingleConsumerGroupOnDataNode(existingConsumerGroupMeta.serialize()); + if (RpcUtils.squashResponseStatusList(statuses).getCode() + != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + // throw exception instead of logging warn, do not rely on metadata synchronization throw new SubscriptionException( String.format( - "Failed to rollback from altering consumer group %s on data nodes, because %s", - updatedConsumerGroupMeta.getConsumerGroupId(), e.getMessage())); + "Failed to rollback from altering consumer group (%s -> %s) on data nodes, because %s", + existingConsumerGroupMeta, updatedConsumerGroupMeta, statuses)); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/consumer/runtime/ConsumerGroupMetaSyncProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/consumer/runtime/ConsumerGroupMetaSyncProcedure.java index bb010eaca4062..93eb6c5a5fc35 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/consumer/runtime/ConsumerGroupMetaSyncProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/consumer/runtime/ConsumerGroupMetaSyncProcedure.java @@ -99,7 +99,8 @@ public boolean executeFromValidate(ConfigNodeProcedureEnv env) { } @Override - public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) { + public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) + throws SubscriptionException { LOGGER.info("ConsumerGroupMetaSyncProcedure: executeFromOperateOnConfigNodes"); final List consumerGroupMetaList = @@ -122,7 +123,8 @@ public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) { } @Override - public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) throws IOException { + public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) + throws SubscriptionException, IOException { LOGGER.info("ConsumerGroupMetaSyncProcedure: executeFromOperateOnDataNodes"); Map respMap = pushConsumerGroupMetaToDataNodes(env); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/AbstractOperateSubscriptionAndPipeProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/AbstractOperateSubscriptionAndPipeProcedure.java index e3d180a8f3068..2b420136253a8 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/AbstractOperateSubscriptionAndPipeProcedure.java +++ 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/AbstractOperateSubscriptionAndPipeProcedure.java @@ -19,7 +19,7 @@ package org.apache.iotdb.confignode.procedure.impl.subscription.subscription; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; import org.apache.iotdb.confignode.persistence.pipe.PipeTaskInfo; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.impl.subscription.AbstractOperateSubscriptionProcedure; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/CreateSubscriptionProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/CreateSubscriptionProcedure.java index 166f7b3da5e09..4a48ebdd35d6a 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/CreateSubscriptionProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/CreateSubscriptionProcedure.java @@ -20,15 +20,13 @@ package org.apache.iotdb.confignode.procedure.impl.subscription.subscription; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; import org.apache.iotdb.commons.subscription.meta.consumer.ConsumerGroupMeta; import org.apache.iotdb.commons.subscription.meta.topic.TopicMeta; import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.write.pipe.task.DropPipePlanV2; import org.apache.iotdb.confignode.consensus.request.write.pipe.task.OperateMultiplePipesPlanV2; -import org.apache.iotdb.confignode.consensus.request.write.subscription.topic.AlterMultipleTopicsPlan; -import org.apache.iotdb.confignode.consensus.request.write.subscription.topic.AlterTopicPlan; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.impl.pipe.AbstractOperatePipeProcedureV2; import org.apache.iotdb.confignode.procedure.impl.pipe.task.CreatePipeProcedureV2; @@ -55,28 +53,26 @@ import java.util.Objects; import java.util.stream.Collectors; -// TODO: check if it also needs meta sync to keep CN and DN in sync public class CreateSubscriptionProcedure extends AbstractOperateSubscriptionAndPipeProcedure { private static final Logger LOGGER = LoggerFactory.getLogger(CreateSubscriptionProcedure.class); private TSubscribeReq subscribeReq; + // execution order: alter consumer group -> create pipe + // rollback order: create pipe -> alter consumer group + // NOTE: The 'alter consumer group' operation must be performed before 'create pipe'. private AlterConsumerGroupProcedure alterConsumerGroupProcedure; - private List alterTopicProcedures = new ArrayList<>(); private List createPipeProcedures = new ArrayList<>(); - // Record failed index of procedures to rollback properly. - // We only record fail index when executing on config nodes, because when executing on data nodes - // fails, we just push all meta to data nodes. 
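The ordering comment above pins the contract for the composed sub-procedures: they execute as 'alter consumer group' then 'create pipe', and are rolled back in the reverse order. A minimal sketch of that forward-execute/reverse-rollback discipline, assuming only java.util; the Step interface and OrderedSteps class are hypothetical, not part of the procedure framework:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

interface Step {
  void execute() throws Exception;
  void rollback() throws Exception;
}

final class OrderedSteps {
  // Runs steps in declaration order; on failure, undoes only the steps that
  // already ran, last-executed first, then rethrows.
  static void run(final List<Step> steps) throws Exception {
    final Deque<Step> executed = new ArrayDeque<>();
    try {
      for (final Step step : steps) {
        step.execute();
        executed.push(step); // stack order = reverse of execution order
      }
    } catch (final Exception e) {
      for (final Step step : executed) {
        step.rollback(); // e.g. drop the created pipes before un-altering the consumer group
      }
      throw e;
    }
  }
}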
- private int alterTopicProcedureFailIndexOnCN = -1; - private int createPipeProcedureFailIndexOnCN = -1; + // TODO: remove this variable later + private final List alterTopicProcedures = new ArrayList<>(); // unused now public CreateSubscriptionProcedure() { super(); } - public CreateSubscriptionProcedure(TSubscribeReq subscribeReq) { + public CreateSubscriptionProcedure(final TSubscribeReq subscribeReq) { this.subscribeReq = subscribeReq; } @@ -86,229 +82,171 @@ protected SubscriptionOperation getOperation() { } @Override - protected boolean executeFromValidate(ConfigNodeProcedureEnv env) throws SubscriptionException { + protected boolean executeFromValidate(final ConfigNodeProcedureEnv env) + throws SubscriptionException { LOGGER.info("CreateSubscriptionProcedure: executeFromValidate"); subscriptionInfo.get().validateBeforeSubscribe(subscribeReq); - // alterConsumerGroupProcedure + // Construct AlterConsumerGroupProcedure + final String consumerGroupId = subscribeReq.getConsumerGroupId(); final ConsumerGroupMeta updatedConsumerGroupMeta = - subscriptionInfo.get().deepCopyConsumerGroupMeta(subscribeReq.getConsumerGroupId()); + subscriptionInfo.get().deepCopyConsumerGroupMeta(consumerGroupId); updatedConsumerGroupMeta.addSubscription( subscribeReq.getConsumerId(), subscribeReq.getTopicNames()); alterConsumerGroupProcedure = new AlterConsumerGroupProcedure(updatedConsumerGroupMeta, subscriptionInfo); - // alterTopicProcedures & createPipeProcedures - for (String topic : subscribeReq.getTopicNames()) { - TopicMeta updatedTopicMeta = subscriptionInfo.get().deepCopyTopicMeta(topic); - - if (updatedTopicMeta.addSubscribedConsumerGroup(subscribeReq.getConsumerGroupId())) { + // Construct CreatePipeProcedureV2s + for (final String topicName : subscribeReq.getTopicNames()) { + final String pipeName = + PipeStaticMeta.generateSubscriptionPipeName(topicName, consumerGroupId); + if (!subscriptionInfo.get().isTopicSubscribedByConsumerGroup(topicName, consumerGroupId) + // even if the subscription meta already exists, the pipe is still created when the + // corresponding pipe meta is missing + || !pipeTaskInfo.get().isPipeExisted(pipeName)) { + final TopicMeta topicMeta = subscriptionInfo.get().deepCopyTopicMeta(topicName); createPipeProcedures.add( new CreatePipeProcedureV2( new TCreatePipeReq() - .setPipeName( - PipeStaticMeta.generateSubscriptionPipeName( - topic, subscribeReq.getConsumerGroupId())) - .setExtractorAttributes(updatedTopicMeta.generateExtractorAttributes()) - .setProcessorAttributes(updatedTopicMeta.generateProcessorAttributes()) - .setConnectorAttributes( - updatedTopicMeta.generateConnectorAttributes( - subscribeReq.getConsumerGroupId())), + .setPipeName(pipeName) + .setExtractorAttributes(topicMeta.generateExtractorAttributes()) + .setProcessorAttributes(topicMeta.generateProcessorAttributes()) + .setConnectorAttributes(topicMeta.generateConnectorAttributes(consumerGroupId)), pipeTaskInfo)); - - alterTopicProcedures.add(new AlterTopicProcedure(updatedTopicMeta, subscriptionInfo)); } } + // Validate AlterConsumerGroupProcedure alterConsumerGroupProcedure.executeFromValidate(env); - for (AlterTopicProcedure alterTopicProcedure : alterTopicProcedures) { - alterTopicProcedure.executeFromValidate(env); - } - - for (CreatePipeProcedureV2 createPipeProcedure : createPipeProcedures) { + // Validate CreatePipeProcedureV2s + for (final CreatePipeProcedureV2 createPipeProcedure : createPipeProcedures) { createPipeProcedure.executeFromValidateTask(env);
createPipeProcedure.executeFromCalculateInfoForTask(env); } + return true; } - // TODO: check periodically if the subscription is still valid but no working pipe? @Override - protected void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) + protected void executeFromOperateOnConfigNodes(final ConfigNodeProcedureEnv env) throws SubscriptionException { LOGGER.info("CreateSubscriptionProcedure: executeFromOperateOnConfigNodes"); + // Execute AlterConsumerGroupProcedure alterConsumerGroupProcedure.executeFromOperateOnConfigNodes(env); - TSStatus response; - - List alterTopicPlans = - alterTopicProcedures.stream() - .map(AlterTopicProcedure::getUpdatedTopicMeta) - .map(AlterTopicPlan::new) - .collect(Collectors.toList()); - try { - response = - env.getConfigManager() - .getConsensusManager() - .write(new AlterMultipleTopicsPlan(alterTopicPlans)); - } catch (ConsensusException e) { - LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e); - response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); - response.setMessage(e.getMessage()); - } - if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() - && response.getSubStatusSize() > 0) { - // Record the failed index for rollback - alterTopicProcedureFailIndexOnCN = response.getSubStatusSize() - 1; - } - - List createPipePlans = + // Execute CreatePipeProcedureV2s + final List createPipePlans = createPipeProcedures.stream() .map(CreatePipeProcedureV2::constructPlan) .collect(Collectors.toList()); + TSStatus response; try { response = env.getConfigManager() .getConsensusManager() .write(new OperateMultiplePipesPlanV2(createPipePlans)); - } catch (ConsensusException e) { + } catch (final ConsensusException e) { LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e); response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); response.setMessage(e.getMessage()); } if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() && response.getSubStatusSize() > 0) { - // Record the failed index for rollback - createPipeProcedureFailIndexOnCN = response.getSubStatusSize() - 1; + throw new SubscriptionException( + String.format( + "Failed to create subscription with request %s on config nodes, because %s", + subscribeReq, response)); } } @Override - protected void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) + protected void executeFromOperateOnDataNodes(final ConfigNodeProcedureEnv env) throws SubscriptionException, IOException { LOGGER.info("CreateSubscriptionProcedure: executeFromOperateOnDataNodes"); + // Push consumer group meta to data nodes alterConsumerGroupProcedure.executeFromOperateOnDataNodes(env); - // push topic meta to data nodes - List topicMetaBinaryList = new ArrayList<>(); - for (AlterTopicProcedure alterTopicProcedure : alterTopicProcedures) { - topicMetaBinaryList.add(alterTopicProcedure.getUpdatedTopicMeta().serialize()); - } - if (pushTopicMetaHasException(env.pushMultiTopicMetaToDataNodes(topicMetaBinaryList))) { - // If not all topic meta are pushed successfully, the meta can be pushed during meta sync. 
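With the fail-index bookkeeping gone, executeFromOperateOnConfigNodes batches every pipe plan into one OperateMultiplePipesPlanV2 consensus write and throws on a partial result, leaving cleanup to the rollback path. A sketch of that batched write-then-verify shape; a plan is modeled as a String, and Status and ConsensusClient are hypothetical stand-ins for TSStatus and ConsensusManager:

import java.util.List;

final class BatchedWrite {
  record Status(int code, List<Status> subStatuses, String message) {}

  interface ConsensusClient {
    Status write(List<String> plans); // one consensus round for the whole batch
  }

  static void writeAll(
      final ConsensusClient client, final List<String> plans, final int successCode) {
    final Status response = client.write(plans);
    // A non-success code carrying sub-statuses means the batch was only partially
    // applied; failing fast hands cleanup to the procedure's rollback path instead
    // of waiting for background meta sync.
    if (response.code() != successCode && !response.subStatuses().isEmpty()) {
      throw new IllegalStateException("Batched plan write failed: " + response.message());
    }
  }
}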
- LOGGER.warn( - "Failed to alter topics when creating subscription, metadata will be synchronized later."); - } - - // push pipe meta to data nodes - List pipeNames = + // Push pipe meta to data nodes + final List pipeNames = createPipeProcedures.stream() .map(CreatePipeProcedureV2::getPipeName) .collect(Collectors.toList()); - String exceptionMessage = + final String exceptionMessage = AbstractOperatePipeProcedureV2.parsePushPipeMetaExceptionForPipe( null, pushMultiPipeMetaToDataNodes(pipeNames, env)); if (!exceptionMessage.isEmpty()) { - // If not all pipe meta are pushed successfully, the meta can be pushed during meta sync. - LOGGER.warn( - "Failed to create pipes {} when creating subscription, details: {}, metadata will be synchronized later.", - pipeNames, - exceptionMessage); + // throw exception instead of logging warn, do not rely on metadata synchronization + throw new SubscriptionException( + String.format( + "Failed to create pipes %s when creating subscription with request %s, details: %s, metadata will be synchronized later.", + pipeNames, subscribeReq, exceptionMessage)); } } @Override - protected void rollbackFromValidate(ConfigNodeProcedureEnv env) { + protected void rollbackFromValidate(final ConfigNodeProcedureEnv env) { LOGGER.info("CreateSubscriptionProcedure: rollbackFromValidate"); } @Override - protected void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) { + protected void rollbackFromOperateOnConfigNodes(final ConfigNodeProcedureEnv env) + throws SubscriptionException { LOGGER.info("CreateSubscriptionProcedure: rollbackFromOperateOnConfigNodes"); - // TODO: roll back from the last executed procedure to the first executed - alterConsumerGroupProcedure.rollbackFromOperateOnConfigNodes(env); - + // Rollback CreatePipeProcedureV2s + final List dropPipePlans = + createPipeProcedures.stream() + .map(procedure -> new DropPipePlanV2(procedure.getPipeName())) + .collect(Collectors.toList()); TSStatus response; - - // rollback alterTopicProcedures - List alterTopicRollbackPlans = new ArrayList<>(); - for (int i = 0; - i <= Math.min(alterTopicProcedureFailIndexOnCN, alterTopicProcedures.size()); - i++) { - alterTopicRollbackPlans.add( - new AlterTopicPlan(alterTopicProcedures.get(i).getExistedTopicMeta())); - } - try { - response = - env.getConfigManager() - .getConsensusManager() - .write(new AlterMultipleTopicsPlan(alterTopicRollbackPlans)); - } catch (ConsensusException e) { - LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e); - response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); - response.setMessage(e.getMessage()); - } - // if failed to rollback, throw exception - if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - throw new SubscriptionException(response.getMessage()); - } - - // rollback createPipeProcedures - List dropPipePlans = new ArrayList<>(); - for (int i = 0; - i <= Math.min(createPipeProcedureFailIndexOnCN, createPipeProcedures.size()); - i++) { - dropPipePlans.add(new DropPipePlanV2(createPipeProcedures.get(i).getPipeName())); - } try { response = env.getConfigManager() .getConsensusManager() .write(new OperateMultiplePipesPlanV2(dropPipePlans)); - } catch (ConsensusException e) { + } catch (final ConsensusException e) { LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e); response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); response.setMessage(e.getMessage()); } - // if failed to rollback, throw exception if 
(response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - throw new SubscriptionException(response.getMessage()); + throw new SubscriptionException( + String.format( + "Failed to rollback creating subscription with request %s on config nodes, because %s", + subscribeReq, response)); } + + // Rollback AlterConsumerGroupProcedure + alterConsumerGroupProcedure.rollbackFromOperateOnConfigNodes(env); } @Override - protected void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env) throws IOException { + protected void rollbackFromOperateOnDataNodes(final ConfigNodeProcedureEnv env) + throws SubscriptionException, IOException { LOGGER.info("CreateSubscriptionProcedure: rollbackFromOperateOnDataNodes"); - // TODO: roll back from the last executed procedure to the first executed - alterConsumerGroupProcedure.rollbackFromOperateOnDataNodes(env); - - // Push all topic metas to datanode, may be time-consuming - if (pushTopicMetaHasException(pushTopicMetaToDataNodes(env))) { - LOGGER.warn( - "Failed to rollback alter topics when creating subscription, metadata will be synchronized later."); - } - // Push all pipe metas to datanode, may be time-consuming - String exceptionMessage = + final String exceptionMessage = AbstractOperatePipeProcedureV2.parsePushPipeMetaExceptionForPipe( null, AbstractOperatePipeProcedureV2.pushPipeMetaToDataNodes(env, pipeTaskInfo)); if (!exceptionMessage.isEmpty()) { - LOGGER.warn( - "Failed to rollback create pipes when creating subscription, details: {}, metadata will be synchronized later.", - exceptionMessage); + // throw exception instead of logging warn, do not rely on metadata synchronization + throw new SubscriptionException( + String.format( + "Failed to rollback create pipes when creating subscription with request %s, because %s", + subscribeReq, exceptionMessage)); } - } - // TODO: we still need some strategies to clean the subscription if it's not valid anymore + // Rollback AlterConsumerGroupProcedure + alterConsumerGroupProcedure.rollbackFromOperateOnDataNodes(env); + } @Override - public void serialize(DataOutputStream stream) throws IOException { + public void serialize(final DataOutputStream stream) throws IOException { stream.writeShort(ProcedureType.CREATE_SUBSCRIPTION_PROCEDURE.getTypeCode()); super.serialize(stream); @@ -318,12 +256,12 @@ public void serialize(DataOutputStream stream) throws IOException { final int size = subscribeReq.getTopicNamesSize(); ReadWriteIOUtils.write(size, stream); if (size != 0) { - for (String topicName : subscribeReq.getTopicNames()) { + for (final String topicName : subscribeReq.getTopicNames()) { ReadWriteIOUtils.write(topicName, stream); } } - // serialize consumerGroupProcedure + // Serialize AlterConsumerGroupProcedure if (alterConsumerGroupProcedure != null) { ReadWriteIOUtils.write(true, stream); alterConsumerGroupProcedure.serialize(stream); @@ -331,22 +269,22 @@ public void serialize(DataOutputStream stream) throws IOException { ReadWriteIOUtils.write(false, stream); } - // serialize topic procedures + // Serialize AlterTopicProcedures if (alterTopicProcedures != null) { ReadWriteIOUtils.write(true, stream); ReadWriteIOUtils.write(alterTopicProcedures.size(), stream); - for (AlterTopicProcedure topicProcedure : alterTopicProcedures) { + for (final AlterTopicProcedure topicProcedure : alterTopicProcedures) { topicProcedure.serialize(stream); } } else { ReadWriteIOUtils.write(false, stream); } - // serialize pipe procedures + // Serialize CreatePipeProcedureV2s if (createPipeProcedures != null) { 
ReadWriteIOUtils.write(true, stream); ReadWriteIOUtils.write(createPipeProcedures.size(), stream); - for (CreatePipeProcedureV2 pipeProcedure : createPipeProcedures) { + for (final CreatePipeProcedureV2 pipeProcedure : createPipeProcedures) { pipeProcedure.serialize(stream); } } else { @@ -355,7 +293,7 @@ public void serialize(DataOutputStream stream) throws IOException { } @Override - public void deserialize(ByteBuffer byteBuffer) { + public void deserialize(final ByteBuffer byteBuffer) { super.deserialize(byteBuffer); subscribeReq = @@ -368,7 +306,7 @@ public void deserialize(ByteBuffer byteBuffer) { subscribeReq.getTopicNames().add(ReadWriteIOUtils.readString(byteBuffer)); } - // deserialize consumerGroupProcedure + // Deserialize AlterConsumerGroupProcedure if (ReadWriteIOUtils.readBool(byteBuffer)) { // This readShort should return ALTER_CONSUMER_GROUP_PROCEDURE, and we ignore it. ReadWriteIOUtils.readShort(byteBuffer); @@ -377,27 +315,27 @@ public void deserialize(ByteBuffer byteBuffer) { alterConsumerGroupProcedure.deserialize(byteBuffer); } - // deserialize topic procedures + // Deserialize AlterTopicProcedures if (ReadWriteIOUtils.readBool(byteBuffer)) { size = ReadWriteIOUtils.readInt(byteBuffer); for (int i = 0; i < size; ++i) { // This readShort should return ALTER_TOPIC_PROCEDURE, and we ignore it. ReadWriteIOUtils.readShort(byteBuffer); - AlterTopicProcedure topicProcedure = new AlterTopicProcedure(); + final AlterTopicProcedure topicProcedure = new AlterTopicProcedure(); topicProcedure.deserialize(byteBuffer); alterTopicProcedures.add(topicProcedure); } } - // deserialize pipe procedures + // Deserialize CreatePipeProcedureV2s if (ReadWriteIOUtils.readBool(byteBuffer)) { size = ReadWriteIOUtils.readInt(byteBuffer); for (int i = 0; i < size; ++i) { // This readShort should return CREATE_PIPE_PROCEDURE or START_PIPE_PROCEDURE. 
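The serialize/deserialize pair above relies on a fixed layout: a boolean presence flag per optional field, an int element count per list, and a short type code ahead of each nested procedure so deserialization can dispatch (as the comment notes for CREATE_PIPE_PROCEDURE or START_PIPE_PROCEDURE). A minimal sketch of that codec using plain DataOutputStream/ByteBuffer; OptionalListCodec is an illustrative name, and ReadWriteIOUtils' byte layout is approximated as one byte per boolean:

import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

final class OptionalListCodec {
  static void write(final DataOutputStream out, final List<Short> typeCodes)
      throws IOException {
    out.writeBoolean(typeCodes != null); // presence flag for the optional list
    if (typeCodes != null) {
      out.writeInt(typeCodes.size()); // element count
      for (final short code : typeCodes) {
        out.writeShort(code); // type code written ahead of each element's payload
      }
    }
  }

  static List<Short> read(final ByteBuffer in) {
    final List<Short> codes = new ArrayList<>();
    if (in.get() == 1) { // presence flag, one byte
      final int size = in.getInt();
      for (int i = 0; i < size; ++i) {
        codes.add(in.getShort()); // the real code dispatches on this value
      }
    }
    return codes;
  }
}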
- short typeCode = ReadWriteIOUtils.readShort(byteBuffer); + final short typeCode = ReadWriteIOUtils.readShort(byteBuffer); if (typeCode == ProcedureType.CREATE_PIPE_PROCEDURE_V2.getTypeCode()) { - CreatePipeProcedureV2 createPipeProcedureV2 = new CreatePipeProcedureV2(); + final CreatePipeProcedureV2 createPipeProcedureV2 = new CreatePipeProcedureV2(); createPipeProcedureV2.deserialize(byteBuffer); createPipeProcedures.add(createPipeProcedureV2); } @@ -406,20 +344,19 @@ public void deserialize(ByteBuffer byteBuffer) { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - CreateSubscriptionProcedure that = (CreateSubscriptionProcedure) o; + final CreateSubscriptionProcedure that = (CreateSubscriptionProcedure) o; return Objects.equals(getProcId(), that.getProcId()) && Objects.equals(getCurrentState(), that.getCurrentState()) && getCycles() == that.getCycles() && Objects.equals(subscribeReq, that.subscribeReq) && Objects.equals(alterConsumerGroupProcedure, that.alterConsumerGroupProcedure) - && Objects.equals(alterTopicProcedures, that.alterTopicProcedures) && Objects.equals(createPipeProcedures, that.createPipeProcedures); } @@ -431,13 +368,12 @@ public int hashCode() { getCycles(), subscribeReq, alterConsumerGroupProcedure, - alterTopicProcedures, createPipeProcedures); } @TestOnly public void setAlterConsumerGroupProcedure( - AlterConsumerGroupProcedure alterConsumerGroupProcedure) { + final AlterConsumerGroupProcedure alterConsumerGroupProcedure) { this.alterConsumerGroupProcedure = alterConsumerGroupProcedure; } @@ -447,17 +383,7 @@ public AlterConsumerGroupProcedure getAlterConsumerGroupProcedure() { } @TestOnly - public void setAlterTopicProcedures(List alterTopicProcedures) { - this.alterTopicProcedures = alterTopicProcedures; - } - - @TestOnly - public List getAlterTopicProcedures() { - return this.alterTopicProcedures; - } - - @TestOnly - public void setCreatePipeProcedures(List createPipeProcedures) { + public void setCreatePipeProcedures(final List createPipeProcedures) { this.createPipeProcedures = createPipeProcedures; } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/DropSubscriptionProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/DropSubscriptionProcedure.java index 7d0c1e09cbdbe..6741a6c1e2a84 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/DropSubscriptionProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/DropSubscriptionProcedure.java @@ -20,15 +20,12 @@ package org.apache.iotdb.confignode.procedure.impl.subscription.subscription; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; import org.apache.iotdb.commons.subscription.meta.consumer.ConsumerGroupMeta; -import org.apache.iotdb.commons.subscription.meta.topic.TopicMeta; import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.write.pipe.task.DropPipePlanV2; import org.apache.iotdb.confignode.consensus.request.write.pipe.task.OperateMultiplePipesPlanV2; 
-import org.apache.iotdb.confignode.consensus.request.write.subscription.topic.AlterMultipleTopicsPlan; -import org.apache.iotdb.confignode.consensus.request.write.subscription.topic.AlterTopicPlan; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.impl.pipe.AbstractOperatePipeProcedureV2; import org.apache.iotdb.confignode.procedure.impl.pipe.task.DropPipeProcedureV2; @@ -55,23 +52,20 @@ import java.util.Set; import java.util.stream.Collectors; -// TODO: check if it also needs meta sync to keep CN and DN in sync public class DropSubscriptionProcedure extends AbstractOperateSubscriptionAndPipeProcedure { private static final Logger LOGGER = LoggerFactory.getLogger(DropSubscriptionProcedure.class); private TUnsubscribeReq unsubscribeReq; - // NOTE: The 'drop pipe' operation should be performed before 'alter consumer group'. + // execution order: drop pipe -> alter consumer group + // rollback order: alter consumer group -> drop pipe (no-op) + // NOTE: The 'drop pipe' operation must be performed before 'alter consumer group'. private List dropPipeProcedures = new ArrayList<>(); - private List alterTopicProcedures = new ArrayList<>(); private AlterConsumerGroupProcedure alterConsumerGroupProcedure; - // Record failed index of procedures to rollback properly. - // We only record fail index when executing on config nodes, because when executing on data nodes - // fails, we just push all meta to data nodes. - private int alterTopicProcedureFailIndexOnCN = -1; - private int dropPipeProcedureFailIndexOnCN = -1; + // TODO: remove this variable later + private final List alterTopicProcedures = new ArrayList<>(); // unused now public DropSubscriptionProcedure() { super(); @@ -107,11 +101,6 @@ protected boolean executeFromValidate(final ConfigNodeProcedureEnv env) for (final String topic : unsubscribeReq.getTopicNames()) { if (topicsUnsubByGroup.contains(topic)) { // Topic will be subscribed by no consumers in this group - - final TopicMeta updatedTopicMeta = subscriptionInfo.get().deepCopyTopicMeta(topic); - updatedTopicMeta.removeSubscribedConsumerGroup(unsubscribeReq.getConsumerGroupId()); - - alterTopicProcedures.add(new AlterTopicProcedure(updatedTopicMeta, subscriptionInfo)); dropPipeProcedures.add( new DropPipeProcedureV2( PipeStaticMeta.generateSubscriptionPipeName( @@ -126,11 +115,6 @@ protected boolean executeFromValidate(final ConfigNodeProcedureEnv env) dropPipeProcedure.executeFromCalculateInfoForTask(env); } - // Validate AlterTopicProcedures - for (final AlterTopicProcedure alterTopicProcedure : alterTopicProcedures) { - alterTopicProcedure.executeFromValidate(env); - } - // Validate AlterConsumerGroupProcedure alterConsumerGroupProcedure.executeFromValidate(env); return true; @@ -141,13 +125,12 @@ protected void executeFromOperateOnConfigNodes(final ConfigNodeProcedureEnv env) throws SubscriptionException { LOGGER.info("DropSubscriptionProcedure: executeFromOperateOnConfigNodes"); - TSStatus response; - // Execute DropPipeProcedureV2s final List dropPipePlans = dropPipeProcedures.stream() .map(proc -> new DropPipePlanV2(proc.getPipeName())) .collect(Collectors.toList()); + TSStatus response; try { response = env.getConfigManager() @@ -160,30 +143,10 @@ protected void executeFromOperateOnConfigNodes(final ConfigNodeProcedureEnv env) } if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() && response.getSubStatusSize() > 0) { - // Record the failed index for rollback - dropPipeProcedureFailIndexOnCN = 
response.getSubStatusSize() - 1; - } - - // Execute AlterTopicProcedures - final List alterTopicPlans = - alterTopicProcedures.stream() - .map(AlterTopicProcedure::getUpdatedTopicMeta) - .map(AlterTopicPlan::new) - .collect(Collectors.toList()); - try { - response = - env.getConfigManager() - .getConsensusManager() - .write(new AlterMultipleTopicsPlan(alterTopicPlans)); - } catch (final ConsensusException e) { - LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e); - response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); - response.setMessage(e.getMessage()); - } - if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() - && response.getSubStatusSize() > 0) { - // Record the failed index for rollback - alterTopicProcedureFailIndexOnCN = response.getSubStatusSize() - 1; + throw new SubscriptionException( + String.format( + "Failed to drop subscription with request %s on config nodes, because %s", + unsubscribeReq, response)); } // Execute AlterConsumerGroupProcedure @@ -204,22 +167,11 @@ protected void executeFromOperateOnDataNodes(final ConfigNodeProcedureEnv env) AbstractOperatePipeProcedureV2.parsePushPipeMetaExceptionForPipe( null, dropMultiPipeOnDataNodes(pipeNames, env)); if (!exceptionMessage.isEmpty()) { - // If not all pipe meta are pushed successfully, the meta can be pushed during meta sync. - LOGGER.warn( - "Failed to drop pipes {} when dropping subscription, details: {}, metadata will be synchronized later.", - pipeNames, - exceptionMessage); - } - - // Push topic meta to data nodes - final List topicMetaBinaryList = new ArrayList<>(); - for (final AlterTopicProcedure alterTopicProcedure : alterTopicProcedures) { - topicMetaBinaryList.add(alterTopicProcedure.getUpdatedTopicMeta().serialize()); - } - if (pushTopicMetaHasException(env.pushMultiTopicMetaToDataNodes(topicMetaBinaryList))) { - // If not all topic meta are pushed successfully, the meta can be pushed during meta sync. 
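Each data-node step here pushes metadata to every registered data node, squashes the per-node statuses into a single verdict, and now throws on any failure instead of logging and deferring to meta sync. A sketch of that aggregation; NodeStatus and squash are hypothetical stand-ins for TSStatus and RpcUtils.squashResponseStatusList:

import java.util.List;

final class PushVerify {
  record NodeStatus(int code, String message) {}

  static final int SUCCESS = 200; // stand-in for TSStatusCode.SUCCESS_STATUS

  // Success only if every data node succeeded; otherwise surface the first failure.
  static NodeStatus squash(final List<NodeStatus> statuses) {
    for (final NodeStatus status : statuses) {
      if (status.code() != SUCCESS) {
        return status;
      }
    }
    return new NodeStatus(SUCCESS, "ok");
  }

  static void pushAndVerify(final List<NodeStatus> perNodeStatuses) {
    final NodeStatus squashed = squash(perNodeStatuses);
    if (squashed.code() != SUCCESS) {
      // Throw instead of warning: the procedure rolls back immediately rather
      // than relying on later metadata synchronization.
      throw new IllegalStateException("Push rejected by a data node: " + squashed.message());
    }
  }
}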
- LOGGER.warn( - "Failed to alter topics when creating subscription, metadata will be synchronized later."); + // throw exception instead of logging warn, do not rely on metadata synchronization + throw new SubscriptionException( + String.format( + "Failed to drop pipes %s when dropping subscription with request %s, because %s", + pipeNames, unsubscribeReq, exceptionMessage)); } // Push consumer group meta to data nodes @@ -232,62 +184,25 @@ protected void rollbackFromValidate(final ConfigNodeProcedureEnv env) { } @Override - protected void rollbackFromOperateOnConfigNodes(final ConfigNodeProcedureEnv env) { + protected void rollbackFromOperateOnConfigNodes(final ConfigNodeProcedureEnv env) + throws SubscriptionException { LOGGER.info("DropSubscriptionProcedure: rollbackFromOperateOnConfigNodes"); // Rollback AlterConsumerGroupProcedure alterConsumerGroupProcedure.rollbackFromOperateOnConfigNodes(env); - // Rollback AlterTopicProcedures - TSStatus response; - final List alterTopicRollbackPlans = new ArrayList<>(); - for (int i = 0; - i <= Math.min(alterTopicProcedureFailIndexOnCN, alterTopicProcedures.size()); - i++) { - alterTopicRollbackPlans.add( - new AlterTopicPlan(alterTopicProcedures.get(i).getExistedTopicMeta())); - } - try { - response = - env.getConfigManager() - .getConsensusManager() - .write(new AlterMultipleTopicsPlan(alterTopicRollbackPlans)); - } catch (final ConsensusException e) { - LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e); - response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); - response.setMessage(e.getMessage()); - } - // If failed to rollback, throw exception - if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - throw new SubscriptionException(response.getMessage()); - } - // Do nothing to rollback DropPipeProcedureV2s } @Override protected void rollbackFromOperateOnDataNodes(final ConfigNodeProcedureEnv env) - throws IOException { + throws SubscriptionException, IOException { LOGGER.info("DropSubscriptionProcedure: rollbackFromOperateOnDataNodes"); // Rollback AlterConsumerGroupProcedure alterConsumerGroupProcedure.rollbackFromOperateOnDataNodes(env); - // Push all topic metas to datanode, may be time-consuming - if (pushTopicMetaHasException(pushTopicMetaToDataNodes(env))) { - LOGGER.warn( - "Failed to rollback alter topics when dropping subscription, metadata will be synchronized later."); - } - - // Push all pipe metas to datanode, may be time-consuming - final String exceptionMessage = - AbstractOperatePipeProcedureV2.parsePushPipeMetaExceptionForPipe( - null, AbstractOperatePipeProcedureV2.pushPipeMetaToDataNodes(env, pipeTaskInfo)); - if (!exceptionMessage.isEmpty()) { - LOGGER.warn( - "Failed to rollback create pipes when dropping subscription, details: {}, metadata will be synchronized later.", - exceptionMessage); - } + // Do nothing to rollback DropPipeProcedureV2s } @Override @@ -306,7 +221,7 @@ public void serialize(final DataOutputStream stream) throws IOException { } } - // serialize consumerGroupProcedure + // Serialize AlterConsumerGroupProcedure if (alterConsumerGroupProcedure != null) { ReadWriteIOUtils.write(true, stream); alterConsumerGroupProcedure.serialize(stream); @@ -314,7 +229,7 @@ public void serialize(final DataOutputStream stream) throws IOException { ReadWriteIOUtils.write(false, stream); } - // serialize topic procedures + // Serialize AlterTopicProcedures if (alterTopicProcedures != null) { ReadWriteIOUtils.write(true, stream); 
ReadWriteIOUtils.write(alterTopicProcedures.size(), stream); @@ -325,7 +240,7 @@ public void serialize(final DataOutputStream stream) throws IOException { ReadWriteIOUtils.write(false, stream); } - // serialize pipe procedures + // Serialize DropPipeProcedureV2s if (dropPipeProcedures != null) { ReadWriteIOUtils.write(true, stream); ReadWriteIOUtils.write(dropPipeProcedures.size(), stream); @@ -351,7 +266,7 @@ public void deserialize(final ByteBuffer byteBuffer) { unsubscribeReq.getTopicNames().add(ReadWriteIOUtils.readString(byteBuffer)); } - // deserialize consumerGroupProcedure + // Deserialize AlterConsumerGroupProcedure if (ReadWriteIOUtils.readBool(byteBuffer)) { // This readShort should return ALTER_CONSUMER_GROUP_PROCEDURE, and we ignore it. ReadWriteIOUtils.readShort(byteBuffer); @@ -360,7 +275,7 @@ public void deserialize(final ByteBuffer byteBuffer) { alterConsumerGroupProcedure.deserialize(byteBuffer); } - // deserialize topic procedures + // Deserialize AlterTopicProcedures if (ReadWriteIOUtils.readBool(byteBuffer)) { size = ReadWriteIOUtils.readInt(byteBuffer); for (int i = 0; i < size; ++i) { @@ -373,7 +288,7 @@ public void deserialize(final ByteBuffer byteBuffer) { } } - // deserialize pipe procedures + // Deserialize DropPipeProcedureV2s if (ReadWriteIOUtils.readBool(byteBuffer)) { size = ReadWriteIOUtils.readInt(byteBuffer); for (int i = 0; i < size; ++i) { @@ -402,7 +317,6 @@ public boolean equals(final Object o) { && getCycles() == that.getCycles() && Objects.equals(unsubscribeReq, that.unsubscribeReq) && Objects.equals(alterConsumerGroupProcedure, that.alterConsumerGroupProcedure) - && Objects.equals(alterTopicProcedures, that.alterTopicProcedures) && Objects.equals(dropPipeProcedures, that.dropPipeProcedures); } @@ -414,7 +328,6 @@ public int hashCode() { getCycles(), unsubscribeReq, alterConsumerGroupProcedure, - alterTopicProcedures, dropPipeProcedures); } @@ -429,16 +342,6 @@ public AlterConsumerGroupProcedure getAlterConsumerGroupProcedure() { return this.alterConsumerGroupProcedure; } - @TestOnly - public void setAlterTopicProcedures(final List alterTopicProcedures) { - this.alterTopicProcedures = alterTopicProcedures; - } - - @TestOnly - public List getAlterTopicProcedures() { - return this.alterTopicProcedures; - } - @TestOnly public void setDropPipeProcedures(final List dropPipeProcedures) { this.dropPipeProcedures = dropPipeProcedures; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/AlterTopicProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/AlterTopicProcedure.java index f8fbdab72e0c4..4faa2cfa0c17f 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/AlterTopicProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/AlterTopicProcedure.java @@ -108,7 +108,6 @@ public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) response = new TSStatus(TSStatusCode.ALTER_TOPIC_ERROR.getStatusCode()).setMessage(e.getMessage()); } - if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { throw new SubscriptionException( String.format( @@ -119,25 +118,18 @@ public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) @Override public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) - throws SubscriptionException { + throws SubscriptionException, IOException { LOGGER.info( 
"AlterTopicProcedure: executeFromOperateOnDataNodes({})", updatedTopicMeta.getTopicName()); - try { - final List statuses = env.pushSingleTopicOnDataNode(updatedTopicMeta.serialize()); - if (RpcUtils.squashResponseStatusList(statuses).getCode() - != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - throw new SubscriptionException( - String.format( - "Failed to alter topic (%s -> %s) on data nodes, because %s", - existedTopicMeta, updatedTopicMeta, statuses)); - } - } catch (IOException e) { - LOGGER.warn("Failed to serialize the topic meta due to: ", e); + final List statuses = env.pushSingleTopicOnDataNode(updatedTopicMeta.serialize()); + if (RpcUtils.squashResponseStatusList(statuses).getCode() + != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + // throw exception instead of logging warn, do not rely on metadata synchronization throw new SubscriptionException( String.format( "Failed to alter topic (%s -> %s) on data nodes, because %s", - existedTopicMeta, updatedTopicMeta, e.getMessage())); + existedTopicMeta, updatedTopicMeta, statuses)); } } @@ -147,7 +139,8 @@ public void rollbackFromValidate(ConfigNodeProcedureEnv env) { } @Override - public void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) { + public void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) + throws SubscriptionException { LOGGER.info( "AlterTopicProcedure: rollbackFromOperateOnConfigNodes({})", updatedTopicMeta.getTopicName()); @@ -161,7 +154,6 @@ public void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) { response = new TSStatus(TSStatusCode.ALTER_TOPIC_ERROR.getStatusCode()).setMessage(e.getMessage()); } - if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { throw new SubscriptionException( String.format( @@ -171,25 +163,19 @@ public void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) { } @Override - public void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env) { + public void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env) + throws SubscriptionException, IOException { LOGGER.info( "AlterTopicProcedure: rollbackFromOperateOnDataNodes({})", updatedTopicMeta.getTopicName()); - try { - final List statuses = env.pushSingleTopicOnDataNode(existedTopicMeta.serialize()); - if (RpcUtils.squashResponseStatusList(statuses).getCode() - != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - throw new SubscriptionException( - String.format( - "Failed to rollback from altering topic (%s -> %s) on data nodes, because %s", - updatedTopicMeta, existedTopicMeta, statuses)); - } - } catch (IOException e) { - LOGGER.warn("Failed to serialize the topic meta due to: ", e); + final List statuses = env.pushSingleTopicOnDataNode(existedTopicMeta.serialize()); + if (RpcUtils.squashResponseStatusList(statuses).getCode() + != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + // throw exception instead of logging warn, do not rely on metadata synchronization throw new SubscriptionException( String.format( "Failed to rollback from altering topic (%s -> %s) on data nodes, because %s", - updatedTopicMeta, existedTopicMeta, e.getMessage())); + updatedTopicMeta, existedTopicMeta, statuses)); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/CreateTopicProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/CreateTopicProcedure.java index 65035f996bf3e..c27205290a8c3 100644 --- 
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/CreateTopicProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/CreateTopicProcedure.java @@ -96,7 +96,6 @@ protected void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) response = new TSStatus(TSStatusCode.CREATE_TOPIC_ERROR.getStatusCode()).setMessage(e.getMessage()); } - if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { throw new SubscriptionException( String.format( @@ -106,22 +105,16 @@ protected void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) @Override protected void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) - throws SubscriptionException { + throws SubscriptionException, IOException { LOGGER.info("CreateTopicProcedure: executeFromOperateOnDataNodes({})", topicMeta); - try { - final List statuses = env.pushSingleTopicOnDataNode(topicMeta.serialize()); - if (RpcUtils.squashResponseStatusList(statuses).getCode() - != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - throw new SubscriptionException( - String.format( - "Failed to create topic %s on data nodes, because %s", topicMeta, statuses)); - } - } catch (IOException e) { - LOGGER.warn("Failed to serialize the topic meta due to: ", e); + final List statuses = env.pushSingleTopicOnDataNode(topicMeta.serialize()); + if (RpcUtils.squashResponseStatusList(statuses).getCode() + != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + // throw exception instead of logging warn, do not rely on metadata synchronization throw new SubscriptionException( String.format( - "Failed to create topic %s on data nodes, because %s", topicMeta, e.getMessage())); + "Failed to create topic %s on data nodes, because %s", topicMeta, statuses)); } } @@ -131,7 +124,8 @@ protected void rollbackFromValidate(ConfigNodeProcedureEnv env) { } @Override - protected void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) { + protected void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) + throws SubscriptionException { LOGGER.info("CreateTopicProcedure: rollbackFromCreateOnConfigNodes({})", topicMeta); TSStatus response; @@ -145,24 +139,27 @@ protected void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) { response = new TSStatus(TSStatusCode.DROP_TOPIC_ERROR.getStatusCode()).setMessage(e.getMessage()); } - if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { throw new SubscriptionException( String.format( - "Failed to rollback topic %s on config nodes, because %s", topicMeta, response)); + "Failed to rollback creating topic %s on config nodes, because %s", + topicMeta, response)); } } @Override - protected void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env) { + protected void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env) + throws SubscriptionException { LOGGER.info("CreateTopicProcedure: rollbackFromCreateOnDataNodes({})", topicMeta); final List statuses = env.dropSingleTopicOnDataNode(topicMeta.getTopicName()); if (RpcUtils.squashResponseStatusList(statuses).getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + // throw exception instead of logging warn, do not rely on metadata synchronization throw new SubscriptionException( String.format( - "Failed to rollback topic %s on data nodes, because %s", topicMeta, statuses)); + "Failed to rollback creating topic %s on data nodes, because %s", + topicMeta, statuses)); } } diff --git 
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/DropTopicProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/DropTopicProcedure.java index f1cfbb59d1055..363e5716ee8ea 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/DropTopicProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/DropTopicProcedure.java @@ -81,7 +81,6 @@ protected void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) response = new TSStatus(TSStatusCode.DROP_TOPIC_ERROR.getStatusCode()).setMessage(e.getMessage()); } - if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { throw new SubscriptionException( String.format( @@ -90,13 +89,13 @@ protected void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) } @Override - protected void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) - throws SubscriptionException { + protected void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) { LOGGER.info("DropTopicProcedure: executeFromOperateOnDataNodes({})", topicName); final List<TSStatus> statuses = env.dropSingleTopicOnDataNode(topicName); if (RpcUtils.squashResponseStatusList(statuses).getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + // throw an exception instead of logging a warning; do not rely on metadata synchronization throw new SubscriptionException( String.format("Failed to drop topic %s on data nodes, because %s", topicName, statuses)); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/runtime/TopicMetaSyncProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/runtime/TopicMetaSyncProcedure.java index 40920e439367a..27919bbcb9ea3 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/runtime/TopicMetaSyncProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/runtime/TopicMetaSyncProcedure.java @@ -98,7 +98,8 @@ public boolean executeFromValidate(ConfigNodeProcedureEnv env) { } @Override - public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) { + public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) + throws SubscriptionException { LOGGER.info("TopicMetaSyncProcedure: executeFromOperateOnConfigNodes"); final List<TopicMeta> topicMetaList = new ArrayList<>(); @@ -121,7 +122,8 @@ public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) { } @Override - public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) throws IOException { + public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) + throws SubscriptionException, IOException { LOGGER.info("TopicMetaSyncProcedure: executeFromOperateOnDataNodes"); Map respMap = pushTopicMetaToDataNodes(env); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/sync/AbstractOperatePipeProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/sync/AbstractOperatePipeProcedure.java index 4f92525f0ad49..03c3e985e2bbe 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/sync/AbstractOperatePipeProcedure.java +++ 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/sync/AbstractOperatePipeProcedure.java @@ -21,8 +21,6 @@ import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; import org.apache.iotdb.confignode.procedure.impl.pipe.AbstractOperatePipeProcedureV2; import org.apache.iotdb.confignode.procedure.state.sync.OperatePipeState; @@ -40,7 +38,7 @@ abstract class AbstractOperatePipeProcedure @Override protected Flow executeFromState(ConfigNodeProcedureEnv env, OperatePipeState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws InterruptedException { return Flow.NO_MORE_STATE; } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/sync/AuthOperationProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/sync/AuthOperationProcedure.java index d8e999cd18789..20cc759a74cb9 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/sync/AuthOperationProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/sync/AuthOperationProcedure.java @@ -25,10 +25,10 @@ import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.exception.IoTDBException; import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils; -import org.apache.iotdb.confignode.client.CnToDnRequestType; +import org.apache.iotdb.confignode.client.sync.CnToDnSyncRequestType; import org.apache.iotdb.confignode.client.sync.SyncDataNodeClientPool; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; -import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan; +import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeEnrichedPlan; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; @@ -112,7 +112,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, AuthOperationProcedu .sendSyncRequestToDataNodeWithRetry( pair.getLeft().getLocation().getInternalEndPoint(), req, - CnToDnRequestType.INVALIDATE_PERMISSION_CACHE); + CnToDnSyncRequestType.INVALIDATE_PERMISSION_CACHE); if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { it.remove(); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/sync/StartPipeProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/sync/StartPipeProcedure.java index 7513d23c45886..d85aba66c5ae8 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/sync/StartPipeProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/sync/StartPipeProcedure.java @@ -21,7 +21,6 @@ import org.apache.iotdb.commons.sync.PipeInfo; import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.impl.pipe.task.StartPipeProcedureV2; import 
org.apache.iotdb.confignode.procedure.store.ProcedureType; @@ -57,11 +56,6 @@ public StartPipeProcedure(PipeInfo pipeInfo) { this.pipeInfo = pipeInfo; } - @Override - protected boolean abort(ConfigNodeProcedureEnv configNodeProcedureEnv) { - return false; - } - @Override public void serialize(DataOutputStream stream) throws IOException { stream.writeShort(ProcedureType.START_PIPE_PROCEDURE.getTypeCode()); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/testonly/AddNeverFinishSubProcedureProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/testonly/AddNeverFinishSubProcedureProcedure.java index e615eaf7f2706..1e82892af78e8 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/testonly/AddNeverFinishSubProcedureProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/testonly/AddNeverFinishSubProcedureProcedure.java @@ -22,8 +22,6 @@ import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; import org.apache.iotdb.confignode.procedure.store.ProcedureType; @@ -42,7 +40,7 @@ public class AddNeverFinishSubProcedureProcedure @Override protected Flow executeFromState(ConfigNodeProcedureEnv env, Integer state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws InterruptedException { if (state == 0) { // the sub procedure will never finish, so the father procedure should never be called again addChildProcedure(new NeverFinishProcedure()); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/testonly/NeverFinishProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/testonly/NeverFinishProcedure.java index 7b247bc658893..18c8a69e7ac01 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/testonly/NeverFinishProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/testonly/NeverFinishProcedure.java @@ -22,8 +22,6 @@ import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; import org.apache.iotdb.confignode.procedure.store.ProcedureType; @@ -42,7 +40,7 @@ public NeverFinishProcedure(long procId) { @Override protected Flow executeFromState(ConfigNodeProcedureEnv env, Integer state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws InterruptedException { setNextState(state + 1); Thread.sleep(1000); return Flow.HAS_MORE_STATE; diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/scheduler/AbstractProcedureScheduler.java 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/scheduler/AbstractProcedureScheduler.java index 5a91e42ae1d8c..b8e8c5778c465 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/scheduler/AbstractProcedureScheduler.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/scheduler/AbstractProcedureScheduler.java @@ -78,6 +78,10 @@ public void signalAll() { @Override public void addFront(final Procedure procedure) { + if (procedure.isSuccess()) { + LOG.warn("Don't add a successful procedure back to the scheduler; it will be ignored"); + return; + } push(procedure, true, true); } @@ -88,6 +92,10 @@ public void addFront(final Procedure procedure, boolean notify) { @Override public void addBack(final Procedure procedure) { + if (procedure.isSuccess()) { + LOG.warn("Don't add a successful procedure back to the scheduler; it will be ignored"); + return; + } push(procedure, false, true); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/NotifyRegionMigrationState.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/NotifyRegionMigrationState.java new file mode 100644 index 0000000000000..1b964621e5b7c --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/NotifyRegionMigrationState.java @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.procedure.state; + +public enum NotifyRegionMigrationState { + INIT, +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/ReconstructRegionState.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/ReconstructRegionState.java new file mode 100644 index 0000000000000..ef7abc5329cce --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/ReconstructRegionState.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.procedure.state; + +public enum ReconstructRegionState { + RECONSTRUCT_REGION_PREPARE, + REMOVE_REGION_PEER, + CHECK_REMOVE_REGION_PEER, + ADD_REGION_PEER, + CHECK_ADD_REGION_PEER, +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/RemoveAINodeState.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/RemoveAINodeState.java new file mode 100644 index 0000000000000..eecb5a4d9d985 --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/RemoveAINodeState.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.procedure.state; + +public enum RemoveAINodeState { + MODEL_DELETE, + NODE_REMOVE +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/RemoveConfigNodeState.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/RemoveConfigNodeState.java index 864ee97e0ccde..312b3c3cb4943 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/RemoveConfigNodeState.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/RemoveConfigNodeState.java @@ -22,5 +22,5 @@ public enum RemoveConfigNodeState { REMOVE_PEER, DELETE_PEER, - STOP_CONFIG_NODE + STOP_AND_CLEAR_CONFIG_NODE } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/model/CreateModelState.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/model/CreateModelState.java new file mode 100644 index 0000000000000..9bf9d347afb2b --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/model/CreateModelState.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.procedure.state.model; + +public enum CreateModelState { + LOADING, + ACTIVE +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/model/DropModelState.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/model/DropModelState.java new file mode 100644 index 0000000000000..a06c19cc7046b --- /dev/null +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/model/DropModelState.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.procedure.state.model; + +public enum DropModelState { + AI_NODE_DROPPED, + CONFIG_NODE_DROPPED +} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureFactory.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureFactory.java index 4a7c9d008cf0f..fc88b54f3d569 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureFactory.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureFactory.java @@ -19,11 +19,15 @@ package org.apache.iotdb.confignode.procedure.store; +import org.apache.iotdb.commons.exception.runtime.ThriftSerDeException; import org.apache.iotdb.confignode.procedure.Procedure; import org.apache.iotdb.confignode.procedure.impl.cq.CreateCQProcedure; +import org.apache.iotdb.confignode.procedure.impl.model.CreateModelProcedure; +import org.apache.iotdb.confignode.procedure.impl.model.DropModelProcedure; import org.apache.iotdb.confignode.procedure.impl.node.AddConfigNodeProcedure; +import org.apache.iotdb.confignode.procedure.impl.node.RemoveAINodeProcedure; import org.apache.iotdb.confignode.procedure.impl.node.RemoveConfigNodeProcedure; -import org.apache.iotdb.confignode.procedure.impl.node.RemoveDataNodeProcedure; +import org.apache.iotdb.confignode.procedure.impl.node.RemoveDataNodesProcedure; import org.apache.iotdb.confignode.procedure.impl.pipe.plugin.CreatePipePluginProcedure; import org.apache.iotdb.confignode.procedure.impl.pipe.plugin.DropPipePluginProcedure; import org.apache.iotdb.confignode.procedure.impl.pipe.runtime.PipeHandleLeaderChangeProcedure; @@ -36,6 +40,8 @@ import org.apache.iotdb.confignode.procedure.impl.pipe.task.StopPipeProcedureV2; import org.apache.iotdb.confignode.procedure.impl.region.AddRegionPeerProcedure; import org.apache.iotdb.confignode.procedure.impl.region.CreateRegionGroupsProcedure; +import org.apache.iotdb.confignode.procedure.impl.region.NotifyRegionMigrationProcedure; 
+import org.apache.iotdb.confignode.procedure.impl.region.ReconstructRegionProcedure; import org.apache.iotdb.confignode.procedure.impl.region.RegionMigrateProcedure; import org.apache.iotdb.confignode.procedure.impl.region.RemoveRegionPeerProcedure; import org.apache.iotdb.confignode.procedure.impl.schema.AlterLogicalViewProcedure; @@ -99,7 +105,7 @@ public Procedure create(ByteBuffer buffer) throws IOException { procedure = new RemoveConfigNodeProcedure(); break; case REMOVE_DATA_NODE_PROCEDURE: - procedure = new RemoveDataNodeProcedure(); + procedure = new RemoveDataNodesProcedure(); break; case REGION_MIGRATE_PROCEDURE: procedure = new RegionMigrateProcedure(); @@ -113,6 +119,12 @@ public Procedure create(ByteBuffer buffer) throws IOException { case CREATE_REGION_GROUPS: procedure = new CreateRegionGroupsProcedure(); break; + case RECONSTRUCT_REGION_PROCEDURE: + procedure = new ReconstructRegionProcedure(); + break; + case NOTIFY_REGION_MIGRATION_PROCEDURE: + procedure = new NotifyRegionMigrationProcedure(); + break; case DELETE_TIMESERIES_PROCEDURE: procedure = new DeleteTimeSeriesProcedure(false); break; @@ -187,6 +199,12 @@ public Procedure create(ByteBuffer buffer) throws IOException { case DROP_PIPE_PLUGIN_PROCEDURE: procedure = new DropPipePluginProcedure(); break; + case CREATE_MODEL_PROCEDURE: + procedure = new CreateModelProcedure(); + break; + case DROP_MODEL_PROCEDURE: + procedure = new DropModelProcedure(); + break; case AUTH_OPERATE_PROCEDURE: procedure = new AuthOperationProcedure(false); break; @@ -220,6 +238,9 @@ public Procedure create(ByteBuffer buffer) throws IOException { case PIPE_ENRICHED_AUTH_OPERATE_PROCEDURE: procedure = new AuthOperationProcedure(true); break; + case REMOVE_AI_NODE_PROCEDURE: + procedure = new RemoveAINodeProcedure(); + break; case PIPE_ENRICHED_SET_TTL_PROCEDURE: procedure = new SetTTLProcedure(true); break; @@ -269,7 +290,13 @@ public Procedure create(ByteBuffer buffer) throws IOException { LOGGER.error("Unknown Procedure type: {}", typeCode); throw new IOException("Unknown Procedure type: " + typeCode); } - procedure.deserialize(buffer); + try { + procedure.deserialize(buffer); + } catch (ThriftSerDeException e) { + LOGGER.warn( + "Caught an exception while deserializing the procedure; this procedure will be ignored.", e); + procedure = null; + } return procedure; } @@ -280,8 +307,10 @@ public static ProcedureType getProcedureType(Procedure procedure) { return ProcedureType.ADD_CONFIG_NODE_PROCEDURE; } else if (procedure instanceof RemoveConfigNodeProcedure) { return ProcedureType.REMOVE_CONFIG_NODE_PROCEDURE; - } else if (procedure instanceof RemoveDataNodeProcedure) { + } else if (procedure instanceof RemoveDataNodesProcedure) { return ProcedureType.REMOVE_DATA_NODE_PROCEDURE; + } else if (procedure instanceof RemoveAINodeProcedure) { + return ProcedureType.REMOVE_AI_NODE_PROCEDURE; } else if (procedure instanceof RegionMigrateProcedure) { return ProcedureType.REGION_MIGRATE_PROCEDURE; } else if (procedure instanceof AddRegionPeerProcedure) { @@ -292,6 +321,10 @@ public static ProcedureType getProcedureType(Procedure procedure) { return ProcedureType.CREATE_REGION_GROUPS; } else if (procedure instanceof DeleteTimeSeriesProcedure) { return ProcedureType.DELETE_TIMESERIES_PROCEDURE; + } else if (procedure instanceof ReconstructRegionProcedure) { + return ProcedureType.RECONSTRUCT_REGION_PROCEDURE; + } else if (procedure instanceof NotifyRegionMigrationProcedure) { + return ProcedureType.NOTIFY_REGION_MIGRATION_PROCEDURE; } else if (procedure 
instanceof CreateTriggerProcedure) { return ProcedureType.CREATE_TRIGGER_PROCEDURE; } else if (procedure instanceof DropTriggerProcedure) { @@ -316,6 +349,10 @@ public static ProcedureType getProcedureType(Procedure procedure) { return ProcedureType.CREATE_PIPE_PLUGIN_PROCEDURE; } else if (procedure instanceof DropPipePluginProcedure) { return ProcedureType.DROP_PIPE_PLUGIN_PROCEDURE; + } else if (procedure instanceof CreateModelProcedure) { + return ProcedureType.CREATE_MODEL_PROCEDURE; + } else if (procedure instanceof DropModelProcedure) { + return ProcedureType.DROP_MODEL_PROCEDURE; } else if (procedure instanceof CreatePipeProcedureV2) { return ProcedureType.CREATE_PIPE_PROCEDURE_V2; } else if (procedure instanceof StartPipeProcedureV2) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureType.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureType.java index 683365b1dfc52..48ccca42d44a6 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureType.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureType.java @@ -36,10 +36,12 @@ public enum ProcedureType { DELETE_DATABASE_PROCEDURE((short) 200), REGION_MIGRATE_PROCEDURE((short) 201), CREATE_REGION_GROUPS((short) 202), - @TestOnly - CREATE_MANY_DATABASES_PROCEDURE((short) 203), + RECONSTRUCT_REGION_PROCEDURE((short) 203), ADD_REGION_PEER_PROCEDURE((short) 204), REMOVE_REGION_PEER_PROCEDURE((short) 205), + NOTIFY_REGION_MIGRATION_PROCEDURE((short) 206), + @TestOnly + CREATE_MANY_DATABASES_PROCEDURE((short) 250), /** Timeseries */ DELETE_TIMESERIES_PROCEDURE((short) 300), @@ -62,6 +64,11 @@ public enum ProcedureType { UNSET_TEMPLATE_PROCEDURE((short) 701), SET_TEMPLATE_PROCEDURE((short) 702), + /** AI Model */ + CREATE_MODEL_PROCEDURE((short) 800), + DROP_MODEL_PROCEDURE((short) 801), + REMOVE_AI_NODE_PROCEDURE((short) 802), + // ProcedureId 800-899 is used by IoTDB-Ml /** Pipe Plugin */ diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNode.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNode.java index 3d537cdec61f7..7f0db7a37840b 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNode.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNode.java @@ -22,6 +22,7 @@ import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.ServerCommandLine; import org.apache.iotdb.commons.client.ClientManagerMetrics; import org.apache.iotdb.commons.concurrent.ThreadModule; import org.apache.iotdb.commons.concurrent.ThreadName; @@ -29,7 +30,9 @@ import org.apache.iotdb.commons.conf.CommonConfig; import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.conf.IoTDBConstant; +import org.apache.iotdb.commons.exception.ConfigurationException; import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.commons.exception.IoTDBException; import org.apache.iotdb.commons.exception.StartupException; import org.apache.iotdb.commons.service.JMXService; import org.apache.iotdb.commons.service.RegisterManager; @@ -43,6 +46,7 @@ import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import 
org.apache.iotdb.confignode.conf.ConfigNodeConstant; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; +import org.apache.iotdb.confignode.conf.ConfigNodeStartupCheck; import org.apache.iotdb.confignode.conf.SystemPropertiesUtils; import org.apache.iotdb.confignode.manager.ConfigManager; import org.apache.iotdb.confignode.manager.consensus.ConsensusManager; @@ -72,9 +76,10 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Set; import java.util.concurrent.TimeUnit; -public class ConfigNode implements ConfigNodeMBean { +public class ConfigNode extends ServerCommandLine implements ConfigNodeMBean { private static final Logger LOGGER = LoggerFactory.getLogger(ConfigNode.class); @@ -84,7 +89,7 @@ public class ConfigNode implements ConfigNodeMBean { private static final int STARTUP_RETRY_NUM = 10; private static final long STARTUP_RETRY_INTERVAL_IN_MS = TimeUnit.SECONDS.toMillis(3); private static final int SCHEDULE_WAITING_RETRY_NUM = - (int) (COMMON_CONFIG.getConnectionTimeoutInMS() / STARTUP_RETRY_INTERVAL_IN_MS); + (int) (COMMON_CONFIG.getCnConnectionTimeoutInMS() / STARTUP_RETRY_INTERVAL_IN_MS); private static final int SEED_CONFIG_NODE_ID = 0; private static final int INIT_NON_SEED_CONFIG_NODE_ID = -1; @@ -101,11 +106,13 @@ public class ConfigNode implements ConfigNodeMBean { protected ConfigManager configManager; - protected ConfigNode() { + public ConfigNode() { + super("ConfigNode"); // We do not init anything here, so that we can re-initialize the instance in IT. + ConfigNodeHolder.instance = this; } - public static void main(String[] args) { + public static void main(String[] args) throws Exception { LOGGER.info( "{} environment variables: {}", ConfigNodeConstant.GLOBAL_NAME, @@ -114,7 +121,32 @@ public static void main(String[] args) { "{} default charset is: {}", ConfigNodeConstant.GLOBAL_NAME, Charset.defaultCharset().displayName()); - new ConfigNodeCommandLine().doMain(args); + ConfigNode configNode = new ConfigNode(); + int returnCode = configNode.run(args); + if (returnCode != 0) { + System.exit(returnCode); + } + } + + @Override + protected void start() throws IoTDBException { + try { + // Do ConfigNode startup checks + LOGGER.info("Starting IoTDB {}", IoTDBConstant.VERSION_WITH_BUILD); + ConfigNodeStartupCheck checks = new ConfigNodeStartupCheck(IoTDBConstant.CN_ROLE); + checks.startUpCheck(); + } catch (StartupException | ConfigurationException | IOException e) { + LOGGER.error("Encountered an error during startup checks", e); + throw new IoTDBException("Error starting", -1); + } + active(); + } + + @Override + protected void remove(Set<Integer> nodeIds) throws IoTDBException { + throw new IoTDBException( + "The remove-confignode script has been deprecated. 
Please connect to the CLI and use SQL: remove confignode [confignode_id].", + -1); } public void active() { @@ -171,7 +203,7 @@ public void active() { "The current {} is now starting as the Seed-ConfigNode.", ConfigNodeConstant.GLOBAL_NAME); - /* Always set ClusterId and ConfigNodeId before initConsensusManager */ + /* Always set ConfigNodeId before initConsensusManager */ CONF.setConfigNodeId(SEED_CONFIG_NODE_ID); configManager.initConsensusManager(); @@ -465,7 +497,7 @@ public void setConfigManager(ConfigManager configManager) { private static class ConfigNodeHolder { - private static ConfigNode instance = new ConfigNode(); + private static ConfigNode instance; private ConfigNodeHolder() { // Empty constructor diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNodeCommandLine.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNodeCommandLine.java deleted file mode 100644 index 21ccbab34219b..0000000000000 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNodeCommandLine.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.confignode.service; - -import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; -import org.apache.iotdb.commons.ServerCommandLine; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.exception.BadNodeUrlException; -import org.apache.iotdb.commons.exception.ConfigurationException; -import org.apache.iotdb.commons.exception.StartupException; -import org.apache.iotdb.confignode.conf.ConfigNodeRemoveCheck; -import org.apache.iotdb.confignode.conf.ConfigNodeStartupCheck; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -import static org.apache.iotdb.confignode.conf.ConfigNodeConstant.REMOVE_CONFIGNODE_USAGE; - -public class ConfigNodeCommandLine extends ServerCommandLine { - private static final Logger LOGGER = LoggerFactory.getLogger(ConfigNodeCommandLine.class); - - // Start ConfigNode - private static final String MODE_START = "-s"; - // Remove ConfigNode - private static final String MODE_REMOVE = "-r"; - - private static final String USAGE = - "Usage: <-s|-r> " - + "[-D{} ] \n" - + "-s: Start the ConfigNode and join to the cluster\n" - + "-r: Remove the ConfigNode out of the cluster\n"; - - @Override - protected String getUsage() { - return USAGE; - } - - @Override - protected int run(String[] args) { - String mode; - if (args.length < 1) { - mode = MODE_START; - LOGGER.warn( - "ConfigNode does not specify a startup mode. 
The default startup mode {} will be used", - MODE_START); - } else { - mode = args[0]; - } - - LOGGER.info("Running mode {}", mode); - if (MODE_START.equals(mode)) { - try { - // Do ConfigNode startup checks - LOGGER.info("Starting IoTDB {}", IoTDBConstant.VERSION_WITH_BUILD); - ConfigNodeStartupCheck checks = new ConfigNodeStartupCheck(IoTDBConstant.CN_ROLE); - checks.startUpCheck(); - } catch (StartupException | ConfigurationException | IOException e) { - LOGGER.error("Meet error when doing start checking", e); - return -1; - } - activeConfigNodeInstance(); - } else if (MODE_REMOVE.equals(mode)) { - // remove ConfigNode - try { - doRemoveConfigNode(args); - } catch (IOException e) { - LOGGER.error("Meet error when doing remove ConfigNode", e); - return -1; - } - } else { - LOGGER.error("Unsupported startup mode: {}", mode); - return -1; - } - - return 0; - } - - protected void activeConfigNodeInstance() { - ConfigNode.getInstance().active(); - } - - protected void doRemoveConfigNode(String[] args) throws IOException { - - if (args.length != 2) { - LOGGER.info(REMOVE_CONFIGNODE_USAGE); - return; - } - - LOGGER.info("Starting to remove ConfigNode, parameter: {}, {}", args[0], args[1]); - - try { - TConfigNodeLocation removeConfigNodeLocation = - ConfigNodeRemoveCheck.getInstance().removeCheck(args[1]); - if (removeConfigNodeLocation == null) { - LOGGER.error( - "The ConfigNode to be removed is not in the cluster, or the input format is incorrect."); - return; - } - - ConfigNodeRemoveCheck.getInstance().removeConfigNode(removeConfigNodeLocation); - } catch (BadNodeUrlException e) { - LOGGER.warn("No ConfigNodes need to be removed.", e); - return; - } - - LOGGER.info( - "ConfigNode: {} is removed. If the confignode data directory is no longer needed, you can delete it manually.", - args[1]); - } -} diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java index f1a09ad088a06..21dbccd07a75a 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java @@ -23,6 +23,7 @@ import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; import org.apache.iotdb.common.rpc.thrift.TFlushReq; import org.apache.iotdb.common.rpc.thrift.TNodeLocations; +import org.apache.iotdb.common.rpc.thrift.TPipeHeartbeatResp; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.common.rpc.thrift.TSetConfigurationReq; import org.apache.iotdb.common.rpc.thrift.TSetSpaceQuotaReq; @@ -43,9 +44,11 @@ import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.conf.ConfigNodeConstant; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; +import org.apache.iotdb.confignode.conf.ConfigNodeSystemPropertiesHandler; import org.apache.iotdb.confignode.conf.SystemPropertiesUtils; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; -import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan; +import org.apache.iotdb.confignode.consensus.request.read.ainode.GetAINodeConfigurationPlan; +import org.apache.iotdb.confignode.consensus.request.read.auth.AuthorReadPlan; import org.apache.iotdb.confignode.consensus.request.read.database.CountDatabasePlan; import 
org.apache.iotdb.confignode.consensus.request.read.database.GetDatabasePlan; import org.apache.iotdb.confignode.consensus.request.read.datanode.GetDataNodeConfigurationPlan; @@ -53,6 +56,8 @@ import org.apache.iotdb.confignode.consensus.request.read.partition.GetOrCreateDataPartitionPlan; import org.apache.iotdb.confignode.consensus.request.read.region.GetRegionInfoListPlan; import org.apache.iotdb.confignode.consensus.request.read.ttl.ShowTTLPlan; +import org.apache.iotdb.confignode.consensus.request.write.ainode.RemoveAINodePlan; +import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.consensus.request.write.confignode.RemoveConfigNodePlan; import org.apache.iotdb.confignode.consensus.request.write.database.DatabaseSchemaPlan; import org.apache.iotdb.confignode.consensus.request.write.database.SetDataReplicationFactorPlan; @@ -60,6 +65,8 @@ import org.apache.iotdb.confignode.consensus.request.write.database.SetTTLPlan; import org.apache.iotdb.confignode.consensus.request.write.database.SetTimePartitionIntervalPlan; import org.apache.iotdb.confignode.consensus.request.write.datanode.RemoveDataNodePlan; +import org.apache.iotdb.confignode.consensus.response.ainode.AINodeConfigurationResp; +import org.apache.iotdb.confignode.consensus.response.ainode.AINodeRegisterResp; import org.apache.iotdb.confignode.consensus.response.auth.PermissionInfoResp; import org.apache.iotdb.confignode.consensus.response.database.CountDatabaseResp; import org.apache.iotdb.confignode.consensus.response.database.DatabaseSchemaResp; @@ -72,6 +79,12 @@ import org.apache.iotdb.confignode.manager.ConfigManager; import org.apache.iotdb.confignode.manager.consensus.ConsensusManager; import org.apache.iotdb.confignode.rpc.thrift.IConfigNodeRPCService; +import org.apache.iotdb.confignode.rpc.thrift.TAINodeConfigurationResp; +import org.apache.iotdb.confignode.rpc.thrift.TAINodeRegisterReq; +import org.apache.iotdb.confignode.rpc.thrift.TAINodeRegisterResp; +import org.apache.iotdb.confignode.rpc.thrift.TAINodeRemoveReq; +import org.apache.iotdb.confignode.rpc.thrift.TAINodeRestartReq; +import org.apache.iotdb.confignode.rpc.thrift.TAINodeRestartResp; import org.apache.iotdb.confignode.rpc.thrift.TAddConsensusGroupReq; import org.apache.iotdb.confignode.rpc.thrift.TAlterLogicalViewReq; import org.apache.iotdb.confignode.rpc.thrift.TAlterPipeReq; @@ -91,6 +104,7 @@ import org.apache.iotdb.confignode.rpc.thrift.TCreateCQReq; import org.apache.iotdb.confignode.rpc.thrift.TCreateConsumerReq; import org.apache.iotdb.confignode.rpc.thrift.TCreateFunctionReq; +import org.apache.iotdb.confignode.rpc.thrift.TCreateModelReq; import org.apache.iotdb.confignode.rpc.thrift.TCreatePipePluginReq; import org.apache.iotdb.confignode.rpc.thrift.TCreatePipeReq; import org.apache.iotdb.confignode.rpc.thrift.TCreateSchemaTemplateReq; @@ -114,10 +128,13 @@ import org.apache.iotdb.confignode.rpc.thrift.TDeleteTimeSeriesReq; import org.apache.iotdb.confignode.rpc.thrift.TDropCQReq; import org.apache.iotdb.confignode.rpc.thrift.TDropFunctionReq; +import org.apache.iotdb.confignode.rpc.thrift.TDropModelReq; import org.apache.iotdb.confignode.rpc.thrift.TDropPipePluginReq; import org.apache.iotdb.confignode.rpc.thrift.TDropPipeReq; +import org.apache.iotdb.confignode.rpc.thrift.TDropSubscriptionReq; import org.apache.iotdb.confignode.rpc.thrift.TDropTopicReq; import org.apache.iotdb.confignode.rpc.thrift.TDropTriggerReq; +import org.apache.iotdb.confignode.rpc.thrift.TExtendRegionReq; import 
org.apache.iotdb.confignode.rpc.thrift.TGetAllPipeInfoResp; import org.apache.iotdb.confignode.rpc.thrift.TGetAllSubscriptionInfoResp; import org.apache.iotdb.confignode.rpc.thrift.TGetAllTemplatesResp; @@ -128,6 +145,8 @@ import org.apache.iotdb.confignode.rpc.thrift.TGetJarInListReq; import org.apache.iotdb.confignode.rpc.thrift.TGetJarInListResp; import org.apache.iotdb.confignode.rpc.thrift.TGetLocationForTriggerResp; +import org.apache.iotdb.confignode.rpc.thrift.TGetModelInfoReq; +import org.apache.iotdb.confignode.rpc.thrift.TGetModelInfoResp; import org.apache.iotdb.confignode.rpc.thrift.TGetPathsSetTemplatesReq; import org.apache.iotdb.confignode.rpc.thrift.TGetPathsSetTemplatesResp; import org.apache.iotdb.confignode.rpc.thrift.TGetPipePluginTableResp; @@ -145,7 +164,9 @@ import org.apache.iotdb.confignode.rpc.thrift.TPermissionInfoResp; import org.apache.iotdb.confignode.rpc.thrift.TPipeConfigTransferReq; import org.apache.iotdb.confignode.rpc.thrift.TPipeConfigTransferResp; +import org.apache.iotdb.confignode.rpc.thrift.TReconstructRegionReq; import org.apache.iotdb.confignode.rpc.thrift.TRegionRouteMapResp; +import org.apache.iotdb.confignode.rpc.thrift.TRemoveRegionReq; import org.apache.iotdb.confignode.rpc.thrift.TSchemaNodeManagementReq; import org.apache.iotdb.confignode.rpc.thrift.TSchemaNodeManagementResp; import org.apache.iotdb.confignode.rpc.thrift.TSchemaPartitionReq; @@ -155,11 +176,14 @@ import org.apache.iotdb.confignode.rpc.thrift.TSetSchemaReplicationFactorReq; import org.apache.iotdb.confignode.rpc.thrift.TSetSchemaTemplateReq; import org.apache.iotdb.confignode.rpc.thrift.TSetTimePartitionIntervalReq; +import org.apache.iotdb.confignode.rpc.thrift.TShowAINodesResp; import org.apache.iotdb.confignode.rpc.thrift.TShowCQResp; import org.apache.iotdb.confignode.rpc.thrift.TShowClusterResp; import org.apache.iotdb.confignode.rpc.thrift.TShowConfigNodesResp; import org.apache.iotdb.confignode.rpc.thrift.TShowDataNodesResp; import org.apache.iotdb.confignode.rpc.thrift.TShowDatabaseResp; +import org.apache.iotdb.confignode.rpc.thrift.TShowModelReq; +import org.apache.iotdb.confignode.rpc.thrift.TShowModelResp; import org.apache.iotdb.confignode.rpc.thrift.TShowPipeReq; import org.apache.iotdb.confignode.rpc.thrift.TShowPipeResp; import org.apache.iotdb.confignode.rpc.thrift.TShowRegionReq; @@ -279,6 +303,47 @@ public TDataNodeRestartResp restartDataNode(TDataNodeRestartReq req) { return resp; } + @Override + public TAINodeRegisterResp registerAINode(TAINodeRegisterReq req) { + TAINodeRegisterResp resp = + ((AINodeRegisterResp) configManager.registerAINode(req)).convertToAINodeRegisterResp(); + LOGGER.info("Execute RegisterAINodeRequest {} with result {}", req, resp); + return resp; + } + + @Override + public TAINodeRestartResp restartAINode(TAINodeRestartReq req) { + TAINodeRestartResp resp = configManager.restartAINode(req); + LOGGER.info("Execute RestartAINodeRequest {} with result {}", req, resp); + return resp; + } + + @Override + public TSStatus removeAINode(TAINodeRemoveReq req) { + LOGGER.info("ConfigNode RPC Service starting to remove AINode, req: {}", req); + RemoveAINodePlan removeAINodePlan = new RemoveAINodePlan(req.getAiNodeLocation()); + TSStatus status = configManager.removeAINode(removeAINodePlan); + LOGGER.info( + "ConfigNode RPC Service finished removing AINode, req: {}, result: {}", req, status); + return status; + } + + @Override + public TShowAINodesResp showAINodes() throws TException { + return configManager.showAINodes(); + } + + @Override + 
public TAINodeConfigurationResp getAINodeConfiguration(int aiNodeId) throws TException { + GetAINodeConfigurationPlan getAINodeConfigurationPlan = + new GetAINodeConfigurationPlan(aiNodeId); + AINodeConfigurationResp aiNodeConfigurationResp = + (AINodeConfigurationResp) configManager.getAINodeConfiguration(getAINodeConfigurationPlan); + TAINodeConfigurationResp resp = new TAINodeConfigurationResp(); + aiNodeConfigurationResp.convertToRpcAINodeLocationResp(resp); + return resp; + } + @Override public TDataNodeRemoveResp removeDataNode(TDataNodeRemoveReq req) { LOGGER.info("ConfigNode RPC Service start to remove DataNode, req: {}", req); @@ -328,9 +393,7 @@ public TSStatus setDatabase(TDatabaseSchema databaseSchema) { .setMessage("Failed to create database. The TTL should be positive."); } - if (isSystemDatabase) { - databaseSchema.setSchemaReplicationFactor(1); - } else if (!databaseSchema.isSetSchemaReplicationFactor()) { + if (!databaseSchema.isSetSchemaReplicationFactor()) { databaseSchema.setSchemaReplicationFactor(configNodeConfig.getSchemaReplicationFactor()); } else if (databaseSchema.getSchemaReplicationFactor() <= 0) { errorResp = @@ -339,9 +402,7 @@ public TSStatus setDatabase(TDatabaseSchema databaseSchema) { "Failed to create database. The schemaReplicationFactor should be positive."); } - if (isSystemDatabase) { - databaseSchema.setDataReplicationFactor(1); - } else if (!databaseSchema.isSetDataReplicationFactor()) { + if (!databaseSchema.isSetDataReplicationFactor()) { databaseSchema.setDataReplicationFactor(configNodeConfig.getDataReplicationFactor()); } else if (databaseSchema.getDataReplicationFactor() <= 0) { errorResp = @@ -533,7 +594,7 @@ public TShowTTLResp showTTL(TShowTTLReq req) { return showTTLResp.convertToRPCTShowTTLResp(); } - public TSStatus callSpecialProcedure(TTestOperation operation) throws TException { + public TSStatus callSpecialProcedure(TTestOperation operation) { switch (operation) { case TEST_PROCEDURE_RECOVER: return configManager.getProcedureManager().createManyDatabases(); @@ -612,7 +673,7 @@ public TAuthorizerResp queryPermission(final TAuthorizerReq req) { final PermissionInfoResp dataSet = (PermissionInfoResp) configManager.queryPermission( - new AuthorPlan( + new AuthorReadPlan( ConfigPhysicalPlanType.values()[ req.getAuthorType() + ConfigPhysicalPlanType.CreateUser.ordinal()], req.getUserName(), @@ -697,7 +758,10 @@ public TSStatus removeConfigNode(TConfigNodeLocation configNodeLocation) throws RemoveConfigNodePlan removeConfigNodePlan = new RemoveConfigNodePlan(configNodeLocation); TSStatus status = configManager.removeConfigNode(removeConfigNodePlan); // Print log to record the ConfigNode that performs the RemoveConfigNodeRequest - LOGGER.info("Execute RemoveConfigNodeRequest {} with result {}", configNodeLocation, status); + LOGGER.info( + "The result of submitting RemoveConfigNode job is {}. RemoveConfigNodeRequest: {}", + status, + configNodeLocation); return status; } @@ -730,7 +794,7 @@ public TSStatus reportConfigNodeShutdown(TConfigNodeLocation configNodeLocation) /** Stop ConfigNode */ @Override - public TSStatus stopConfigNode(TConfigNodeLocation configNodeLocation) { + public TSStatus stopAndClearConfigNode(TConfigNodeLocation configNodeLocation) { new Thread( // TODO: Perhaps we should find some other way of shutting down the config node, adding // a hard dependency @@ -739,18 +803,19 @@ public TSStatus stopConfigNode(TConfigNodeLocation configNodeLocation) { // instance is created feels cleaner. 
() -> { try { - // Sleep 1s before stop itself - TimeUnit.SECONDS.sleep(1); + // Sleep 5s before stopping itself + TimeUnit.SECONDS.sleep(5); } catch (InterruptedException e) { Thread.currentThread().interrupt(); LOGGER.warn(e.getMessage()); } finally { + ConfigNodeSystemPropertiesHandler.getInstance().delete(); configNode.stop(); } }) .start(); return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()) - .setMessage("Stop ConfigNode success."); + .setMessage("Stop and clear ConfigNode success."); } @Override @@ -1115,6 +1180,11 @@ public TSStatus dropSubscription(TUnsubscribeReq req) { return configManager.dropSubscription(req); } + @Override + public TSStatus dropSubscriptionById(TDropSubscriptionReq req) { + return configManager.dropSubscriptionById(req); + } + @Override public TShowSubscriptionResp showSubscription(TShowSubscriptionReq req) { return configManager.showSubscription(req); @@ -1150,6 +1220,21 @@ public TSStatus migrateRegion(TMigrateRegionReq req) { return configManager.migrateRegion(req); } + @Override + public TSStatus reconstructRegion(TReconstructRegionReq req) { + return configManager.reconstructRegion(req); + } + + @Override + public TSStatus extendRegion(TExtendRegionReq req) throws TException { + return configManager.extendRegion(req); + } + + @Override + public TSStatus removeRegion(TRemoveRegionReq req) throws TException { + return configManager.removeRegion(req); + } + @Override public TSStatus createCQ(TCreateCQReq req) { return configManager.createCQ(req); @@ -1165,6 +1250,26 @@ public TShowCQResp showCQ() { return configManager.showCQ(); } + @Override + public TSStatus createModel(TCreateModelReq req) { + return configManager.createModel(req); + } + + @Override + public TSStatus dropModel(TDropModelReq req) { + return configManager.dropModel(req); + } + + @Override + public TShowModelResp showModel(TShowModelReq req) { + return configManager.showModel(req); + } + + @Override + public TGetModelInfoResp getModelInfo(TGetModelInfoReq req) { + return configManager.getModelInfo(req); + } + @Override public TSStatus setSpaceQuota(TSetSpaceQuotaReq req) throws TException { return configManager.setSpaceQuota(req); @@ -1194,4 +1299,9 @@ public TThrottleQuotaResp showThrottleQuota(TShowThrottleReq req) { public TThrottleQuotaResp getThrottleQuota() { return configManager.getThrottleQuota(); } + + @Override + public TSStatus pushHeartbeat(final int dataNodeId, final TPipeHeartbeatResp resp) { + return configManager.pushHeartbeat(dataNodeId, resp); + } } diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/conf/ConfigNodePropertiesTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/conf/ConfigNodePropertiesTest.java new file mode 100644 index 0000000000000..1101fcc3f111d --- /dev/null +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/conf/ConfigNodePropertiesTest.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.conf; + +import com.tngtech.archunit.core.domain.JavaClasses; +import com.tngtech.archunit.core.importer.ClassFileImporter; +import com.tngtech.archunit.core.importer.ImportOption; +import com.tngtech.archunit.lang.ArchRule; +import org.junit.Test; + +import java.util.Properties; + +import static com.tngtech.archunit.lang.syntax.ArchRuleDefinition.noClasses; + +public class ConfigNodePropertiesTest { + @Test + public void TrimPropertiesOnly() { + JavaClasses allClasses = + new ClassFileImporter() + .withImportOption(new ImportOption.DoNotIncludeTests()) + .importPackages("org.apache.iotdb"); + + ArchRule rule = + noClasses() + .that() + .areAssignableTo("org.apache.iotdb.confignode.conf.ConfigNodeDescriptor") + .should() + .callMethod(Properties.class, "getProperty", String.class) + .orShould() + .callMethod(Properties.class, "getProperty", String.class, String.class); + + rule.check(allClasses); + } +} diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlanSerDeTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlanSerDeTest.java index cfa372be8dd04..be2ba4b696017 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlanSerDeTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlanSerDeTest.java @@ -41,11 +41,11 @@ import org.apache.iotdb.commons.partition.SeriesPartitionTable; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathPatternTree; -import org.apache.iotdb.commons.pipe.plugin.meta.PipePluginMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeRuntimeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.PipePluginMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeRuntimeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; import org.apache.iotdb.commons.subscription.meta.consumer.ConsumerGroupMeta; import org.apache.iotdb.commons.subscription.meta.consumer.ConsumerMeta; import org.apache.iotdb.commons.subscription.meta.topic.TopicMeta; @@ -55,40 +55,14 @@ import org.apache.iotdb.commons.sync.TsFilePipeInfo; import org.apache.iotdb.commons.trigger.TriggerInformation; import org.apache.iotdb.commons.udf.UDFInformation; -import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan; -import org.apache.iotdb.confignode.consensus.request.read.database.CountDatabasePlan; -import org.apache.iotdb.confignode.consensus.request.read.database.GetDatabasePlan; -import org.apache.iotdb.confignode.consensus.request.read.datanode.GetDataNodeConfigurationPlan; -import 
org.apache.iotdb.confignode.consensus.request.read.function.GetFunctionTablePlan; -import org.apache.iotdb.confignode.consensus.request.read.function.GetUDFJarPlan; -import org.apache.iotdb.confignode.consensus.request.read.partition.CountTimeSlotListPlan; -import org.apache.iotdb.confignode.consensus.request.read.partition.GetDataPartitionPlan; -import org.apache.iotdb.confignode.consensus.request.read.partition.GetNodePathsPartitionPlan; -import org.apache.iotdb.confignode.consensus.request.read.partition.GetOrCreateDataPartitionPlan; -import org.apache.iotdb.confignode.consensus.request.read.partition.GetOrCreateSchemaPartitionPlan; -import org.apache.iotdb.confignode.consensus.request.read.partition.GetSchemaPartitionPlan; -import org.apache.iotdb.confignode.consensus.request.read.partition.GetSeriesSlotListPlan; -import org.apache.iotdb.confignode.consensus.request.read.partition.GetTimeSlotListPlan; -import org.apache.iotdb.confignode.consensus.request.read.pipe.plugin.GetPipePluginJarPlan; -import org.apache.iotdb.confignode.consensus.request.read.pipe.plugin.GetPipePluginTablePlan; -import org.apache.iotdb.confignode.consensus.request.read.pipe.task.ShowPipePlanV2; -import org.apache.iotdb.confignode.consensus.request.read.region.GetRegionIdPlan; -import org.apache.iotdb.confignode.consensus.request.read.region.GetRegionInfoListPlan; -import org.apache.iotdb.confignode.consensus.request.read.template.GetAllSchemaTemplatePlan; -import org.apache.iotdb.confignode.consensus.request.read.template.GetAllTemplateSetInfoPlan; -import org.apache.iotdb.confignode.consensus.request.read.template.GetPathsSetTemplatePlan; -import org.apache.iotdb.confignode.consensus.request.read.template.GetSchemaTemplatePlan; -import org.apache.iotdb.confignode.consensus.request.read.trigger.GetTransferringTriggersPlan; -import org.apache.iotdb.confignode.consensus.request.read.trigger.GetTriggerJarPlan; -import org.apache.iotdb.confignode.consensus.request.read.trigger.GetTriggerLocationPlan; -import org.apache.iotdb.confignode.consensus.request.read.trigger.GetTriggerTablePlan; +import org.apache.iotdb.commons.utils.TimePartitionUtils; +import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.consensus.request.write.confignode.ApplyConfigNodePlan; import org.apache.iotdb.confignode.consensus.request.write.confignode.RemoveConfigNodePlan; import org.apache.iotdb.confignode.consensus.request.write.confignode.UpdateClusterIdPlan; import org.apache.iotdb.confignode.consensus.request.write.cq.ActiveCQPlan; import org.apache.iotdb.confignode.consensus.request.write.cq.AddCQPlan; import org.apache.iotdb.confignode.consensus.request.write.cq.DropCQPlan; -import org.apache.iotdb.confignode.consensus.request.write.cq.ShowCQPlan; import org.apache.iotdb.confignode.consensus.request.write.cq.UpdateCQLastExecTimePlan; import org.apache.iotdb.confignode.consensus.request.write.database.AdjustMaxRegionGroupNumPlan; import org.apache.iotdb.confignode.consensus.request.write.database.DatabaseSchemaPlan; @@ -103,6 +77,7 @@ import org.apache.iotdb.confignode.consensus.request.write.function.CreateFunctionPlan; import org.apache.iotdb.confignode.consensus.request.write.function.DropFunctionPlan; import org.apache.iotdb.confignode.consensus.request.write.partition.AddRegionLocationPlan; +import org.apache.iotdb.confignode.consensus.request.write.partition.AutoCleanPartitionTablePlan; import 
org.apache.iotdb.confignode.consensus.request.write.partition.CreateDataPartitionPlan; import org.apache.iotdb.confignode.consensus.request.write.partition.CreateSchemaPartitionPlan; import org.apache.iotdb.confignode.consensus.request.write.partition.RemoveRegionLocationPlan; @@ -141,7 +116,6 @@ import org.apache.iotdb.confignode.consensus.request.write.sync.PreCreatePipePlanV1; import org.apache.iotdb.confignode.consensus.request.write.sync.RecordPipeMessagePlan; import org.apache.iotdb.confignode.consensus.request.write.sync.SetPipeStatusPlanV1; -import org.apache.iotdb.confignode.consensus.request.write.sync.ShowPipePlanV1; import org.apache.iotdb.confignode.consensus.request.write.template.CreateSchemaTemplatePlan; import org.apache.iotdb.confignode.consensus.request.write.template.DropSchemaTemplatePlan; import org.apache.iotdb.confignode.consensus.request.write.template.ExtendSchemaTemplatePlan; @@ -162,8 +136,6 @@ import org.apache.iotdb.confignode.rpc.thrift.TCreateCQReq; import org.apache.iotdb.confignode.rpc.thrift.TDatabaseSchema; import org.apache.iotdb.confignode.rpc.thrift.TPipeSinkInfo; -import org.apache.iotdb.confignode.rpc.thrift.TShowRegionReq; -import org.apache.iotdb.confignode.rpc.thrift.TTimeSlotList; import org.apache.iotdb.confignode.rpc.thrift.TTriggerState; import org.apache.iotdb.db.schemaengine.template.Template; import org.apache.iotdb.db.schemaengine.template.alter.TemplateExtendInfo; @@ -188,14 +160,13 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.TreeMap; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import static org.apache.iotdb.common.rpc.thrift.TConsensusGroupType.ConfigRegion; import static org.apache.iotdb.common.rpc.thrift.TConsensusGroupType.DataRegion; import static org.apache.iotdb.common.rpc.thrift.TConsensusGroupType.SchemaRegion; -import static org.apache.iotdb.commons.schema.SchemaConstant.ALL_MATCH_SCOPE; import static org.junit.Assert.assertEquals; public class ConfigPhysicalPlanSerDeTest { @@ -244,15 +215,6 @@ public void UpdateDataNodePlanTest() throws IOException { Assert.assertEquals(plan0, plan1); } - @Test - public void QueryDataNodeInfoPlanTest() throws IOException { - GetDataNodeConfigurationPlan plan0 = new GetDataNodeConfigurationPlan(-1); - GetDataNodeConfigurationPlan plan1 = - (GetDataNodeConfigurationPlan) - ConfigPhysicalPlan.Factory.create(plan0.serializeToByteBuffer()); - Assert.assertEquals(plan0, plan1); - } - @Test public void CreateDatabasePlanTest() throws IOException { DatabaseSchemaPlan req0 = @@ -343,22 +305,6 @@ public void AdjustMaxRegionGroupCountPlanTest() throws IOException { Assert.assertEquals(req0, req1); } - @Test - public void CountStorageGroupPlanTest() throws IOException { - CountDatabasePlan req0 = new CountDatabasePlan(Arrays.asList("root", "sg"), ALL_MATCH_SCOPE); - CountDatabasePlan req1 = - (CountDatabasePlan) ConfigPhysicalPlan.Factory.create(req0.serializeToByteBuffer()); - Assert.assertEquals(req0, req1); - } - - @Test - public void GetStorageGroupPlanTest() throws IOException { - GetDatabasePlan req0 = new GetDatabasePlan(Arrays.asList("root", "sg"), ALL_MATCH_SCOPE); - CountDatabasePlan req1 = - (CountDatabasePlan) ConfigPhysicalPlan.Factory.create(req0.serializeToByteBuffer()); - Assert.assertEquals(req0, req1); - } - @Test public void CreateRegionsPlanTest() throws IOException { TDataNodeLocation dataNodeLocation = new TDataNodeLocation(); @@ -448,35 +394,6 @@ public void 
CreateSchemaPartitionPlanTest() throws IOException { Assert.assertEquals(req0, req1); } - @Test - public void GetSchemaPartitionPlanTest() throws IOException { - String storageGroup = "root.sg0"; - TSeriesPartitionSlot seriesPartitionSlot = new TSeriesPartitionSlot(10); - - Map<String, List<TSeriesPartitionSlot>> partitionSlotsMap = new HashMap<>(); - partitionSlotsMap.put(storageGroup, Collections.singletonList(seriesPartitionSlot)); - - GetSchemaPartitionPlan req0 = new GetSchemaPartitionPlan(partitionSlotsMap); - GetSchemaPartitionPlan req1 = - (GetSchemaPartitionPlan) ConfigPhysicalPlan.Factory.create(req0.serializeToByteBuffer()); - Assert.assertEquals(req0, req1); - } - - @Test - public void GetOrCreateSchemaPartitionPlanTest() throws IOException { - String storageGroup = "root.sg0"; - TSeriesPartitionSlot seriesPartitionSlot = new TSeriesPartitionSlot(10); - - Map<String, List<TSeriesPartitionSlot>> partitionSlotsMap = new HashMap<>(); - partitionSlotsMap.put(storageGroup, Collections.singletonList(seriesPartitionSlot)); - - GetOrCreateSchemaPartitionPlan req0 = new GetOrCreateSchemaPartitionPlan(partitionSlotsMap); - GetOrCreateSchemaPartitionPlan req1 = - (GetOrCreateSchemaPartitionPlan) - ConfigPhysicalPlan.Factory.create(req0.serializeToByteBuffer()); - Assert.assertEquals(req0, req1); - } - @Test public void CreateDataPartitionPlanTest() throws IOException { TDataNodeLocation dataNodeLocation = new TDataNodeLocation(); @@ -512,48 +429,17 @@ public void CreateDataPartitionPlanTest() throws IOException { } @Test - public void GetDataPartitionPlanTest() throws IOException { - String storageGroup = "root.sg0"; - TSeriesPartitionSlot seriesPartitionSlot = new TSeriesPartitionSlot(10); - TTimePartitionSlot timePartitionSlot = new TTimePartitionSlot(100); - - Map<String, Map<TSeriesPartitionSlot, TTimeSlotList>> partitionSlotsMap = new HashMap<>(); - partitionSlotsMap.put(storageGroup, new HashMap<>()); - partitionSlotsMap - .get(storageGroup) - .put(seriesPartitionSlot, new TTimeSlotList().setTimePartitionSlots(new ArrayList<>())); - partitionSlotsMap - .get(storageGroup) - .get(seriesPartitionSlot) - .getTimePartitionSlots() - .add(timePartitionSlot); - - GetDataPartitionPlan req0 = new GetDataPartitionPlan(partitionSlotsMap); - GetDataPartitionPlan req1 = - (GetDataPartitionPlan) ConfigPhysicalPlan.Factory.create(req0.serializeToByteBuffer()); - Assert.assertEquals(req0, req1); - } - - @Test - public void GetOrCreateDataPartitionPlanTest() throws IOException { - String storageGroup = "root.sg0"; - TSeriesPartitionSlot seriesPartitionSlot = new TSeriesPartitionSlot(10); - TTimePartitionSlot timePartitionSlot = new TTimePartitionSlot(100); - - Map<String, Map<TSeriesPartitionSlot, TTimeSlotList>> partitionSlotsMap = new HashMap<>(); - partitionSlotsMap.put(storageGroup, new HashMap<>()); - partitionSlotsMap - .get(storageGroup) - .put(seriesPartitionSlot, new TTimeSlotList().setTimePartitionSlots(new ArrayList<>())); - partitionSlotsMap - .get(storageGroup) - .get(seriesPartitionSlot) - .getTimePartitionSlots() - .add(timePartitionSlot); - - GetOrCreateDataPartitionPlan req0 = new GetOrCreateDataPartitionPlan(partitionSlotsMap); - GetOrCreateDataPartitionPlan req1 = - (GetOrCreateDataPartitionPlan) + public void AutoCleanPartitionTablePlan() throws IOException { + Map<String, Long> databaseTTLMap = new TreeMap<>(); + databaseTTLMap.put("root.db1", -1L); // NULL_TTL + databaseTTLMap.put("root.db2", 3600L * 1000 * 24); // 1d_TTL + databaseTTLMap.put("root.db3", 3600L * 1000 * 24 * 30); // 1m_TTL (one month, i.e. 30 days) + TTimePartitionSlot currentTimeSlot = + new TTimePartitionSlot(TimePartitionUtils.getTimePartitionSlot(System.currentTimeMillis())); + AutoCleanPartitionTablePlan req0 = 
+ new AutoCleanPartitionTablePlan(databaseTTLMap, currentTimeSlot); + AutoCleanPartitionTablePlan req1 = + (AutoCleanPartitionTablePlan) ConfigPhysicalPlan.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @@ -722,118 +608,6 @@ public void AuthorPlanTest() throws IOException, IllegalPathException { new ArrayList<>()); req1 = (AuthorPlan) ConfigPhysicalPlan.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - - // list user - req0 = - new AuthorPlan( - ConfigPhysicalPlanType.ListUser, - "", - "", - "", - "", - new HashSet<>(), - false, - new ArrayList<>()); - req1 = (AuthorPlan) ConfigPhysicalPlan.Factory.create(req0.serializeToByteBuffer()); - Assert.assertEquals(req0, req1); - - // list role - req0 = - new AuthorPlan( - ConfigPhysicalPlanType.ListRole, - "", - "", - "", - "", - new HashSet<>(), - false, - new ArrayList<>()); - req1 = (AuthorPlan) ConfigPhysicalPlan.Factory.create(req0.serializeToByteBuffer()); - Assert.assertEquals(req0, req1); - - // list privileges user - req0 = - new AuthorPlan( - ConfigPhysicalPlanType.ListUserPrivilege, - "", - "", - "", - "", - new HashSet<>(), - false, - new ArrayList<>()); - req1 = (AuthorPlan) ConfigPhysicalPlan.Factory.create(req0.serializeToByteBuffer()); - Assert.assertEquals(req0, req1); - - // list privileges role - req0 = - new AuthorPlan( - ConfigPhysicalPlanType.ListRolePrivilege, - "", - "", - "", - "", - new HashSet<>(), - false, - new ArrayList<>()); - req1 = (AuthorPlan) ConfigPhysicalPlan.Factory.create(req0.serializeToByteBuffer()); - Assert.assertEquals(req0, req1); - - // list user privileges - req0 = - new AuthorPlan( - ConfigPhysicalPlanType.ListUserPrivilege, - "", - "", - "", - "", - new HashSet<>(), - false, - new ArrayList<>()); - req1 = (AuthorPlan) ConfigPhysicalPlan.Factory.create(req0.serializeToByteBuffer()); - Assert.assertEquals(req0, req1); - - // list role privileges - req0 = - new AuthorPlan( - ConfigPhysicalPlanType.ListRolePrivilege, - "", - "", - "", - "", - new HashSet<>(), - false, - new ArrayList<>()); - req1 = (AuthorPlan) ConfigPhysicalPlan.Factory.create(req0.serializeToByteBuffer()); - Assert.assertEquals(req0, req1); - - // list all role of user - req0 = - new AuthorPlan( - ConfigPhysicalPlanType.ListUserRoles, - "", - "", - "", - "", - new HashSet<>(), - false, - new ArrayList<>()); - req1 = (AuthorPlan) ConfigPhysicalPlan.Factory.create(req0.serializeToByteBuffer()); - Assert.assertEquals(req0, req1); - - // list all user of role - req0 = - new AuthorPlan( - ConfigPhysicalPlanType.ListRoleUsers, - "", - "", - "", - "", - new HashSet<>(), - false, - new ArrayList<>()); - req1 = (AuthorPlan) ConfigPhysicalPlan.Factory.create(req0.serializeToByteBuffer()); - Assert.assertEquals(req0, req1); } @Test @@ -928,24 +702,6 @@ public void DeleteProcedurePlanTest() throws IOException { Assert.assertEquals(req0, req1); } - @Test - public void GetRegionLocationsPlanTest() throws IOException { - GetRegionInfoListPlan req0 = new GetRegionInfoListPlan(); - TShowRegionReq showRegionReq = new TShowRegionReq(); - req0.setShowRegionReq(showRegionReq); - showRegionReq.setConsensusGroupType(TConsensusGroupType.DataRegion); - GetRegionInfoListPlan req1 = - (GetRegionInfoListPlan) ConfigPhysicalPlan.Factory.create(req0.serializeToByteBuffer()); - Assert.assertEquals(req0.getType(), req1.getType()); - Assert.assertEquals(req0.getShowRegionReq(), req1.getShowRegionReq()); - final List sgList = Collections.singletonList("root.sg1, root.sg2, root.*"); - 
showRegionReq.setDatabases(new ArrayList<>(sgList)); - GetRegionInfoListPlan req2 = - (GetRegionInfoListPlan) ConfigPhysicalPlan.Factory.create(req0.serializeToByteBuffer()); - Assert.assertEquals(req0.getType(), req1.getType()); - Assert.assertEquals(req0.getShowRegionReq(), req2.getShowRegionReq()); - } - @Test public void CreateSchemaTemplatePlanTest() throws IOException, IllegalPathException { Template template = newSchemaTemplate("template_name"); @@ -980,51 +736,6 @@ public void ExtendSchemaTemplatePlanTest() throws IOException { Assert.assertEquals(plan, ConfigPhysicalPlan.Factory.create(plan.serializeToByteBuffer())); } - @Test - public void GetSchemaTemplatePlanTest() throws IOException { - GetSchemaTemplatePlan getSchemaTemplatePlan = new GetSchemaTemplatePlan("template1"); - GetSchemaTemplatePlan deserializedPlan = - (GetSchemaTemplatePlan) - ConfigPhysicalPlan.Factory.create(getSchemaTemplatePlan.serializeToByteBuffer()); - Assert.assertEquals("template1", deserializedPlan.getTemplateName()); - } - - @Test - public void GetAllSchemaTemplatePlanTest() throws IOException { - GetAllSchemaTemplatePlan getAllSchemaTemplatePlan0 = new GetAllSchemaTemplatePlan(); - Assert.assertTrue( - ConfigPhysicalPlan.Factory.create(getAllSchemaTemplatePlan0.serializeToByteBuffer()) - instanceof GetAllSchemaTemplatePlan); - } - - @Test - public void GetNodesInSchemaTemplatePlanTest() throws IOException { - GetSchemaTemplatePlan getSchemaTemplatePlan0 = new GetSchemaTemplatePlan("template_name_test"); - GetSchemaTemplatePlan getSchemaTemplatePlan1 = - (GetSchemaTemplatePlan) - ConfigPhysicalPlan.Factory.create(getSchemaTemplatePlan0.serializeToByteBuffer()); - Assert.assertEquals(getSchemaTemplatePlan0, getSchemaTemplatePlan1); - } - - @Test - public void GetNodePathsPartitionPlanTest() throws IOException, IllegalPathException { - GetNodePathsPartitionPlan getNodePathsPartitionPlan0 = new GetNodePathsPartitionPlan(); - getNodePathsPartitionPlan0.setPartialPath(new PartialPath("root.sg1.**")); - getNodePathsPartitionPlan0.setScope(ALL_MATCH_SCOPE); - GetNodePathsPartitionPlan getNodePathsPartitionPlan1 = - (GetNodePathsPartitionPlan) - ConfigPhysicalPlan.Factory.create(getNodePathsPartitionPlan0.serializeToByteBuffer()); - Assert.assertEquals(getNodePathsPartitionPlan0, getNodePathsPartitionPlan1); - } - - @Test - public void GetAllTemplateSetInfoPlanTest() throws IOException { - GetAllTemplateSetInfoPlan getAllTemplateSetInfoPlan = new GetAllTemplateSetInfoPlan(); - Assert.assertTrue( - ConfigPhysicalPlan.Factory.create(getAllTemplateSetInfoPlan.serializeToByteBuffer()) - instanceof GetAllTemplateSetInfoPlan); - } - @Test public void SetSchemaTemplatePlanTest() throws IOException { SetSchemaTemplatePlan setSchemaTemplatePlanPlan0 = @@ -1037,16 +748,6 @@ public void SetSchemaTemplatePlanTest() throws IOException { setSchemaTemplatePlanPlan0.getPath().equals(setSchemaTemplatePlanPlan1.getPath())); } - @Test - public void ShowPathSetTemplatePlanTest() throws IOException { - GetPathsSetTemplatePlan getPathsSetTemplatePlan0 = - new GetPathsSetTemplatePlan("template_name_test", ALL_MATCH_SCOPE); - GetPathsSetTemplatePlan getPathsSetTemplatePlan1 = - (GetPathsSetTemplatePlan) - ConfigPhysicalPlan.Factory.create(getPathsSetTemplatePlan0.serializeToByteBuffer()); - Assert.assertEquals(getPathsSetTemplatePlan0.getName(), getPathsSetTemplatePlan1.getName()); - } - @Test public void DropSchemaTemplateTest() throws IOException { DropSchemaTemplatePlan dropSchemaTemplatePlan = new 
DropSchemaTemplatePlan("template"); @@ -1191,9 +892,10 @@ public void AlterPipePlanV2Test() throws IOException { @Test public void SetPipeStatusPlanV2Test() throws IOException { - SetPipeStatusPlanV2 setPipeStatusPlanV2 = - new SetPipeStatusPlanV2("pipe", org.apache.iotdb.commons.pipe.task.meta.PipeStatus.RUNNING); - SetPipeStatusPlanV2 setPipeStatusPlanV21 = + final SetPipeStatusPlanV2 setPipeStatusPlanV2 = + new SetPipeStatusPlanV2( + "pipe", org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus.RUNNING); + final SetPipeStatusPlanV2 setPipeStatusPlanV21 = (SetPipeStatusPlanV2) ConfigPhysicalPlan.Factory.create(setPipeStatusPlanV2.serializeToByteBuffer()); Assert.assertEquals(setPipeStatusPlanV2.getPipeName(), setPipeStatusPlanV21.getPipeName()); @@ -1240,7 +942,7 @@ public void OperateMultiplePipesPlanV2Test() throws IOException { SetPipeStatusPlanV2 setPipeStatusPlanV2 = new SetPipeStatusPlanV2( - "testSet", org.apache.iotdb.commons.pipe.task.meta.PipeStatus.RUNNING); + "testSet", org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus.RUNNING); List subPlans = new ArrayList<>(); subPlans.add(createPipePlanV2); @@ -1257,20 +959,6 @@ public void OperateMultiplePipesPlanV2Test() throws IOException { operateMultiplePipesPlanV2.getSubPlans(), operateMultiplePipesPlanV21.getSubPlans()); } - @Test - public void ShowPipePlanTest() throws IOException { - ShowPipePlanV1 showPipePlan = new ShowPipePlanV1("demo"); - ShowPipePlanV1 showPipePlan1 = - (ShowPipePlanV1) ConfigPhysicalPlan.Factory.create(showPipePlan.serializeToByteBuffer()); - Assert.assertEquals(showPipePlan.getPipeName(), showPipePlan1.getPipeName()); - ShowPipePlanV1 showPipePlanWithNullName = new ShowPipePlanV1(); - ShowPipePlanV1 showPipePlanWithNullName1 = - (ShowPipePlanV1) - ConfigPhysicalPlan.Factory.create(showPipePlanWithNullName.serializeToByteBuffer()); - Assert.assertEquals( - showPipePlanWithNullName.getPipeName(), showPipePlanWithNullName1.getPipeName()); - } - @Test public void CreatePipePluginPlanTest() throws IOException { CreatePipePluginPlan createPipePluginPlan = @@ -1447,26 +1135,6 @@ public void ConsumerGroupHandleMetaChangePlanTest() throws IOException { consumerGroupHandleMetaChangePlan1.getConsumerGroupMetaList()); } - @Test - public void GetTriggerTablePlanTest() throws IOException { - GetTriggerTablePlan getTriggerTablePlan0 = new GetTriggerTablePlan(true); - GetTriggerTablePlan getTriggerTablePlan1 = - (GetTriggerTablePlan) - ConfigPhysicalPlan.Factory.create(getTriggerTablePlan0.serializeToByteBuffer()); - Assert.assertEquals( - getTriggerTablePlan0.isOnlyStateful(), getTriggerTablePlan1.isOnlyStateful()); - } - - @Test - public void GetTriggerLocationPlanTest() throws IOException { - GetTriggerLocationPlan getTriggerLocationPlan0 = new GetTriggerLocationPlan("test1"); - GetTriggerLocationPlan getTriggerLocationPlan1 = - (GetTriggerLocationPlan) - ConfigPhysicalPlan.Factory.create(getTriggerLocationPlan0.serializeToByteBuffer()); - Assert.assertEquals( - getTriggerLocationPlan0.getTriggerName(), getTriggerLocationPlan1.getTriggerName()); - } - @Test public void AddTriggerInTablePlanTest() throws IOException, IllegalPathException { TriggerInformation triggerInformation = @@ -1566,15 +1234,6 @@ public void DropCQPlanTest() throws IOException { Assert.assertEquals(dropCQPlan0, dropCQPlan1); } - @Test - public void ShowCQPlanTest() throws IOException { - ShowCQPlan showCQPlan0 = new ShowCQPlan(); - ShowCQPlan showCQPlan1 = - (ShowCQPlan) 
ConfigPhysicalPlan.Factory.create(showCQPlan0.serializeToByteBuffer()); - - Assert.assertEquals(showCQPlan0, showCQPlan1); - } - @Test public void UpdateCQLastExecTimePlanTest() throws IOException { UpdateCQLastExecTimePlan updateCQLastExecTimePlan0 = @@ -1586,85 +1245,6 @@ public void UpdateCQLastExecTimePlanTest() throws IOException { Assert.assertEquals(updateCQLastExecTimePlan0, updateCQLastExecTimePlan1); } - @Test - public void GetTriggerJarPlanTest() throws IOException { - List jarNames = new ArrayList<>(); - jarNames.add("test1"); - jarNames.add("test2"); - GetTriggerJarPlan getTriggerJarPlan0 = new GetTriggerJarPlan(jarNames); - - GetTriggerJarPlan getTriggerJarPlan1 = - (GetTriggerJarPlan) - ConfigPhysicalPlan.Factory.create(getTriggerJarPlan0.serializeToByteBuffer()); - Assert.assertEquals(getTriggerJarPlan0.getJarNames(), getTriggerJarPlan1.getJarNames()); - } - - @Test - public void GetRegionIdPlanTest() throws IOException { - GetRegionIdPlan getRegionIdPlan0 = new GetRegionIdPlan(ConfigRegion); - GetRegionIdPlan getRegionIdPlan1 = - (GetRegionIdPlan) - ConfigPhysicalPlan.Factory.create(getRegionIdPlan0.serializeToByteBuffer()); - Assert.assertEquals(getRegionIdPlan0, getRegionIdPlan1); - } - - @Test - public void GetTimeSlotListPlanTest() throws IOException { - GetTimeSlotListPlan getTimeSlotListPlan0 = new GetTimeSlotListPlan(0, Long.MAX_VALUE); - GetTimeSlotListPlan getTimeSlotListPlan1 = - (GetTimeSlotListPlan) - ConfigPhysicalPlan.Factory.create(getTimeSlotListPlan0.serializeToByteBuffer()); - Assert.assertEquals(getTimeSlotListPlan0, getTimeSlotListPlan1); - } - - @Test - public void CountTimeSlotListPlanTest() throws IOException { - CountTimeSlotListPlan countTimeSlotListPlan0 = new CountTimeSlotListPlan(0, Long.MAX_VALUE); - CountTimeSlotListPlan countTimeSlotListPlan1 = - (CountTimeSlotListPlan) - ConfigPhysicalPlan.Factory.create(countTimeSlotListPlan0.serializeToByteBuffer()); - Assert.assertEquals(countTimeSlotListPlan0, countTimeSlotListPlan1); - } - - @Test - public void GetSeriesSlotListPlanTest() throws IOException { - GetSeriesSlotListPlan getSeriesSlotListPlan0 = - new GetSeriesSlotListPlan("root.test", SchemaRegion); - GetSeriesSlotListPlan getSeriesSlotListPlan1 = - (GetSeriesSlotListPlan) - ConfigPhysicalPlan.Factory.create(getSeriesSlotListPlan0.serializeToByteBuffer()); - Assert.assertEquals(getSeriesSlotListPlan0, getSeriesSlotListPlan1); - } - - @Test - public void GetPipePluginJarPlanTest() throws IOException { - List jarNames = new ArrayList<>(); - jarNames.add("org.apache.testJar"); - jarNames.add("org.apache.testJar2"); - GetPipePluginJarPlan getPipePluginJarPlan0 = new GetPipePluginJarPlan(jarNames); - GetPipePluginJarPlan getPipePluginJarPlan1 = - (GetPipePluginJarPlan) - ConfigPhysicalPlan.Factory.create(getPipePluginJarPlan0.serializeToByteBuffer()); - Assert.assertEquals(getPipePluginJarPlan0, getPipePluginJarPlan1); - } - - @Test - public void GetPipePluginTablePlanTest() throws IOException { - GetPipePluginTablePlan getPipePluginTablePlan0 = new GetPipePluginTablePlan(); - GetPipePluginTablePlan getPipePluginTablePlan1 = - (GetPipePluginTablePlan) - ConfigPhysicalPlan.Factory.create(getPipePluginTablePlan0.serializeToByteBuffer()); - Assert.assertEquals(getPipePluginTablePlan0, getPipePluginTablePlan1); - } - - @Test - public void ShowPipePlanV2Test() throws IOException { - ShowPipePlanV2 showPipePlanV2 = new ShowPipePlanV2(); - ShowPipePlanV2 showPipePlanV21 = - (ShowPipePlanV2) 
ConfigPhysicalPlan.Factory.create(showPipePlanV2.serializeToByteBuffer()); - Assert.assertEquals(showPipePlanV2, showPipePlanV21); - } - @Test public void RemoveDataNodePlanTest() throws IOException { List locations = new ArrayList<>(); @@ -1742,34 +1322,6 @@ public void UpdateTriggerLocationPlanTest() throws IOException { Assert.assertEquals(plan0.getDataNodeLocation(), plan1.getDataNodeLocation()); } - @Test - public void GetTransferringTriggersPlanTest() throws IOException { - GetTransferringTriggersPlan getTransferringTriggerPlan0 = new GetTransferringTriggersPlan(); - Assert.assertTrue( - ConfigPhysicalPlan.Factory.create(getTransferringTriggerPlan0.serializeToByteBuffer()) - instanceof GetTransferringTriggersPlan); - } - - @Test - public void GetUDFTablePlanTest() throws IOException { - GetFunctionTablePlan getUDFTablePlan0 = new GetFunctionTablePlan(); - Assert.assertTrue( - ConfigPhysicalPlan.Factory.create(getUDFTablePlan0.serializeToByteBuffer()) - instanceof GetFunctionTablePlan); - } - - @Test - public void GetUDFJarPlanTest() throws IOException { - List jarNames = new ArrayList<>(); - jarNames.add("test1"); - jarNames.add("test2"); - GetUDFJarPlan getUDFJarPlan0 = new GetUDFJarPlan(jarNames); - - GetUDFJarPlan getUDFJarPlan1 = - (GetUDFJarPlan) ConfigPhysicalPlan.Factory.create(getUDFJarPlan0.serializeToByteBuffer()); - Assert.assertEquals(getUDFJarPlan0.getJarNames(), getUDFJarPlan1.getJarNames()); - } - @Test public void CreateFunctionPlanTest() throws IOException { UDFInformation udfInformation = diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/consensus/response/pipe/PipePluginTableRespTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/consensus/response/pipe/PipePluginTableRespTest.java index f2e6b92fbbb87..fd1d0af980bc0 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/consensus/response/pipe/PipePluginTableRespTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/consensus/response/pipe/PipePluginTableRespTest.java @@ -19,9 +19,9 @@ package org.apache.iotdb.confignode.consensus.response.pipe; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.pipe.plugin.builtin.extractor.iotdb.IoTDBExtractor; -import org.apache.iotdb.commons.pipe.plugin.builtin.processor.donothing.DoNothingProcessor; -import org.apache.iotdb.commons.pipe.plugin.meta.PipePluginMeta; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.processor.donothing.DoNothingProcessor; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.source.iotdb.IoTDBSource; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.PipePluginMeta; import org.apache.iotdb.confignode.consensus.response.pipe.plugin.PipePluginTableResp; import org.apache.iotdb.confignode.rpc.thrift.TGetPipePluginTableResp; import org.apache.iotdb.rpc.TSStatusCode; @@ -40,7 +40,7 @@ public class PipePluginTableRespTest { public void testConvertToThriftResponse() throws IOException { TSStatus status = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); List pipePluginMetaList = new ArrayList<>(); - pipePluginMetaList.add(new PipePluginMeta("iotdb-extractor", IoTDBExtractor.class.getName())); + pipePluginMetaList.add(new PipePluginMeta("iotdb-extractor", IoTDBSource.class.getName())); pipePluginMetaList.add( new PipePluginMeta("do-nothing-processor", DoNothingProcessor.class.getName())); PipePluginTableResp pipePluginTableResp = new PipePluginTableResp(status, pipePluginMetaList); diff --git 
a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/consensus/response/pipe/PipeTableRespTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/consensus/response/pipe/PipeTableRespTest.java index 144b988df1e99..94189a19d9977 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/consensus/response/pipe/PipeTableRespTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/consensus/response/pipe/PipeTableRespTest.java @@ -20,10 +20,10 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeRuntimeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeRuntimeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; import org.apache.iotdb.confignode.consensus.response.pipe.task.PipeTableResp; import org.apache.iotdb.rpc.TSStatusCode; diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/ProcedureManagerTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/ProcedureManagerTest.java new file mode 100644 index 0000000000000..09a8acf16f4f3 --- /dev/null +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/ProcedureManagerTest.java @@ -0,0 +1,198 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.manager; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; +import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.cluster.NodeStatus; +import org.apache.iotdb.confignode.manager.load.LoadManager; +import org.apache.iotdb.confignode.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.ProcedureExecutor; +import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; +import org.apache.iotdb.confignode.procedure.env.RemoveDataNodeHandler; +import org.apache.iotdb.confignode.procedure.impl.node.RemoveDataNodesProcedure; +import org.apache.iotdb.confignode.procedure.impl.region.RegionMigrateProcedure; +import org.apache.iotdb.confignode.procedure.impl.region.RegionMigrationPlan; + +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +import static org.apache.iotdb.db.service.RegionMigrateService.isFailed; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +public class ProcedureManagerTest { + + private static ProcedureManager PROCEDURE_MANAGER; + + private static RemoveDataNodeHandler REMOVE_DATA_NODE_HANDLER; + + private static LoadManager LOAD_MANAGER; + + private static final ConcurrentHashMap<Long, Procedure<ConfigNodeProcedureEnv>> procedureMap = + new ConcurrentHashMap<>(); + + private final TConsensusGroupId consensusGroupId = + new TConsensusGroupId(TConsensusGroupType.DataRegion, 1); + + private final TDataNodeLocation removeDataNodeLocationA = + new TDataNodeLocation( + 10, + new TEndPoint("127.0.0.1", 6667), + new TEndPoint("127.0.0.1", 6668), + new TEndPoint("127.0.0.1", 6669), + new TEndPoint("127.0.0.1", 6670), + new TEndPoint("127.0.0.1", 6671)); + + private final TDataNodeLocation removeDataNodeLocationB = + new TDataNodeLocation( + 11, + new TEndPoint("127.0.0.1", 6677), + new TEndPoint("127.0.0.1", 6678), + new TEndPoint("127.0.0.1", 6679), + new TEndPoint("127.0.0.1", 6680), + new TEndPoint("127.0.0.1", 6681)); + + private final TDataNodeLocation toDataNodeLocation = + new TDataNodeLocation( + 12, + new TEndPoint("127.0.0.1", 6687), + new TEndPoint("127.0.0.1", 6688), + new TEndPoint("127.0.0.1", 6689), + new TEndPoint("127.0.0.1", 6690), + new TEndPoint("127.0.0.1", 6691)); + + private final TDataNodeLocation coordinatorDataNodeLocation = + new TDataNodeLocation( + 13, + new TEndPoint("127.0.0.1", 6697), + new TEndPoint("127.0.0.1", 6698), + new TEndPoint("127.0.0.1", 6699), + new TEndPoint("127.0.0.1", 6700), + new TEndPoint("127.0.0.1", 6701)); + + private final List<TDataNodeLocation> removedDataNodes = + new ArrayList<>(Arrays.asList(removeDataNodeLocationA, removeDataNodeLocationB)); + + @BeforeClass + public static void setUp() throws IOException { + IManager CONFIG_MANAGER = new ConfigManager(); + ProcedureManager procedureManager = CONFIG_MANAGER.getProcedureManager(); + PROCEDURE_MANAGER = spy(procedureManager); + + ProcedureExecutor<ConfigNodeProcedureEnv> procedureExecutor = PROCEDURE_MANAGER.getExecutor(); + ProcedureExecutor<ConfigNodeProcedureEnv> PROCEDURE_EXECUTOR = spy(procedureExecutor); + + ConfigNodeProcedureEnv env = PROCEDURE_MANAGER.getEnv();
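+ // Spy the procedure env and its RemoveDataNodeHandler as well, so that the region lookups + // used by checkRemoveDataNodes (getRemovedDataNodesRegionSet, getRegionMigrationPlans, + // getRelatedDataNodeLocations) can be stubbed per test case below.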
+ ConfigNodeProcedureEnv ENV = spy(env); + + RemoveDataNodeHandler removeDataNodeHandler = ENV.getRemoveDataNodeHandler(); + REMOVE_DATA_NODE_HANDLER = spy(removeDataNodeHandler); + + LoadManager loadManager = CONFIG_MANAGER.getLoadManager(); + LOAD_MANAGER = spy(loadManager); + + when(PROCEDURE_MANAGER.getExecutor()).thenReturn(PROCEDURE_EXECUTOR); + when(PROCEDURE_EXECUTOR.getProcedures()).thenReturn(procedureMap); + when(PROCEDURE_MANAGER.getEnv()).thenReturn(ENV); + when(ENV.getRemoveDataNodeHandler()).thenReturn(REMOVE_DATA_NODE_HANDLER); + } + + @Test + public void testCheckRemoveDataNodeWithAnotherRemoveProcedure() { + Map<Integer, NodeStatus> nodeStatusMap = new HashMap<>(); + nodeStatusMap.put(10, NodeStatus.Running); + RemoveDataNodesProcedure anotherRemoveProcedure = + new RemoveDataNodesProcedure(removedDataNodes, nodeStatusMap); + procedureMap.put(0L, anotherRemoveProcedure); + + TSStatus status = PROCEDURE_MANAGER.checkRemoveDataNodes(removedDataNodes); + Assert.assertTrue(isFailed(status)); + } + + @Test + public void testCheckRemoveDataNodeWithConflictRegionMigrateProcedure() { + RegionMigrateProcedure regionMigrateProcedure = + new RegionMigrateProcedure( + consensusGroupId, + removeDataNodeLocationA, + removeDataNodeLocationB, + coordinatorDataNodeLocation, + coordinatorDataNodeLocation); + procedureMap.put(0L, regionMigrateProcedure); + + Set<TConsensusGroupId> set = new HashSet<>(); + set.add(consensusGroupId); + when(REMOVE_DATA_NODE_HANDLER.getRemovedDataNodesRegionSet(removedDataNodes)).thenReturn(set); + + TSStatus status = PROCEDURE_MANAGER.checkRemoveDataNodes(removedDataNodes); + Assert.assertTrue(isFailed(status)); + } + + @Test + public void testCheckRemoveDataNodeWithRegionMigrateProcedureConflictsWithEachOther() { + RegionMigrationPlan regionMigrationPlanA = + new RegionMigrationPlan(consensusGroupId, removeDataNodeLocationA); + regionMigrationPlanA.setToDataNode(toDataNodeLocation); + RegionMigrationPlan regionMigrationPlanB = + new RegionMigrationPlan(consensusGroupId, removeDataNodeLocationB); + regionMigrationPlanB.setToDataNode(toDataNodeLocation); + + List<RegionMigrationPlan> regionMigrationPlans = + new ArrayList<>(Arrays.asList(regionMigrationPlanA, regionMigrationPlanB)); + when(REMOVE_DATA_NODE_HANDLER.getRegionMigrationPlans(removedDataNodes)) + .thenReturn(regionMigrationPlans); + + TSStatus status = PROCEDURE_MANAGER.checkRemoveDataNodes(removedDataNodes); + Assert.assertTrue(isFailed(status)); + } + + @Test + public void testCheckRemoveDataNodeWithAnotherUnknownDataNode() { + Set<TDataNodeLocation> relatedDataNodes = new HashSet<>(); + relatedDataNodes.add(removeDataNodeLocationA); + relatedDataNodes.add(coordinatorDataNodeLocation); + + when(REMOVE_DATA_NODE_HANDLER.getRelatedDataNodeLocations(removeDataNodeLocationA)) + .thenReturn(relatedDataNodes); + + when(LOAD_MANAGER.getNodeStatus(removeDataNodeLocationA.getDataNodeId())) + .thenReturn(NodeStatus.Running); + when(LOAD_MANAGER.getNodeStatus(coordinatorDataNodeLocation.getDataNodeId())) + .thenReturn(NodeStatus.Unknown); + + TSStatus status = PROCEDURE_MANAGER.checkRemoveDataNodes(removedDataNodes); + Assert.assertTrue(isFailed(status)); + } +} diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/LoadManagerTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/LoadManagerTest.java index 12b3dbb82339b..fd62c31ae3678 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/LoadManagerTest.java +++ 
b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/LoadManagerTest.java @@ -89,7 +89,7 @@ public void testNodeCache() throws InterruptedException { // Simulate update to Running status LOAD_CACHE.cacheConfigNodeHeartbeatSample(0, new NodeHeartbeatSample(NodeStatus.Running)); LOAD_CACHE.cacheDataNodeHeartbeatSample(1, new NodeHeartbeatSample(NodeStatus.Running)); - LOAD_CACHE.updateNodeStatistics(); + LOAD_CACHE.updateNodeStatistics(false); LOAD_MANAGER.getEventService().checkAndBroadcastNodeStatisticsChangeEventIfNecessary(); NODE_SEMAPHORE.acquire(); Assert.assertEquals(NodeStatus.Running, LOAD_CACHE.getNodeStatus(0)); @@ -117,7 +117,7 @@ public void testNodeCache() throws InterruptedException { // Removing status can't be updated to any other status automatically LOAD_CACHE.cacheDataNodeHeartbeatSample(1, new NodeHeartbeatSample(NodeStatus.ReadOnly)); - LOAD_CACHE.updateNodeStatistics(); + LOAD_CACHE.updateNodeStatistics(false); LOAD_MANAGER.getEventService().checkAndBroadcastNodeStatisticsChangeEventIfNecessary(); Assert.assertEquals(NodeStatus.Removing, LOAD_CACHE.getNodeStatus(1)); @@ -188,7 +188,7 @@ public void testRegionGroupCache() throws InterruptedException { Assert.assertEquals( new Pair<>( new RegionGroupStatistics(RegionGroupStatus.Running, allRunningRegionStatisticsMap), - new RegionGroupStatistics(RegionGroupStatus.Disabled, oneRemovingRegionStatisticsMap)), + new RegionGroupStatistics(RegionGroupStatus.Running, oneRemovingRegionStatisticsMap)), differentRegionGroupStatisticsMap.get(regionGroupId)); // Add and mark Region 3 as Adding int addDataNodeId = 3; @@ -203,8 +203,8 @@ public void testRegionGroupCache() throws InterruptedException { oneAddingRegionStatisticsMap.put(addDataNodeId, new RegionStatistics(RegionStatus.Adding)); Assert.assertEquals( new Pair<>( - new RegionGroupStatistics(RegionGroupStatus.Disabled, oneRemovingRegionStatisticsMap), - new RegionGroupStatistics(RegionGroupStatus.Disabled, oneAddingRegionStatisticsMap)), + new RegionGroupStatistics(RegionGroupStatus.Running, oneRemovingRegionStatisticsMap), + new RegionGroupStatistics(RegionGroupStatus.Running, oneAddingRegionStatisticsMap)), differentRegionGroupStatisticsMap.get(regionGroupId)); // Both Region 0 and 3 can't be updated LOAD_CACHE.cacheRegionHeartbeatSample( @@ -226,8 +226,8 @@ public void testRegionGroupCache() throws InterruptedException { oneRemovingRegionStatisticsMap.put(addDataNodeId, new RegionStatistics(RegionStatus.Running)); Assert.assertEquals( new Pair<>( - new RegionGroupStatistics(RegionGroupStatus.Disabled, oneAddingRegionStatisticsMap), - new RegionGroupStatistics(RegionGroupStatus.Disabled, oneRemovingRegionStatisticsMap)), + new RegionGroupStatistics(RegionGroupStatus.Running, oneAddingRegionStatisticsMap), + new RegionGroupStatistics(RegionGroupStatus.Running, oneRemovingRegionStatisticsMap)), differentRegionGroupStatisticsMap.get(regionGroupId)); // Removing process completed LOAD_MANAGER.removeRegionCache(regionGroupId, removeDataNodeId); @@ -237,7 +237,7 @@ public void testRegionGroupCache() throws InterruptedException { allRunningRegionStatisticsMap.put(addDataNodeId, new RegionStatistics(RegionStatus.Running)); Assert.assertEquals( new Pair<>( - new RegionGroupStatistics(RegionGroupStatus.Disabled, oneRemovingRegionStatisticsMap), + new RegionGroupStatistics(RegionGroupStatus.Running, oneRemovingRegionStatisticsMap), new RegionGroupStatistics(RegionGroupStatus.Running, allRunningRegionStatisticsMap)), 
differentRegionGroupStatisticsMap.get(regionGroupId)); } diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/AllocatorScatterWidthManualTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/AllocatorScatterWidthManualTest.java index b159525268fe1..4a4a47dabd0a1 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/AllocatorScatterWidthManualTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/AllocatorScatterWidthManualTest.java @@ -24,7 +24,6 @@ import org.apache.iotdb.common.rpc.thrift.TDataNodeConfiguration; import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; -import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.junit.Assert; import org.junit.BeforeClass; @@ -51,8 +50,7 @@ public class AllocatorScatterWidthManualTest { private static final IRegionGroupAllocator ALLOCATOR = new GreedyRegionGroupAllocator(); private static final int TEST_DATA_NODE_NUM = 50; - private static final int DATA_REGION_PER_DATA_NODE = - (int) ConfigNodeDescriptor.getInstance().getConf().getDataRegionPerDataNode(); + private static final int DATA_REGION_PER_DATA_NODE = 5; private static final int DATA_REPLICATION_FACTOR = 3; private static final Map AVAILABLE_DATA_NODE_MAP = diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyCopySetRegionGroupAllocatorTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyCopySetRegionGroupAllocatorTest.java index 95a23ab3e6477..b807b98986a48 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyCopySetRegionGroupAllocatorTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyCopySetRegionGroupAllocatorTest.java @@ -24,7 +24,6 @@ import org.apache.iotdb.common.rpc.thrift.TDataNodeConfiguration; import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; -import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.junit.Assert; import org.junit.BeforeClass; @@ -54,8 +53,7 @@ public class GreedyCopySetRegionGroupAllocatorTest { private static final Random RANDOM = new Random(); private static final int TEST_DATABASE_NUM = 3; private static final int TEST_DATA_NODE_NUM = 21; - private static final int DATA_REGION_PER_DATA_NODE = - (int) ConfigNodeDescriptor.getInstance().getConf().getDataRegionPerDataNode(); + private static final int DATA_REGION_PER_DATA_NODE = 5; private static final Map AVAILABLE_DATA_NODE_MAP = new HashMap<>(); private static final Map FREE_SPACE_MAP = new HashMap<>(); diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/GreedyPriorityTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/GreedyPriorityTest.java index cd578aba1f92b..48104bf78a3e2 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/GreedyPriorityTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/GreedyPriorityTest.java @@ -36,7 +36,6 @@ import 
java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; public class GreedyPriorityTest { @@ -66,19 +65,13 @@ public void testGenLoadScoreGreedyRoutingPolicy() { nodeCacheMap.put(i, new DataNodeHeartbeatCache(i)); nodeCacheMap.get(i).cacheHeartbeatSample(new NodeHeartbeatSample(currentTimeNs, statuses[i])); } - nodeCacheMap.values().forEach(BaseNodeCache::updateCurrentStatistics); - - /* Get the loadScoreMap */ - Map loadScoreMap = new ConcurrentHashMap<>(); - nodeCacheMap.forEach( - (dataNodeId, heartbeatCache) -> - loadScoreMap.put(dataNodeId, heartbeatCache.getLoadScore())); + nodeCacheMap.values().forEach(baseNodeCache -> baseNodeCache.updateCurrentStatistics(false)); /* Build TRegionReplicaSet */ TConsensusGroupId groupId1 = new TConsensusGroupId(TConsensusGroupType.SchemaRegion, 1); TRegionReplicaSet regionReplicaSet1 = new TRegionReplicaSet( - groupId1, Arrays.asList(dataNodeLocations.get(1), dataNodeLocations.get(0))); + groupId1, Arrays.asList(dataNodeLocations.get(0), dataNodeLocations.get(1))); TConsensusGroupId groupId2 = new TConsensusGroupId(TConsensusGroupType.DataRegion, 2); TRegionReplicaSet regionReplicaSet2 = new TRegionReplicaSet( @@ -88,7 +81,7 @@ public void testGenLoadScoreGreedyRoutingPolicy() { Map result = new GreedyPriorityBalancer() .generateOptimalRoutePriority( - Arrays.asList(regionReplicaSet1, regionReplicaSet2), new HashMap<>(), loadScoreMap); + Arrays.asList(regionReplicaSet1, regionReplicaSet2), new HashMap<>()); Assert.assertEquals(2, result.size()); TRegionReplicaSet result1 = result.get(groupId1); @@ -97,7 +90,7 @@ public void testGenLoadScoreGreedyRoutingPolicy() { } TRegionReplicaSet result2 = result.get(groupId2); - for (int i = 3; i < 4; i++) { + for (int i = 2; i < 4; i++) { Assert.assertEquals(dataNodeLocations.get(i), result2.getDataNodeLocations().get(i - 2)); } } diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/LeaderPriorityBalancerTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/LeaderPriorityBalancerTest.java index 6f77d45960399..5af3a677aab88 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/LeaderPriorityBalancerTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/priority/LeaderPriorityBalancerTest.java @@ -33,11 +33,9 @@ import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; public class LeaderPriorityBalancerTest { @@ -67,13 +65,7 @@ public void testGenRealTimeRoutingPolicy() { .cacheHeartbeatSample(new NodeHeartbeatSample(currentTimeNs, NodeStatus.Running)); } } - nodeCacheMap.values().forEach(BaseNodeCache::updateCurrentStatistics); - - // Get the loadScoreMap - Map loadScoreMap = new ConcurrentHashMap<>(); - nodeCacheMap.forEach( - (dataNodeId, heartbeatCache) -> - loadScoreMap.put(dataNodeId, heartbeatCache.getLoadScore())); + nodeCacheMap.values().forEach(baseNodeCache -> baseNodeCache.updateCurrentStatistics(false)); // Build TRegionReplicaSet TConsensusGroupId groupId1 = new TConsensusGroupId(TConsensusGroupType.SchemaRegion, 1); @@ -81,13 +73,13 @@ public void testGenRealTimeRoutingPolicy() { new TRegionReplicaSet( groupId1, Arrays.asList( - dataNodeLocations.get(2), 
dataNodeLocations.get(1), dataNodeLocations.get(0))); + dataNodeLocations.get(0), dataNodeLocations.get(1), dataNodeLocations.get(2))); TConsensusGroupId groupId2 = new TConsensusGroupId(TConsensusGroupType.DataRegion, 2); TRegionReplicaSet regionReplicaSet2 = new TRegionReplicaSet( groupId2, Arrays.asList( - dataNodeLocations.get(5), dataNodeLocations.get(4), dataNodeLocations.get(3))); + dataNodeLocations.get(3), dataNodeLocations.get(4), dataNodeLocations.get(5))); List regionReplicaSets = Arrays.asList(regionReplicaSet1, regionReplicaSet2); // Build leaderMap @@ -97,67 +89,16 @@ public void testGenRealTimeRoutingPolicy() { // Check result Map result = - new LeaderPriorityBalancer() - .generateOptimalRoutePriority(regionReplicaSets, leaderMap, loadScoreMap); + new LeaderPriorityBalancer().generateOptimalRoutePriority(regionReplicaSets, leaderMap); TRegionReplicaSet result1 = result.get(groupId1); // Leader first Assert.assertEquals(dataNodeLocations.get(1), result1.getDataNodeLocations().get(0)); - // The others will be sorted by loadScore Assert.assertEquals(dataNodeLocations.get(0), result1.getDataNodeLocations().get(1)); Assert.assertEquals(dataNodeLocations.get(2), result1.getDataNodeLocations().get(2)); TRegionReplicaSet result2 = result.get(groupId2); // Leader first Assert.assertEquals(dataNodeLocations.get(4), result2.getDataNodeLocations().get(0)); - // The others will be sorted by loadScore Assert.assertEquals(dataNodeLocations.get(3), result2.getDataNodeLocations().get(1)); Assert.assertEquals(dataNodeLocations.get(5), result2.getDataNodeLocations().get(2)); } - - @Test - public void testLeaderUnavailable() { - // Build TDataNodeLocations - List dataNodeLocations = new ArrayList<>(); - for (int i = 0; i < 3; i++) { - dataNodeLocations.add( - new TDataNodeLocation( - i, - new TEndPoint("0.0.0.0", 6667 + i), - new TEndPoint("0.0.0.0", 10730 + i), - new TEndPoint("0.0.0.0", 10740 + i), - new TEndPoint("0.0.0.0", 10760 + i), - new TEndPoint("0.0.0.0", 10750 + i))); - } - - // Build TRegionReplicaSet - TConsensusGroupId groupId1 = new TConsensusGroupId(TConsensusGroupType.SchemaRegion, 1); - TRegionReplicaSet regionReplicaSet1 = - new TRegionReplicaSet( - groupId1, - Arrays.asList( - dataNodeLocations.get(2), dataNodeLocations.get(1), dataNodeLocations.get(0))); - - // Build leaderMap - Map leaderMap = new HashMap<>(); - leaderMap.put(groupId1, 1); - - // Build loadScoreMap - Map loadScoreMap = new ConcurrentHashMap<>(); - loadScoreMap.put(0, 10L); - loadScoreMap.put(2, 20L); - // The leader is DataNode-1, but it's unavailable - loadScoreMap.put(1, Long.MAX_VALUE); - - // Check result - Map result = - new LeaderPriorityBalancer() - .generateOptimalRoutePriority( - Collections.singletonList(regionReplicaSet1), leaderMap, loadScoreMap); - // Only sorted by loadScore since the leader is unavailable - Assert.assertEquals( - dataNodeLocations.get(0), result.get(groupId1).getDataNodeLocations().get(0)); - Assert.assertEquals( - dataNodeLocations.get(2), result.get(groupId1).getDataNodeLocations().get(1)); - Assert.assertEquals( - dataNodeLocations.get(1), result.get(groupId1).getDataNodeLocations().get(2)); - } } diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/cache/ConsensusGroupCacheTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/cache/ConsensusGroupCacheTest.java index 7bbd366be5799..caa3e8394399c 100644 --- 
a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/cache/ConsensusGroupCacheTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/cache/ConsensusGroupCacheTest.java @@ -31,7 +31,7 @@ public void periodicUpdateTest() { ConsensusGroupCache consensusGroupCache = new ConsensusGroupCache(); ConsensusGroupHeartbeatSample sample = new ConsensusGroupHeartbeatSample(1L, 1); consensusGroupCache.cacheHeartbeatSample(sample); - consensusGroupCache.updateCurrentStatistics(); + consensusGroupCache.updateCurrentStatistics(false); Assert.assertEquals(1, consensusGroupCache.getLeaderId()); } } diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/cache/NodeCacheTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/cache/NodeCacheTest.java index eef3e9b9b6d7f..a400692956621 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/cache/NodeCacheTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/cache/NodeCacheTest.java @@ -35,7 +35,7 @@ public void updateStatisticsTest() { long currentTime = System.nanoTime(); dataNodeHeartbeatCache.cacheHeartbeatSample( new NodeHeartbeatSample(currentTime, NodeStatus.Running)); - dataNodeHeartbeatCache.updateCurrentStatistics(); + dataNodeHeartbeatCache.updateCurrentStatistics(false); Assert.assertEquals(NodeStatus.Running, dataNodeHeartbeatCache.getNodeStatus()); Assert.assertEquals(0, dataNodeHeartbeatCache.getLoadScore()); @@ -44,7 +44,7 @@ public void updateStatisticsTest() { currentTime = System.nanoTime(); configNodeHeartbeatCache.cacheHeartbeatSample( new NodeHeartbeatSample(currentTime, NodeStatus.Running)); - configNodeHeartbeatCache.updateCurrentStatistics(); + configNodeHeartbeatCache.updateCurrentStatistics(false); Assert.assertEquals(NodeStatus.Running, configNodeHeartbeatCache.getNodeStatus()); Assert.assertEquals(0, configNodeHeartbeatCache.getLoadScore()); } diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/cache/RegionGroupCacheTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/cache/RegionGroupCacheTest.java index e340c347e11be..69f073aad8b5b 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/cache/RegionGroupCacheTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/cache/RegionGroupCacheTest.java @@ -37,7 +37,7 @@ public class RegionGroupCacheTest { public void getRegionStatusTest() { long currentTime = System.nanoTime(); RegionGroupCache regionGroupCache = - new RegionGroupCache(DATABASE, Stream.of(0, 1, 2, 3).collect(Collectors.toSet())); + new RegionGroupCache(DATABASE, Stream.of(0, 1, 2, 3, 4).collect(Collectors.toSet()), false); regionGroupCache.cacheHeartbeatSample( 0, new RegionHeartbeatSample(currentTime, RegionStatus.Running)); regionGroupCache.cacheHeartbeatSample( @@ -46,6 +46,8 @@ public void getRegionStatusTest() { 2, new RegionHeartbeatSample(currentTime, RegionStatus.Removing)); regionGroupCache.cacheHeartbeatSample( 3, new RegionHeartbeatSample(currentTime, RegionStatus.ReadOnly)); + regionGroupCache.cacheHeartbeatSample( + 4, new RegionHeartbeatSample(currentTime, RegionStatus.Adding)); regionGroupCache.updateCurrentStatistics(); Assert.assertEquals( @@ -56,74 +58,110 @@ public void getRegionStatusTest() { RegionStatus.Removing, 
regionGroupCache.getCurrentStatistics().getRegionStatus(2)); Assert.assertEquals( RegionStatus.ReadOnly, regionGroupCache.getCurrentStatistics().getRegionStatus(3)); + Assert.assertEquals( + RegionStatus.Adding, regionGroupCache.getCurrentStatistics().getRegionStatus(4)); } @Test - public void getRegionGroupStatusTest() { + public void weakConsistencyRegionGroupStatusTest() { long currentTime = System.nanoTime(); - RegionGroupCache runningRegionGroup = - new RegionGroupCache(DATABASE, Stream.of(0, 1, 2).collect(Collectors.toSet())); - runningRegionGroup.cacheHeartbeatSample( + RegionGroupCache regionGroupCache = + new RegionGroupCache(DATABASE, Stream.of(0, 1, 2).collect(Collectors.toSet()), false); + regionGroupCache.cacheHeartbeatSample( 0, new RegionHeartbeatSample(currentTime, RegionStatus.Running)); - runningRegionGroup.cacheHeartbeatSample( + regionGroupCache.cacheHeartbeatSample( 1, new RegionHeartbeatSample(currentTime, RegionStatus.Running)); - runningRegionGroup.cacheHeartbeatSample( + regionGroupCache.cacheHeartbeatSample( 2, new RegionHeartbeatSample(currentTime, RegionStatus.Running)); - runningRegionGroup.updateCurrentStatistics(); + regionGroupCache.updateCurrentStatistics(); Assert.assertEquals( - RegionGroupStatus.Running, - runningRegionGroup.getCurrentStatistics().getRegionGroupStatus()); + RegionGroupStatus.Running, regionGroupCache.getCurrentStatistics().getRegionGroupStatus()); - RegionGroupCache availableRegionGroup = - new RegionGroupCache(DATABASE, Stream.of(0, 1, 2).collect(Collectors.toSet())); - availableRegionGroup.cacheHeartbeatSample( - 0, new RegionHeartbeatSample(currentTime, RegionStatus.Running)); - availableRegionGroup.cacheHeartbeatSample( + regionGroupCache.cacheHeartbeatSample( + 0, new RegionHeartbeatSample(currentTime, RegionStatus.Unknown)); + regionGroupCache.updateCurrentStatistics(); + Assert.assertEquals( + RegionGroupStatus.Available, + regionGroupCache.getCurrentStatistics().getRegionGroupStatus()); + + regionGroupCache.cacheHeartbeatSample( 1, new RegionHeartbeatSample(currentTime, RegionStatus.Unknown)); - availableRegionGroup.cacheHeartbeatSample( - 2, new RegionHeartbeatSample(currentTime, RegionStatus.Running)); - availableRegionGroup.updateCurrentStatistics(); + regionGroupCache.updateCurrentStatistics(); Assert.assertEquals( RegionGroupStatus.Available, - availableRegionGroup.getCurrentStatistics().getRegionGroupStatus()); + regionGroupCache.getCurrentStatistics().getRegionGroupStatus()); - RegionGroupCache disabledRegionGroup0 = - new RegionGroupCache(DATABASE, Stream.of(0, 1, 2).collect(Collectors.toSet())); - disabledRegionGroup0.cacheHeartbeatSample( + regionGroupCache.cacheHeartbeatSample( + 2, new RegionHeartbeatSample(currentTime, RegionStatus.Unknown)); + regionGroupCache.updateCurrentStatistics(); + Assert.assertEquals( + RegionGroupStatus.Disabled, regionGroupCache.getCurrentStatistics().getRegionGroupStatus()); + } + + @Test + public void strongConsistencyRegionGroupStatusTest() { + long currentTime = System.nanoTime(); + RegionGroupCache regionGroupCache = + new RegionGroupCache(DATABASE, Stream.of(0, 1, 2).collect(Collectors.toSet()), true); + regionGroupCache.cacheHeartbeatSample( 0, new RegionHeartbeatSample(currentTime, RegionStatus.Running)); - disabledRegionGroup0.cacheHeartbeatSample( - 1, new RegionHeartbeatSample(currentTime, RegionStatus.ReadOnly)); - disabledRegionGroup0.cacheHeartbeatSample( + regionGroupCache.cacheHeartbeatSample( + 1, new RegionHeartbeatSample(currentTime, RegionStatus.Running)); + 
regionGroupCache.cacheHeartbeatSample( 2, new RegionHeartbeatSample(currentTime, RegionStatus.Running)); - disabledRegionGroup0.updateCurrentStatistics(); + regionGroupCache.updateCurrentStatistics(); Assert.assertEquals( - RegionGroupStatus.Discouraged, - disabledRegionGroup0.getCurrentStatistics().getRegionGroupStatus()); + RegionGroupStatus.Running, regionGroupCache.getCurrentStatistics().getRegionGroupStatus()); - RegionGroupCache disabledRegionGroup1 = - new RegionGroupCache(DATABASE, Stream.of(0, 1, 2).collect(Collectors.toSet())); - disabledRegionGroup1.cacheHeartbeatSample( - 0, new RegionHeartbeatSample(currentTime, RegionStatus.Running)); - disabledRegionGroup1.cacheHeartbeatSample( + regionGroupCache.cacheHeartbeatSample( + 0, new RegionHeartbeatSample(currentTime, RegionStatus.Unknown)); + regionGroupCache.updateCurrentStatistics(); + Assert.assertEquals( + RegionGroupStatus.Available, + regionGroupCache.getCurrentStatistics().getRegionGroupStatus()); + + regionGroupCache.cacheHeartbeatSample( 1, new RegionHeartbeatSample(currentTime, RegionStatus.Unknown)); - disabledRegionGroup1.cacheHeartbeatSample( + regionGroupCache.updateCurrentStatistics(); + Assert.assertEquals( + RegionGroupStatus.Disabled, regionGroupCache.getCurrentStatistics().getRegionGroupStatus()); + + regionGroupCache.cacheHeartbeatSample( 2, new RegionHeartbeatSample(currentTime, RegionStatus.Unknown)); - disabledRegionGroup1.updateCurrentStatistics(); + regionGroupCache.updateCurrentStatistics(); Assert.assertEquals( - RegionGroupStatus.Disabled, - disabledRegionGroup1.getCurrentStatistics().getRegionGroupStatus()); + RegionGroupStatus.Disabled, regionGroupCache.getCurrentStatistics().getRegionGroupStatus()); + } - RegionGroupCache disabledRegionGroup2 = - new RegionGroupCache(DATABASE, Stream.of(0, 1, 2).collect(Collectors.toSet())); - disabledRegionGroup2.cacheHeartbeatSample( + @Test + public void migrateRegionRegionGroupStatusTest() { + long currentTime = System.nanoTime(); + RegionGroupCache regionGroupCache = + new RegionGroupCache(DATABASE, Stream.of(0).collect(Collectors.toSet()), true); + regionGroupCache.cacheHeartbeatSample( 0, new RegionHeartbeatSample(currentTime, RegionStatus.Running)); - disabledRegionGroup2.cacheHeartbeatSample( - 1, new RegionHeartbeatSample(currentTime, RegionStatus.Running)); - disabledRegionGroup2.cacheHeartbeatSample( - 2, new RegionHeartbeatSample(currentTime, RegionStatus.Removing)); - disabledRegionGroup2.updateCurrentStatistics(); + regionGroupCache.updateCurrentStatistics(); + Assert.assertEquals( + RegionGroupStatus.Running, regionGroupCache.getCurrentStatistics().getRegionGroupStatus()); + + regionGroupCache = + new RegionGroupCache(DATABASE, Stream.of(0, 1).collect(Collectors.toSet()), true); + regionGroupCache.cacheHeartbeatSample( + 0, new RegionHeartbeatSample(currentTime, RegionStatus.Running)); + regionGroupCache.cacheHeartbeatSample( + 1, new RegionHeartbeatSample(currentTime, RegionStatus.Adding)); + regionGroupCache.updateCurrentStatistics(); + Assert.assertEquals( + RegionGroupStatus.Running, regionGroupCache.getCurrentStatistics().getRegionGroupStatus()); + + regionGroupCache = + new RegionGroupCache(DATABASE, Stream.of(0, 1).collect(Collectors.toSet()), true); + regionGroupCache.cacheHeartbeatSample( + 0, new RegionHeartbeatSample(currentTime, RegionStatus.Running)); + regionGroupCache.cacheHeartbeatSample( + 1, new RegionHeartbeatSample(currentTime, RegionStatus.Removing)); + regionGroupCache.updateCurrentStatistics(); Assert.assertEquals( - 
RegionGroupStatus.Disabled, - disabledRegionGroup2.getCurrentStatistics().getRegionGroupStatus()); + RegionGroupStatus.Running, regionGroupCache.getCurrentStatistics().getRegionGroupStatus()); } }
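The three tests above pin down how a region group's status is derived from its replicas' statuses. A minimal sketch of the rule they imply, as an illustration only (the tests drive the real derivation through RegionGroupCache.updateCurrentStatistics(), and this sketch covers just the replica statuses these tests exercise):

static RegionGroupStatus deriveStatus(List<RegionStatus> replicas, boolean strongConsistency) {
  // Adding/Removing replicas are migration placeholders and do not vote
  final List<RegionStatus> voting =
      replicas.stream()
          .filter(s -> s != RegionStatus.Adding && s != RegionStatus.Removing)
          .collect(Collectors.toList());
  final long running = voting.stream().filter(s -> s == RegionStatus.Running).count();
  if (running == voting.size()) {
    return RegionGroupStatus.Running; // every voting replica is healthy
  }
  if (strongConsistency) {
    // a consensus group needs a live majority; losing it disables the whole group
    return running > voting.size() / 2 ? RegionGroupStatus.Available : RegionGroupStatus.Disabled;
  }
  // weak consistency can still serve as long as any replica is alive
  return running > 0 ? RegionGroupStatus.Available : RegionGroupStatus.Disabled;
}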
diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/cache/detector/DetectorTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/cache/detector/DetectorTest.java new file mode 100644 index 0000000000000..a56a716664637 --- /dev/null +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/cache/detector/DetectorTest.java @@ -0,0 +1,179 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.manager.load.cache.detector; + +import org.apache.iotdb.commons.cluster.NodeStatus; +import org.apache.iotdb.confignode.manager.load.cache.AbstractHeartbeatSample; +import org.apache.iotdb.confignode.manager.load.cache.node.NodeHeartbeatSample; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; + +public class DetectorTest { + + final long sec = 1_000_000_000L; + final FixedDetector fixedDetector = new FixedDetector(20 * sec); + final PhiAccrualDetector phiAccrualDetector = + new PhiAccrualDetector(30, 10 * sec, (long) (0.2 * sec), 0, fixedDetector); + + private double getPhi(long elapsed, double[] intervals, long minStd, long pause) { + final PhiAccrualDetector.PhiAccrual p = + new PhiAccrualDetector.PhiAccrual(intervals, elapsed, minStd, pause); + return p.phi(); + } + + private void assertInRange(double value, double start, double end) { + Assert.assertTrue(value > start); + Assert.assertTrue(value < end); + } + + @Test + public void testFixedDetector() { + final long lastHeartbeatTs = System.nanoTime() - 21 * sec; + final List<AbstractHeartbeatSample> history = + Collections.singletonList(new NodeHeartbeatSample(lastHeartbeatTs, NodeStatus.Running)); + Assert.assertFalse(fixedDetector.isAvailable(history)); + + final long lastAvailableHeartbeat = System.nanoTime() - 18 * sec; + final List<AbstractHeartbeatSample> history2 = + Collections.singletonList( + new NodeHeartbeatSample(lastAvailableHeartbeat, NodeStatus.Running)); + Assert.assertTrue(fixedDetector.isAvailable(history2)); + } + + @Test + public void testPhiCalculation1() { + /* (min, std, acceptable_pause) = (1000, 200, 0) */ + final double[] heartbeatIntervals = {1000, 1000, 1000, 1000, 1000}; + final long minStd = 200; + final long pause = 0; + + assertInRange(getPhi(1000, heartbeatIntervals, minStd, pause), 0, 1); + assertInRange(getPhi(2000, heartbeatIntervals, minStd, pause), 5, 10); + assertInRange(getPhi(3000, heartbeatIntervals, minStd, pause), 35, 50); + } + + @Test + public void testPhiCalculation2() { + /* (min, std, acceptable_pause) = (1000, 300, 0) */ + final double[] heartbeatIntervals = {1000, 1000, 1000, 1000, 1000}; + final long minStd = 300; + final long pause = 0; + + assertInRange(getPhi(1000, heartbeatIntervals, minStd, pause), 0, 1); + assertInRange(getPhi(2000, heartbeatIntervals, minStd, pause), 1, 5); + assertInRange(getPhi(3000, heartbeatIntervals, minStd, pause), 10, 15); + } + + @Test + public void testPhiCalculation3() { + /* (min, std, acceptable_pause) = (1000, 200, 3000) */ + final double[] heartbeatIntervals = {1000, 1000, 1000, 1000, 1000}; + final long minStd = 200; + final long pause = 3000; + + assertInRange(getPhi(1000 + pause, heartbeatIntervals, minStd, pause), 0, 1); + assertInRange(getPhi(2000 + pause, heartbeatIntervals, minStd, pause), 5, 10); + assertInRange(getPhi(3000 + pause, heartbeatIntervals, minStd, pause), 35, 50); + }
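The phi ranges asserted above follow the phi-accrual formulation of Hayashibara et al.: phi is -log10 of the probability that a heartbeat arrives even later than the observed silence, with the interval history summarized by its mean and a standard deviation floored at minStd, and the acceptable pause subtracted from the elapsed time first. A sketch of that arithmetic, assuming the common logistic approximation of the normal tail (as in Akka's PhiAccrualFailureDetector) rather than the exact PhiAccrual source:

// Self-contained sketch; only needs java.util.Arrays.
static double phi(double elapsed, double[] intervals, double minStd, double acceptablePause) {
  final double mean = java.util.Arrays.stream(intervals).average().orElse(0);
  final double var =
      java.util.Arrays.stream(intervals).map(i -> (i - mean) * (i - mean)).average().orElse(0);
  final double std = Math.max(Math.sqrt(var), minStd); // the floor keeps phi finite for constant intervals
  final double y = (elapsed - acceptablePause - mean) / std;
  final double e = Math.exp(-y * (1.5976 + 0.070566 * y * y)); // logistic approximation of the normal tail
  final double pLater = y > 0 ? e / (1.0 + e) : 1.0 - 1.0 / (1.0 + e);
  return -Math.log10(pLater);
}
// testPhiCalculation1: intervals all 1000 and minStd = 200 give std = 200, so elapsed = 2000
// means y = 5 and phi ~ 7.3 (inside (5, 10)), while elapsed = 3000 means y = 10 and
// phi ~ 37.6 (inside (35, 50)). The same arithmetic explains the GC-pause tests below:
// with acceptable pause 10s and minStd 0.2s, 13s of silence after 1s intervals yields
// y = 10 and phi ~ 37.6, above the threshold of 30 passed to the shared phiAccrualDetector.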
+ + /** + * When a node has not responded for longer than the accepted GC pause, Phi Accrual can detect + * the problem more quickly than the Fixed detector. In this case, the accepted pause is 10s, but + * no heartbeat has been received for 13s. + */ + @Test + public void testComparisonQuickFailureDetection() { + long[] interval = new long[] {sec, sec, sec}; + List<AbstractHeartbeatSample> history = fromInterval(interval, 13 * sec); + Assert.assertTrue(fixedDetector.isAvailable(history)); + Assert.assertFalse(phiAccrualDetector.isAvailable(history)); + } + + /** + * When the system load is high, we may observe an exceptionally long GC pause. The first such + * pause is a false positive for Phi: here the GC pause is 15s (longer than the expected 10s), so + * Phi reports a false positive. + */ + @Test + public void testFalsePositiveOnExceptionallyLongGCPause() { + long[] interval = new long[] {sec, sec, sec}; + long gcPause = 15 * sec; + List<AbstractHeartbeatSample> history = fromInterval(interval, gcPause + 2 * sec); + Assert.assertTrue(fixedDetector.isAvailable(history)); + Assert.assertFalse(phiAccrualDetector.isAvailable(history)); + } + + /** + * When the system load is high, we may observe exceptionally long GC pauses. If long GC pauses + * are frequent, Phi can adapt to the new environment: in this case, there are 2 long GC pauses + * in the history, so when a new 21s pause occurs, Phi treats it as normal while the Fixed + * detector fails. + */ + @Test + public void testPhiAdaptionToFrequentGCPause() { + long[] interval = + new long[] { + sec, + sec, + sec, + 15 * sec, + (long) (0.1 * sec), + sec, + sec, + sec, + 15 * sec, + (long) (0.1 * sec), + sec, + sec + }; + List<AbstractHeartbeatSample> history = fromInterval(interval, 21 * sec); + Assert.assertFalse(fixedDetector.isAvailable(history)); + Assert.assertTrue(phiAccrualDetector.isAvailable(history)); + } + + /** + * If the Phi detector hasn't received enough samples, its behavior falls back to the Fixed + * detector. + */ + @Test + public void testColdStart() { + final PhiAccrualDetector coldStartPhi = + new PhiAccrualDetector(30, 10 * sec, (long) (0.2 * sec), 60, fixedDetector); + long[] interval = new long[] {sec, sec, sec}; + List<AbstractHeartbeatSample> history = fromInterval(interval, 21 * sec); + Assert.assertFalse(fixedDetector.isAvailable(history)); + Assert.assertFalse(coldStartPhi.isAvailable(history)); + } + + private List<AbstractHeartbeatSample> fromInterval(long[] interval, long timeElapsed) { + long now = System.nanoTime(); + long begin = now - timeElapsed; + List<AbstractHeartbeatSample> sample = new LinkedList<>(); + sample.add(new NodeHeartbeatSample(begin, NodeStatus.Running)); + for (int i = interval.length - 1; i >= 0; i--) { + begin -= interval[i]; + sample.add(0, new NodeHeartbeatSample(begin, NodeStatus.Running)); + } + return sample; + } +}
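testColdStart above depends on the detector's fall-back path: with fewer samples than the cold-start threshold (60 there, 0 for the shared phiAccrualDetector), the phi estimate is not trusted and the wrapped detector decides. A hypothetical sketch of that rule, with all names assumed rather than taken from the implementation:

// Cold-start sketch: defer to the fallback verdict until enough samples exist.
static boolean isAvailableWithColdStart(
    int sampleCount, int coldStartThreshold, double phi, double phiThreshold, boolean fallbackVerdict) {
  return sampleCount < coldStartThreshold ? fallbackVerdict : phi < phiThreshold;
}
// In testColdStart the history holds 4 samples (< 60), so the FixedDetector's verdict is
// returned: the newest heartbeat is 21s old, past the 20s threshold, hence unavailable.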
diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/execution/PipeConfigNodeSubtaskExecutorTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/agent/PipeConfigNodeSubtaskExecutorTest.java similarity index 89% rename from iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/execution/PipeConfigNodeSubtaskExecutorTest.java rename to iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/agent/PipeConfigNodeSubtaskExecutorTest.java index 16d949d1dcbd4..36ebac63286e2 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/execution/PipeConfigNodeSubtaskExecutorTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/agent/PipeConfigNodeSubtaskExecutorTest.java @@ -17,14 +17,16 @@ * under the License. */ -package org.apache.iotdb.confignode.manager.pipe.execution; +package org.apache.iotdb.confignode.manager.pipe.agent; import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex; -import org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant; -import org.apache.iotdb.commons.pipe.execution.executor.PipeSubtaskExecutor; -import org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; -import org.apache.iotdb.commons.pipe.task.subtask.PipeSubtask; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.agent.task.execution.PipeSubtaskExecutor; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.task.subtask.PipeSubtask; +import org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant; +import org.apache.iotdb.confignode.manager.pipe.agent.task.PipeConfigNodeSubtask; +import org.apache.iotdb.confignode.manager.pipe.agent.task.PipeConfigNodeSubtaskExecutor; import org.junit.After; import org.junit.Assert; @@ -56,7 +58,7 @@ public void setUp() throws Exception { new HashMap() { { put( - PipeConnectorConstant.CONNECTOR_KEY, + PipeSinkConstant.CONNECTOR_KEY, BuiltinPipePlugin.DO_NOTHING_CONNECTOR.getPipePluginName()); } }, diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/connector/IoTDBConfigRegionConnectorTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/sink/IoTDBConfigRegionSinkTest.java similarity index 69% rename from iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/connector/IoTDBConfigRegionConnectorTest.java rename to iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/sink/IoTDBConfigRegionSinkTest.java index 30ed0ee75806a..b88862c4cae08 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/connector/IoTDBConfigRegionConnectorTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/sink/IoTDBConfigRegionSinkTest.java @@ -17,11 +17,11 @@ * under the License.
*/ -package org.apache.iotdb.confignode.manager.pipe.connector; +package org.apache.iotdb.confignode.manager.pipe.sink; -import org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant; -import org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin; -import org.apache.iotdb.confignode.manager.pipe.connector.protocol.IoTDBConfigRegionConnector; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant; +import org.apache.iotdb.confignode.manager.pipe.sink.protocol.IoTDBConfigRegionSink; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; @@ -30,21 +30,21 @@ import java.util.HashMap; -public class IoTDBConfigRegionConnectorTest { +public class IoTDBConfigRegionSinkTest { @Test public void testIoTDBSchemaConnector() { - try (IoTDBConfigRegionConnector connector = new IoTDBConfigRegionConnector()) { + try (IoTDBConfigRegionSink connector = new IoTDBConfigRegionSink()) { connector.validate( new PipeParameterValidator( new PipeParameters( new HashMap() { { put( - PipeConnectorConstant.CONNECTOR_KEY, + PipeSinkConstant.CONNECTOR_KEY, BuiltinPipePlugin.IOTDB_LEGACY_PIPE_CONNECTOR.getPipePluginName()); - put(PipeConnectorConstant.CONNECTOR_IOTDB_IP_KEY, "127.0.0.1"); - put(PipeConnectorConstant.CONNECTOR_IOTDB_PORT_KEY, "6668"); + put(PipeSinkConstant.CONNECTOR_IOTDB_IP_KEY, "127.0.0.1"); + put(PipeSinkConstant.CONNECTOR_IOTDB_PORT_KEY, "6668"); } }))); } catch (Exception e) { diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/connector/PipeConfigNodeThriftRequestTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/sink/PipeConfigNodeThriftRequestTest.java similarity index 84% rename from iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/connector/PipeConfigNodeThriftRequestTest.java rename to iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/sink/PipeConfigNodeThriftRequestTest.java index c2c04f9452990..f014a8ae22a33 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/connector/PipeConfigNodeThriftRequestTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/sink/PipeConfigNodeThriftRequestTest.java @@ -17,13 +17,13 @@ * under the License. 
*/ -package org.apache.iotdb.confignode.manager.pipe.connector; +package org.apache.iotdb.confignode.manager.pipe.sink; import org.apache.iotdb.confignode.consensus.request.write.cq.ActiveCQPlan; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigNodeHandshakeV1Req; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigPlanReq; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigSnapshotPieceReq; -import org.apache.iotdb.confignode.manager.pipe.connector.payload.PipeTransferConfigSnapshotSealReq; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigNodeHandshakeV1Req; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigPlanReq; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigSnapshotPieceReq; +import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigSnapshotSealReq; import org.apache.iotdb.confignode.persistence.schema.CNSnapshotFileType; import org.junit.Assert; @@ -44,7 +44,6 @@ public void testPipeTransferConfigHandshakeReq() throws IOException { Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); Assert.assertEquals(req.getTimestampPrecision(), deserializeReq.getTimestampPrecision()); } @@ -57,7 +56,6 @@ public void testPipeTransferConfigPlanReq() { Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); } @Test @@ -72,7 +70,6 @@ public void testPipeTransferConfigSnapshotPieceReq() throws IOException { Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); Assert.assertEquals(req.getFileName(), deserializeReq.getFileName()); Assert.assertEquals(req.getStartWritingOffset(), deserializeReq.getStartWritingOffset()); @@ -95,7 +92,6 @@ public void testPipeTransferConfigSnapshotSealReq() throws IOException { Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); Assert.assertEquals(req.getFileNames(), deserializeReq.getFileNames()); Assert.assertEquals(req.getFileLengths(), deserializeReq.getFileLengths()); diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/extractor/IoTDBConfigRegionExtractorTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/source/IoTDBConfigRegionSourceTest.java similarity index 64% rename from iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/extractor/IoTDBConfigRegionExtractorTest.java rename to iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/source/IoTDBConfigRegionSourceTest.java index f3ccf42a0c228..15051d436d8b2 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/extractor/IoTDBConfigRegionExtractorTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/source/IoTDBConfigRegionSourceTest.java @@ -17,9 +17,9 @@ * under the License. 
*/ -package org.apache.iotdb.confignode.manager.pipe.extractor; +package org.apache.iotdb.confignode.manager.pipe.source; -import org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant; +import org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; @@ -28,27 +28,25 @@ import java.util.HashMap; -public class IoTDBConfigRegionExtractorTest { +public class IoTDBConfigRegionSourceTest { @Test public void testIoTDBConfigExtractor() { - try (final IoTDBConfigRegionExtractor extractor = new IoTDBConfigRegionExtractor()) { + try (final IoTDBConfigRegionSource extractor = new IoTDBConfigRegionSource()) { extractor.validate( new PipeParameterValidator( new PipeParameters( new HashMap() { { + put(PipeSourceConstant.EXTRACTOR_HISTORY_ENABLE_KEY, Boolean.TRUE.toString()); put( - PipeExtractorConstant.EXTRACTOR_HISTORY_ENABLE_KEY, + PipeSourceConstant.EXTRACTOR_REALTIME_ENABLE_KEY, Boolean.TRUE.toString()); put( - PipeExtractorConstant.EXTRACTOR_REALTIME_ENABLE_KEY, - Boolean.TRUE.toString()); - put( - PipeExtractorConstant.EXTRACTOR_REALTIME_MODE_KEY, - PipeExtractorConstant.EXTRACTOR_REALTIME_MODE_HYBRID_VALUE); + PipeSourceConstant.EXTRACTOR_REALTIME_MODE_KEY, + PipeSourceConstant.EXTRACTOR_REALTIME_MODE_HYBRID_VALUE); put( - PipeExtractorConstant.EXTRACTOR_INCLUSION_KEY, - PipeExtractorConstant.EXTRACTOR_INCLUSION_DEFAULT_VALUE); + PipeSourceConstant.EXTRACTOR_INCLUSION_KEY, + PipeSourceConstant.EXTRACTOR_INCLUSION_DEFAULT_VALUE); } }))); } catch (final Exception e) { diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/extractor/PipeConfigPhysicalPlanPatternParseVisitorTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigPhysicalPlanPatternParseVisitorTest.java similarity index 91% rename from iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/extractor/PipeConfigPhysicalPlanPatternParseVisitorTest.java rename to iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigPhysicalPlanPatternParseVisitorTest.java index 8d38006cdf99c..ea14e12bd9e6d 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/extractor/PipeConfigPhysicalPlanPatternParseVisitorTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigPhysicalPlanPatternParseVisitorTest.java @@ -17,14 +17,14 @@ * under the License. 
*/ -package org.apache.iotdb.confignode.manager.pipe.extractor; +package org.apache.iotdb.confignode.manager.pipe.source; import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathPatternTree; -import org.apache.iotdb.commons.pipe.pattern.IoTDBPipePattern; +import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBPipePattern; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; -import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan; +import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.consensus.request.write.database.DatabaseSchemaPlan; import org.apache.iotdb.confignode.consensus.request.write.database.DeleteDatabasePlan; import org.apache.iotdb.confignode.consensus.request.write.database.SetTTLPlan; @@ -68,11 +68,11 @@ public void testCreateDatabase() { Assert.assertEquals( createDatabasePlan, - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitCreateDatabase(createDatabasePlan, prefixPathPattern) .orElseThrow(AssertionError::new)); Assert.assertFalse( - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitCreateDatabase(createDatabasePlanToFilter, prefixPathPattern) .isPresent()); } @@ -88,11 +88,11 @@ public void testAlterDatabase() { Assert.assertEquals( alterDatabasePlan, - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitAlterDatabase(alterDatabasePlan, prefixPathPattern) .orElseThrow(AssertionError::new)); Assert.assertFalse( - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitAlterDatabase(alterDatabasePlanToFilter, prefixPathPattern) .isPresent()); } @@ -104,11 +104,11 @@ public void testDeleteDatabase() { Assert.assertEquals( deleteDatabasePlan, - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitDeleteDatabase(deleteDatabasePlan, prefixPathPattern) .orElseThrow(AssertionError::new)); Assert.assertFalse( - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitDeleteDatabase(deleteDatabasePlanToFilter, prefixPathPattern) .isPresent()); } @@ -128,13 +128,13 @@ public void testCreateSchemaTemplate() throws IllegalPathException { Assert.assertEquals( createSchemaTemplatePlan, - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitCreateSchemaTemplate(createSchemaTemplatePlan, prefixPathPattern) .orElseThrow(AssertionError::new)); final CreateSchemaTemplatePlan parsedTemplatePlan = (CreateSchemaTemplatePlan) - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitCreateSchemaTemplate(createSchemaTemplatePlan, fullPathPattern) .orElseThrow(AssertionError::new); Assert.assertEquals( @@ -153,12 +153,12 @@ public void testCommitSetSchemaTemplate() { Assert.assertEquals( setSchemaTemplatePlanOnPrefix, - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitCommitSetSchemaTemplate(setSchemaTemplatePlanOnPrefix, fullPathPattern) .orElseThrow(AssertionError::new)); Assert.assertEquals( setSchemaTemplatePlanOnFullPath, - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR 
.visitCommitSetSchemaTemplate(setSchemaTemplatePlanOnFullPath, fullPathPattern) .orElseThrow(AssertionError::new)); } @@ -172,12 +172,12 @@ public void testPipeUnsetSchemaTemplate() { Assert.assertEquals( pipeUnsetSchemaTemplatePlanOnPrefix, - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitPipeUnsetSchemaTemplate(pipeUnsetSchemaTemplatePlanOnPrefix, fullPathPattern) .orElseThrow(AssertionError::new)); Assert.assertEquals( pipeUnsetSchemaTemplatePlanOrFullPath, - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitPipeUnsetSchemaTemplate(pipeUnsetSchemaTemplatePlanOrFullPath, fullPathPattern) .orElseThrow(AssertionError::new)); } @@ -195,7 +195,7 @@ public void testExtendSchemaTemplate() { final ExtendSchemaTemplatePlan parsedTemplatePlan = (ExtendSchemaTemplatePlan) - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitExtendSchemaTemplate(extendSchemaTemplatePlan, fullPathPattern) .orElseThrow(AssertionError::new); Assert.assertEquals( @@ -220,7 +220,7 @@ public void testGrantUser() throws IllegalPathException { Assert.assertEquals( Collections.singletonList(new PartialPath("root.db.device.**")), ((AuthorPlan) - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitGrantUser( new AuthorPlan( ConfigPhysicalPlanType.GrantUser, @@ -242,7 +242,7 @@ public void testRevokeUser() throws IllegalPathException { Assert.assertEquals( Collections.singletonList(new PartialPath("root.db.device.**")), ((AuthorPlan) - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitRevokeUser( new AuthorPlan( ConfigPhysicalPlanType.RevokeUser, @@ -264,7 +264,7 @@ public void testGrantRole() throws IllegalPathException { Assert.assertEquals( Collections.singletonList(new PartialPath("root.db.device.**")), ((AuthorPlan) - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitGrantRole( new AuthorPlan( ConfigPhysicalPlanType.GrantRole, @@ -286,7 +286,7 @@ public void testRevokeRole() throws IllegalPathException { Assert.assertEquals( Collections.singletonList(new PartialPath("root.db.device.**")), ((AuthorPlan) - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitRevokeRole( new AuthorPlan( ConfigPhysicalPlanType.RevokeRole, @@ -313,7 +313,7 @@ public void testPipeDeleteTimeSeries() throws IllegalPathException, IOException Collections.singletonList(new PartialPath("root.db.device.s1")), PathPatternTree.deserialize( ((PipeDeleteTimeSeriesPlan) - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitPipeDeleteTimeSeries( new PipeDeleteTimeSeriesPlan(patternTree.serialize()), prefixPathPattern) @@ -331,8 +331,8 @@ public void testPipeDeleteLogicalView() throws IllegalPathException, IOException Assert.assertEquals( Collections.singletonList(new PartialPath("root.db.device.s1")), PathPatternTree.deserialize( - ((PipeDeleteTimeSeriesPlan) - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + ((PipeDeleteLogicalViewPlan) + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitPipeDeleteLogicalView( new PipeDeleteLogicalViewPlan(patternTree.serialize()), prefixPathPattern) @@ -356,7 +356,7 @@ public void testPipeDeactivateTemplate() throws IllegalPathException { } }, ((PipeDeactivateTemplatePlan) - 
IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitPipeDeactivateTemplate( new PipeDeactivateTemplatePlan( new HashMap<PartialPath, List<Template>>() { @@ -387,7 +387,7 @@ private Template newSchemaTemplate(final String name) throws IllegalPathExceptio public void testSetTTL() throws IllegalPathException { final SetTTLPlan plan = ((SetTTLPlan) - IoTDBConfigRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBConfigRegionSource.PATTERN_PARSE_VISITOR .visitTTL( new SetTTLPlan(Arrays.asList("root", "db", "**"), Long.MAX_VALUE), prefixPathPattern) diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/AuthorInfoTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/AuthorInfoTest.java index 12bab54572a92..963808bf7cc14 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/AuthorInfoTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/AuthorInfoTest.java @@ -29,7 +29,8 @@ import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.utils.AuthUtils; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; -import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan; +import org.apache.iotdb.confignode.consensus.request.read.auth.AuthorReadPlan; +import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.consensus.response.auth.PermissionInfoResp; import org.apache.iotdb.confignode.rpc.thrift.TCheckUserPrivilegesReq; import org.apache.iotdb.confignode.rpc.thrift.TPermissionInfoResp; @@ -106,6 +107,7 @@ public void permissionTest() throws TException, AuthException, IllegalPathExcept roleList.add("role1"); AuthorPlan authorPlan; + AuthorReadPlan authorReadPlan; TCheckUserPrivilegesReq checkUserPrivilegesReq; Set privilegeList = new HashSet<>(); @@ -167,8 +169,8 @@ public void permissionTest() throws TException, AuthException, IllegalPathExcept Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); // list user - authorPlan = - new AuthorPlan( + authorReadPlan = + new AuthorReadPlan( ConfigPhysicalPlanType.ListUser, "", "", @@ -177,7 +179,7 @@ public void permissionTest() throws TException, AuthException, IllegalPathExcept new HashSet<>(), false, new ArrayList<>()); - PermissionInfoResp permissionInfoResp = authorInfo.executeListUsers(authorPlan); + PermissionInfoResp permissionInfoResp = authorInfo.executeListUsers(authorReadPlan); status = permissionInfoResp.getStatus(); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); userList.remove("user1"); @@ -215,8 +217,8 @@ public void permissionTest() throws TException, AuthException, IllegalPathExcept Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); // list role - authorPlan = - new AuthorPlan( + authorReadPlan = + new AuthorReadPlan( ConfigPhysicalPlanType.ListRole, "", "", @@ -225,7 +227,7 @@ public void permissionTest() throws TException, AuthException, IllegalPathExcept new HashSet<>(), false, new ArrayList<>()); - permissionInfoResp = authorInfo.executeListRoles(authorPlan); + permissionInfoResp = authorInfo.executeListRoles(authorReadPlan); status = permissionInfoResp.getStatus(); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); roleList.remove("role1"); @@ -343,8 +345,8 @@ public void permissionTest() throws TException, AuthException, IllegalPathExcept
Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); // list privileges user - authorPlan = - new AuthorPlan( + authorReadPlan = + new AuthorReadPlan( ConfigPhysicalPlanType.ListUserPrivilege, "user0", "", @@ -353,15 +355,15 @@ public void permissionTest() throws TException, AuthException, IllegalPathExcept new HashSet<>(), false, new ArrayList<>()); - permissionInfoResp = authorInfo.executeListUserPrivileges(authorPlan); + permissionInfoResp = authorInfo.executeListUserPrivileges(authorReadPlan); status = permissionInfoResp.getStatus(); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); Assert.assertEquals( authorInfo.getUserPermissionInfo("user0"), permissionInfoResp.getPermissionInfoResp()); // list privileges role - authorPlan = - new AuthorPlan( + authorReadPlan = + new AuthorReadPlan( ConfigPhysicalPlanType.ListRolePrivilege, "", "role0", @@ -370,13 +372,13 @@ public void permissionTest() throws TException, AuthException, IllegalPathExcept new HashSet<>(), false, new ArrayList<>()); - permissionInfoResp = authorInfo.executeListRolePrivileges(authorPlan); + permissionInfoResp = authorInfo.executeListRolePrivileges(authorReadPlan); status = permissionInfoResp.getStatus(); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); // list all role of user - authorPlan = - new AuthorPlan( + authorReadPlan = + new AuthorReadPlan( ConfigPhysicalPlanType.ListRole, "user0", "", @@ -385,15 +387,15 @@ public void permissionTest() throws TException, AuthException, IllegalPathExcept new HashSet<>(), false, new ArrayList<>()); - permissionInfoResp = authorInfo.executeListRoles(authorPlan); + permissionInfoResp = authorInfo.executeListRoles(authorReadPlan); status = permissionInfoResp.getStatus(); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); roleList.remove("role1"); Assert.assertEquals(roleList, permissionInfoResp.getMemberList()); // list all user of role - authorPlan = - new AuthorPlan( + authorReadPlan = + new AuthorReadPlan( ConfigPhysicalPlanType.ListUser, "", "role0", @@ -402,7 +404,7 @@ public void permissionTest() throws TException, AuthException, IllegalPathExcept new HashSet<>(), false, new ArrayList<>()); - permissionInfoResp = authorInfo.executeListUsers(authorPlan); + permissionInfoResp = authorInfo.executeListUsers(authorReadPlan); status = permissionInfoResp.getStatus(); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); userList.remove("user1"); @@ -427,9 +429,12 @@ public void permissionTest() throws TException, AuthException, IllegalPathExcept private void cleanUserAndRole() throws TException, AuthException { TSStatus status; + AuthorPlan authorPlan; + AuthorReadPlan authorReadPlan; + // clean user - AuthorPlan authorPlan = - new AuthorPlan( + authorReadPlan = + new AuthorReadPlan( ConfigPhysicalPlanType.ListUser, "", "", @@ -438,7 +443,7 @@ private void cleanUserAndRole() throws TException, AuthException { new HashSet<>(), false, new ArrayList<>()); - PermissionInfoResp permissionInfoResp = authorInfo.executeListUsers(authorPlan); + PermissionInfoResp permissionInfoResp = authorInfo.executeListUsers(authorReadPlan); status = permissionInfoResp.getStatus(); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); @@ -461,8 +466,8 @@ private void cleanUserAndRole() throws TException, AuthException { } // clean role - authorPlan = - new AuthorPlan( + authorReadPlan = + new AuthorReadPlan( 
ConfigPhysicalPlanType.ListRole, "", "", @@ -471,7 +476,7 @@ private void cleanUserAndRole() throws TException, AuthException { new HashSet<>(), false, new ArrayList<>()); - permissionInfoResp = authorInfo.executeListRoles(authorPlan); + permissionInfoResp = authorInfo.executeListRoles(authorReadPlan); status = permissionInfoResp.getStatus(); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); @@ -506,8 +511,8 @@ public void takeSnapshot() throws TException, IOException, AuthException { status = authorInfo.authorNonQuery(createUserReq); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); - AuthorPlan listUserPlan = - new AuthorPlan( + AuthorReadPlan listUserPlan = + new AuthorReadPlan( ConfigPhysicalPlanType.ListUser, "", "", @@ -516,8 +521,8 @@ public void takeSnapshot() throws TException, IOException, AuthException { new HashSet<>(), false, new ArrayList<>()); - AuthorPlan listRolePlan = - new AuthorPlan( + AuthorReadPlan listRolePlan = + new AuthorReadPlan( ConfigPhysicalPlanType.ListRole, "", "", @@ -540,6 +545,7 @@ public void testMultPathsPermission() throws TException, AuthException, IllegalP TSStatus status; AuthorPlan authorPlan; + AuthorReadPlan authorReadPlan; Set privilegeList = new HashSet<>(); privilegeList.add(PrivilegeType.WRITE_DATA.ordinal()); @@ -636,8 +642,8 @@ public void testMultPathsPermission() throws TException, AuthException, IllegalP Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); // list privileges user - authorPlan = - new AuthorPlan( + authorReadPlan = + new AuthorReadPlan( ConfigPhysicalPlanType.ListUserPrivilege, "user0", "", @@ -647,13 +653,13 @@ public void testMultPathsPermission() throws TException, AuthException, IllegalP false, new ArrayList<>()); PermissionInfoResp permissionInfoResp; - permissionInfoResp = authorInfo.executeListUserPrivileges(authorPlan); + permissionInfoResp = authorInfo.executeListUserPrivileges(authorReadPlan); status = permissionInfoResp.getStatus(); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); // list privileges role - authorPlan = - new AuthorPlan( + authorReadPlan = + new AuthorReadPlan( ConfigPhysicalPlanType.ListRolePrivilege, "", "role0", @@ -662,7 +668,7 @@ public void testMultPathsPermission() throws TException, AuthException, IllegalP new HashSet<>(), false, new ArrayList<>()); - permissionInfoResp = authorInfo.executeListRolePrivileges(authorPlan); + permissionInfoResp = authorInfo.executeListRolePrivileges(authorReadPlan); status = permissionInfoResp.getStatus(); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); } @@ -737,7 +743,7 @@ public void testDepAuthorPlan() throws TException, AuthException, IllegalPathExc 3, authorInfo .executeListUsers( - new AuthorPlan( + new AuthorReadPlan( ConfigPhysicalPlanType.ListUser, "", "", @@ -766,7 +772,7 @@ public void testDepAuthorPlan() throws TException, AuthException, IllegalPathExc 2, authorInfo .executeListUsers( - new AuthorPlan( + new AuthorReadPlan( ConfigPhysicalPlanType.ListUserDep, "", "", @@ -851,7 +857,7 @@ public void testDepAuthorPlan() throws TException, AuthException, IllegalPathExc 1, authorInfo .executeListRoles( - new AuthorPlan( + new AuthorReadPlan( ConfigPhysicalPlanType.ListRoleDep, "", "", diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/CNPhysicalPlanGeneratorTest.java 
b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/CNPhysicalPlanGeneratorTest.java index 09645a6dbe617..960233dbba767 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/CNPhysicalPlanGeneratorTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/CNPhysicalPlanGeneratorTest.java @@ -28,7 +28,7 @@ import org.apache.iotdb.commons.utils.FileUtils; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; -import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan; +import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.consensus.request.write.database.DatabaseSchemaPlan; import org.apache.iotdb.confignode.consensus.request.write.database.SetTTLPlan; import org.apache.iotdb.confignode.consensus.request.write.template.CommitSetSchemaTemplatePlan; diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/ConfigRegionListeningQueueTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/ConfigRegionListeningQueueTest.java index 937c1a9e2dda9..dec72d181f1bb 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/ConfigRegionListeningQueueTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/ConfigRegionListeningQueueTest.java @@ -22,7 +22,7 @@ import org.apache.iotdb.commons.auth.AuthException; import org.apache.iotdb.commons.pipe.datastructure.queue.ConcurrentIterableLinkedQueue; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; -import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan; +import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.consensus.request.write.database.DatabaseSchemaPlan; import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeEnrichedPlan; import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent; diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/PipeInfoTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/PipeInfoTest.java index f3571bed57ea6..c3e7916108fe6 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/PipeInfoTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/PipeInfoTest.java @@ -20,11 +20,11 @@ package org.apache.iotdb.confignode.persistence; import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex; -import org.apache.iotdb.commons.pipe.plugin.meta.PipePluginMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeRuntimeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStatus; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.PipePluginMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeRuntimeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; import org.apache.iotdb.confignode.consensus.request.write.pipe.plugin.CreatePipePluginPlan; import 
org.apache.iotdb.confignode.consensus.request.write.pipe.plugin.DropPipePluginPlan; import org.apache.iotdb.confignode.consensus.request.write.pipe.task.CreatePipePlanV2; diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/UpgradeFromWALToConsensusLayerTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/UpgradeFromWALToConsensusLayerTest.java index 35d08059b91c4..e38e207a68fd0 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/UpgradeFromWALToConsensusLayerTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/UpgradeFromWALToConsensusLayerTest.java @@ -22,6 +22,7 @@ import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.commons.cluster.NodeStatus; import org.apache.iotdb.commons.utils.FileUtils; import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; @@ -31,7 +32,7 @@ import org.apache.iotdb.confignode.persistence.ProcedureInfo; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.procedure.impl.node.AddConfigNodeProcedure; -import org.apache.iotdb.confignode.procedure.impl.node.RemoveDataNodeProcedure; +import org.apache.iotdb.confignode.procedure.impl.node.RemoveDataNodesProcedure; import org.apache.iotdb.confignode.procedure.impl.testonly.NeverFinishProcedure; import org.apache.iotdb.confignode.procedure.store.ConfigProcedureStore; import org.apache.iotdb.confignode.rpc.thrift.TNodeVersionInfo; @@ -48,7 +49,9 @@ import java.io.File; import java.io.IOException; import java.util.Arrays; +import java.util.HashMap; import java.util.List; +import java.util.Map; public class UpgradeFromWALToConsensusLayerTest { private static final Logger LOGGER = @@ -90,18 +93,30 @@ public void test() throws IOException, ConsensusException, InterruptedException ProcedureInfo procedureInfo = configManager.getProcedureManager().getStore().getProcedureInfo(); ConfigProcedureStore.createOldProcWalDir(); - - // prepare procedures - RemoveDataNodeProcedure removeDataNodeProcedure = - new RemoveDataNodeProcedure( + List removedDataNodes = + Arrays.asList( new TDataNodeLocation( 10000, new TEndPoint("127.0.0.1", 6600), new TEndPoint("127.0.0.1", 7700), new TEndPoint("127.0.0.1", 8800), new TEndPoint("127.0.0.1", 9900), - new TEndPoint("127.0.0.1", 11000))); - removeDataNodeProcedure.setProcId(10086); + new TEndPoint("127.0.0.1", 11000)), + new TDataNodeLocation( + 10001, + new TEndPoint("127.0.0.1", 6601), + new TEndPoint("127.0.0.1", 7701), + new TEndPoint("127.0.0.1", 8801), + new TEndPoint("127.0.0.1", 9901), + new TEndPoint("127.0.0.1", 11001))); + + // prepare procedures + Map nodeStatusMap = new HashMap<>(); + nodeStatusMap.put(10000, NodeStatus.Running); + nodeStatusMap.put(10001, NodeStatus.Running); + RemoveDataNodesProcedure removeDataNodesProcedure = + new RemoveDataNodesProcedure(removedDataNodes, nodeStatusMap); + removeDataNodesProcedure.setProcId(10086); AddConfigNodeProcedure addConfigNodeProcedure = new AddConfigNodeProcedure( new TConfigNodeLocation( @@ -111,7 +126,7 @@ public void test() throws IOException, ConsensusException, InterruptedException List procedureList = Arrays.asList( new NeverFinishProcedure(0), - removeDataNodeProcedure, + removeDataNodesProcedure, new NeverFinishProcedure(199), 
addConfigNodeProcedure, new NeverFinishProcedure(29999)); diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/IncProcedure.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/IncProcedure.java index 2a86110825d37..43d2f5d465929 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/IncProcedure.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/IncProcedure.java @@ -21,8 +21,6 @@ import org.apache.iotdb.confignode.procedure.Procedure; import org.apache.iotdb.confignode.procedure.env.TestProcEnv; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import java.io.DataOutputStream; import java.io.IOException; @@ -33,8 +31,7 @@ public class IncProcedure extends Procedure { public boolean throwEx = false; @Override - protected Procedure[] execute(TestProcEnv testProcEnv) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + protected Procedure[] execute(TestProcEnv testProcEnv) throws InterruptedException { AtomicInteger acc = testProcEnv.getAcc(); if (throwEx) { throw new RuntimeException("throw a EXCEPTION"); @@ -51,11 +48,6 @@ protected void rollback(TestProcEnv testProcEnv) throws IOException, Interrupted testProcEnv.rolledBackCount.getAndIncrement(); } - @Override - protected boolean abort(TestProcEnv testProcEnv) { - return true; - } - @Override public void serialize(DataOutputStream stream) throws IOException { stream.writeInt(TestProcedureFactory.TestProcedureType.INC_PROCEDURE.ordinal()); diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/NoopProcedure.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/NoopProcedure.java index bdaf0401cc8fe..550a191950648 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/NoopProcedure.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/NoopProcedure.java @@ -21,24 +21,16 @@ import org.apache.iotdb.confignode.procedure.Procedure; import org.apache.iotdb.confignode.procedure.env.TestProcEnv; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import java.io.IOException; public class NoopProcedure extends Procedure { @Override - protected Procedure[] execute(TestProcEnv testProcEnv) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + protected Procedure[] execute(TestProcEnv testProcEnv) throws InterruptedException { return new Procedure[0]; } @Override protected void rollback(TestProcEnv testProcEnv) throws IOException, InterruptedException {} - - @Override - protected boolean abort(TestProcEnv testProcEnv) { - return false; - } } diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SimpleLockProcedure.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SimpleLockProcedure.java index 564b980794202..ce9fea39d5589 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SimpleLockProcedure.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SimpleLockProcedure.java @@ -21,8 +21,6 @@ import 
org.apache.iotdb.confignode.procedure.Procedure; import org.apache.iotdb.confignode.procedure.env.TestProcEnv; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.scheduler.SimpleProcedureScheduler; import org.apache.iotdb.confignode.procedure.state.ProcedureLockState; @@ -39,8 +37,7 @@ public SimpleLockProcedure(String procName) { } @Override - protected Procedure[] execute(TestProcEnv testProcEnv) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + protected Procedure[] execute(TestProcEnv testProcEnv) throws InterruptedException { testProcEnv.executeSeq.append(procName); return null; } @@ -48,11 +45,6 @@ protected Procedure[] execute(TestProcEnv testProcEnv) @Override protected void rollback(TestProcEnv testProcEnv) throws IOException, InterruptedException {} - @Override - protected boolean abort(TestProcEnv testProcEnv) { - return false; - } - @Override protected ProcedureLockState acquireLock(TestProcEnv testProcEnv) { if (testProcEnv.getEnvLock().tryLock()) { diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SimpleSTMProcedure.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SimpleSTMProcedure.java index 6464ee018a04c..764d3c09729e8 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SimpleSTMProcedure.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SimpleSTMProcedure.java @@ -21,8 +21,6 @@ import org.apache.iotdb.confignode.procedure.env.TestProcEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; import java.io.IOException; @@ -41,7 +39,7 @@ public enum TestState { @Override protected Flow executeFromState(TestProcEnv testProcEnv, TestState testState) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws InterruptedException { AtomicInteger acc = testProcEnv.getAcc(); try { switch (testState) { diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SleepProcedure.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SleepProcedure.java index f3b2abf054a57..1dd978515b423 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SleepProcedure.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SleepProcedure.java @@ -21,8 +21,6 @@ import org.apache.iotdb.confignode.procedure.Procedure; import org.apache.iotdb.confignode.procedure.env.TestProcEnv; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.util.ProcedureTestUtil; import java.io.DataOutputStream; @@ -30,8 +28,7 @@ public class SleepProcedure extends Procedure { @Override - protected Procedure[] execute(TestProcEnv testProcEnv) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + protected Procedure[] execute(TestProcEnv 
testProcEnv) throws InterruptedException { System.out.println("Procedure is sleeping."); ProcedureTestUtil.sleepWithoutInterrupt(2000); return null; @@ -40,11 +37,6 @@ protected Procedure[] execute(TestProcEnv testProcEnv) @Override protected void rollback(TestProcEnv testProcEnv) throws IOException, InterruptedException {} - @Override - protected boolean abort(TestProcEnv testProcEnv) { - return false; - } - @Override public void serialize(DataOutputStream stream) throws IOException { stream.writeInt(TestProcedureFactory.TestProcedureType.SLEEP_PROCEDURE.ordinal()); diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/StuckProcedure.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/StuckProcedure.java index 0238b6a33bcba..1351d216beefc 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/StuckProcedure.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/StuckProcedure.java @@ -53,9 +53,4 @@ protected Procedure[] execute(final TestProcEnv env) { @Override protected void rollback(TestProcEnv testProcEnv) throws IOException, InterruptedException {} - - @Override - protected boolean abort(TestProcEnv testProcEnv) { - return false; - } } diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/StuckSTMProcedure.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/StuckSTMProcedure.java index 41dd5a342ea90..0b1b6aa424b34 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/StuckSTMProcedure.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/StuckSTMProcedure.java @@ -21,8 +21,6 @@ import org.apache.iotdb.confignode.procedure.env.TestProcEnv; import org.apache.iotdb.confignode.procedure.exception.ProcedureException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; import java.io.DataOutputStream; @@ -48,7 +46,7 @@ public enum TestState { @Override protected Flow executeFromState(TestProcEnv testProcEnv, TestState testState) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws InterruptedException { AtomicInteger acc = testProcEnv.getAcc(); try { switch (testState) { diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/CreateCQProcedureTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/CreateCQProcedureTest.java index 9915b7217f1b3..d0e92b3281666 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/CreateCQProcedureTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/CreateCQProcedureTest.java @@ -67,7 +67,7 @@ public void serializeDeserializeTest() { Mockito.when(cqManager.getExecutor()).thenReturn(executor); ConfigManager configManager = Mockito.mock(ConfigManager.class); Mockito.when(configManager.getCQManager()).thenReturn(cqManager); - ConfigNode configNode = ConfigNode.getInstance(); + ConfigNode configNode = new ConfigNode(); configNode.setConfigManager(configManager); try { diff --git 
a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodeProcedureTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodeProcedureTest.java deleted file mode 100644 index 9633c3bc35d7c..0000000000000 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodeProcedureTest.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.confignode.procedure.impl.node; - -import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; -import org.apache.iotdb.common.rpc.thrift.TEndPoint; -import org.apache.iotdb.confignode.procedure.store.ProcedureFactory; - -import org.apache.tsfile.utils.PublicBAOS; -import org.junit.Assert; -import org.junit.Test; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; - -public class RemoveDataNodeProcedureTest { - - @Test - public void serDeTest() throws IOException { - RemoveDataNodeProcedure procedure0 = - new RemoveDataNodeProcedure( - new TDataNodeLocation( - 10, - new TEndPoint("127.0.0.1", 6667), - new TEndPoint("127.0.0.1", 6668), - new TEndPoint("127.0.0.1", 6669), - new TEndPoint("127.0.0.1", 6670), - new TEndPoint("127.0.0.1", 6671))); - - try (PublicBAOS byteArrayOutputStream = new PublicBAOS(); - DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) { - procedure0.serialize(outputStream); - ByteBuffer buffer = - ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size()); - Assert.assertEquals(procedure0, ProcedureFactory.getInstance().create(buffer)); - } - } -} diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodesProcedureTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodesProcedureTest.java new file mode 100644 index 0000000000000..0bfe782048a82 --- /dev/null +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodesProcedureTest.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.procedure.impl.node; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; +import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.commons.cluster.NodeStatus; +import org.apache.iotdb.confignode.procedure.impl.region.RegionMigrationPlan; +import org.apache.iotdb.confignode.procedure.store.ProcedureFactory; + +import org.apache.tsfile.utils.PublicBAOS; +import org.junit.Assert; +import org.junit.Test; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class RemoveDataNodesProcedureTest { + + private TConsensusGroupId consensusGroupId = + new TConsensusGroupId(TConsensusGroupType.DataRegion, 1); + + private TDataNodeLocation fromDataNodeLocation = + new TDataNodeLocation( + 10, + new TEndPoint("127.0.0.1", 6667), + new TEndPoint("127.0.0.1", 6668), + new TEndPoint("127.0.0.1", 6669), + new TEndPoint("127.0.0.1", 6670), + new TEndPoint("127.0.0.1", 6671)); + + private TDataNodeLocation toDataNodeLocation = + new TDataNodeLocation( + 11, + new TEndPoint("127.0.0.1", 6677), + new TEndPoint("127.0.0.1", 6678), + new TEndPoint("127.0.0.1", 6679), + new TEndPoint("127.0.0.1", 6680), + new TEndPoint("127.0.0.1", 6681)); + + @Test + public void serDeTest() throws IOException { + List removedDataNodes = Collections.singletonList(fromDataNodeLocation); + Map nodeStatusMap = new HashMap<>(); + nodeStatusMap.put(10, NodeStatus.Running); + RemoveDataNodesProcedure procedure0 = + new RemoveDataNodesProcedure(removedDataNodes, nodeStatusMap); + try (PublicBAOS byteArrayOutputStream = new PublicBAOS(); + DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) { + procedure0.serialize(outputStream); + ByteBuffer buffer = + ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size()); + Assert.assertEquals(procedure0, ProcedureFactory.getInstance().create(buffer)); + } + + RegionMigrationPlan regionMigrationPlan = + new RegionMigrationPlan(consensusGroupId, fromDataNodeLocation); + regionMigrationPlan.setToDataNode(toDataNodeLocation); + try (PublicBAOS byteArrayOutputStream = new PublicBAOS(); + DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) { + regionMigrationPlan.serialize(outputStream); + ByteBuffer buffer = + ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size()); + Assert.assertEquals(regionMigrationPlan, RegionMigrationPlan.deserialize(buffer)); + } + } +} diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/CreatePipePluginProcedureTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/CreatePipePluginProcedureTest.java index 93b95a3f33621..ba9aa01f25629 100644 --- 
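The two assertions above follow the serialize/deserialize round-trip idiom used throughout these procedure tests: write the object through a DataOutputStream, wrap the written bytes in a ByteBuffer, rebuild the object, and compare. A minimal, self-contained sketch of the idiom follows; DummyPayload is a hypothetical stand-in for a real Procedure, and plain JDK streams stand in for PublicBAOS and ProcedureFactory.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

public class SerDeRoundTripSketch {
  // Hypothetical stand-in for a Procedure with a single serialized field.
  static final class DummyPayload {
    final int nodeId;

    DummyPayload(int nodeId) {
      this.nodeId = nodeId;
    }

    void serialize(DataOutputStream stream) throws IOException {
      stream.writeInt(nodeId);
    }

    static DummyPayload deserialize(ByteBuffer buffer) {
      return new DummyPayload(buffer.getInt());
    }
  }

  public static void main(String[] args) throws IOException {
    DummyPayload before = new DummyPayload(10);
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try (DataOutputStream stream = new DataOutputStream(bytes)) {
      before.serialize(stream);
    }
    // Wrap the written bytes and rebuild, mirroring
    // Assert.assertEquals(procedure0, ProcedureFactory.getInstance().create(buffer)).
    DummyPayload after = DummyPayload.deserialize(ByteBuffer.wrap(bytes.toByteArray()));
    if (before.nodeId != after.nodeId) {
      throw new AssertionError("round trip changed the payload");
    }
    System.out.println("round trip OK: nodeId=" + after.nodeId);
  }
}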
a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/CreatePipePluginProcedureTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/CreatePipePluginProcedureTest.java @@ -19,7 +19,7 @@ package org.apache.iotdb.confignode.procedure.impl.pipe.plugin; -import org.apache.iotdb.commons.pipe.plugin.meta.PipePluginMeta; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.PipePluginMeta; import org.apache.iotdb.confignode.procedure.store.ProcedureFactory; import org.apache.tsfile.utils.Binary; diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/pipe/receiver/PipeEnrichedProcedureTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/pipe/receiver/PipeEnrichedProcedureTest.java index ebccd9f36e2b1..da57c1fd2a562 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/pipe/receiver/PipeEnrichedProcedureTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/pipe/receiver/PipeEnrichedProcedureTest.java @@ -27,7 +27,7 @@ import org.apache.iotdb.commons.schema.view.viewExpression.leaf.ConstantViewOperand; import org.apache.iotdb.commons.trigger.TriggerInformation; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; -import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan; +import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.consensus.request.write.database.SetTTLPlan; import org.apache.iotdb.confignode.procedure.impl.schema.AlterLogicalViewProcedure; import org.apache.iotdb.confignode.procedure.impl.schema.DeactivateTemplateProcedure; diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/CreateSubscriptionProcedureTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/CreateSubscriptionProcedureTest.java index 3305e58d8319a..93d9941fbf333 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/CreateSubscriptionProcedureTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/CreateSubscriptionProcedureTest.java @@ -21,10 +21,8 @@ import org.apache.iotdb.commons.subscription.meta.consumer.ConsumerGroupMeta; import org.apache.iotdb.commons.subscription.meta.consumer.ConsumerMeta; -import org.apache.iotdb.commons.subscription.meta.topic.TopicMeta; import org.apache.iotdb.confignode.procedure.impl.pipe.task.CreatePipeProcedureV2; import org.apache.iotdb.confignode.procedure.impl.subscription.consumer.AlterConsumerGroupProcedure; -import org.apache.iotdb.confignode.procedure.impl.subscription.topic.AlterTopicProcedure; import org.apache.iotdb.confignode.procedure.store.ProcedureFactory; import org.apache.iotdb.confignode.rpc.thrift.TCreatePipeReq; import org.apache.iotdb.confignode.rpc.thrift.TSubscribeReq; @@ -74,11 +72,6 @@ public void serializeDeserializeTest() { AlterConsumerGroupProcedure alterConsumerGroupProcedure = new AlterConsumerGroupProcedure(newConsumerGroupMeta); - List topicProcedures = new ArrayList<>(); - TopicMeta newTopicMeta = new TopicMeta("t1", 1, topicAttributes); - newTopicMeta.addSubscribedConsumerGroup("cg1"); - topicProcedures.add(new AlterTopicProcedure(newTopicMeta)); - List pipeProcedures = 
new ArrayList<>(); pipeProcedures.add( new CreatePipeProcedureV2( @@ -92,7 +85,6 @@ public void serializeDeserializeTest() { .setProcessorAttributes(Collections.singletonMap("processor", "pro")))); proc.setAlterConsumerGroupProcedure(alterConsumerGroupProcedure); - proc.setAlterTopicProcedures(topicProcedures); proc.setCreatePipeProcedures(pipeProcedures); try { @@ -104,7 +96,6 @@ public void serializeDeserializeTest() { assertEquals(proc, proc2); assertEquals(alterConsumerGroupProcedure, proc2.getAlterConsumerGroupProcedure()); - assertEquals(topicProcedures, proc2.getAlterTopicProcedures()); assertEquals(pipeProcedures, proc2.getCreatePipeProcedures()); } catch (Exception e) { e.printStackTrace(); diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/DropSubscriptionProcedureTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/DropSubscriptionProcedureTest.java index 9519bf2ed15cc..9ecce2a522ca4 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/DropSubscriptionProcedureTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/DropSubscriptionProcedureTest.java @@ -21,10 +21,8 @@ import org.apache.iotdb.commons.subscription.meta.consumer.ConsumerGroupMeta; import org.apache.iotdb.commons.subscription.meta.consumer.ConsumerMeta; -import org.apache.iotdb.commons.subscription.meta.topic.TopicMeta; import org.apache.iotdb.confignode.procedure.impl.pipe.task.DropPipeProcedureV2; import org.apache.iotdb.confignode.procedure.impl.subscription.consumer.AlterConsumerGroupProcedure; -import org.apache.iotdb.confignode.procedure.impl.subscription.topic.AlterTopicProcedure; import org.apache.iotdb.confignode.procedure.store.ProcedureFactory; import org.apache.iotdb.confignode.rpc.thrift.TUnsubscribeReq; @@ -72,16 +70,11 @@ public void serializeDeserializeTest() { AlterConsumerGroupProcedure alterConsumerGroupProcedure = new AlterConsumerGroupProcedure(newConsumerGroupMeta); - List topicProcedures = new ArrayList<>(); - topicProcedures.add(new AlterTopicProcedure(new TopicMeta("t1", 1, topicAttributes))); - topicProcedures.add(new AlterTopicProcedure(new TopicMeta("t2", 2, topicAttributes))); - List pipeProcedures = new ArrayList<>(); pipeProcedures.add(new DropPipeProcedureV2("pipe_topic1")); pipeProcedures.add(new DropPipeProcedureV2("pipe_topic2")); proc.setAlterConsumerGroupProcedure(alterConsumerGroupProcedure); - proc.setAlterTopicProcedures(topicProcedures); proc.setDropPipeProcedures(pipeProcedures); try { @@ -93,7 +86,6 @@ public void serializeDeserializeTest() { assertEquals(proc, proc2); assertEquals(alterConsumerGroupProcedure, proc2.getAlterConsumerGroupProcedure()); - assertEquals(topicProcedures, proc2.getAlterTopicProcedures()); assertEquals(pipeProcedures, proc2.getDropPipeProcedures()); } catch (Exception e) { fail(); diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/sync/AuthOperationProcedureTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/sync/AuthOperationProcedureTest.java index 78a1758f8ae40..6a4c818c14a3a 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/sync/AuthOperationProcedureTest.java +++ 
b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/procedure/impl/sync/AuthOperationProcedureTest.java @@ -25,7 +25,7 @@ import org.apache.iotdb.common.rpc.thrift.TNodeResource; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType; -import org.apache.iotdb.confignode.consensus.request.auth.AuthorPlan; +import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan; import org.apache.iotdb.confignode.procedure.store.ProcedureFactory; import org.apache.tsfile.utils.PublicBAOS; @@ -45,10 +45,10 @@ public class AuthOperationProcedureTest { @Test public void serializeDeserializeTest() throws IOException { - PublicBAOS byteArrayOutputStream = new PublicBAOS(); - DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream); + final PublicBAOS byteArrayOutputStream = new PublicBAOS(); + final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream); - TDataNodeLocation dataNodeLocation = new TDataNodeLocation(); + final TDataNodeLocation dataNodeLocation = new TDataNodeLocation(); dataNodeLocation.setDataNodeId(1); dataNodeLocation.setClientRpcEndPoint(new TEndPoint("0.0.0.0", 6667)); dataNodeLocation.setInternalEndPoint(new TEndPoint("0.0.0.0", 10730)); @@ -56,19 +56,18 @@ public void serializeDeserializeTest() throws IOException { dataNodeLocation.setDataRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 10760)); dataNodeLocation.setSchemaRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 10750)); - TDataNodeConfiguration dataNodeConfiguration = new TDataNodeConfiguration(); + final TDataNodeConfiguration dataNodeConfiguration = new TDataNodeConfiguration(); dataNodeConfiguration.setLocation(dataNodeLocation); dataNodeConfiguration.setResource(new TNodeResource(16, 34359738368L)); - List datanodes = new ArrayList<>(); + final List datanodes = new ArrayList<>(); datanodes.add(dataNodeConfiguration); try { - int begin = ConfigPhysicalPlanType.CreateUser.ordinal(); - int end = ConfigPhysicalPlanType.ListRoleUsers.ordinal(); + final int begin = ConfigPhysicalPlanType.CreateUser.ordinal(); + final int end = ConfigPhysicalPlanType.UpdateUser.ordinal(); for (int i = begin; i <= end; i++) { - PartialPath path = new PartialPath(new String("root.t1")); - AuthOperationProcedure proc = + final AuthOperationProcedure proc = new AuthOperationProcedure( new AuthorPlan( ConfigPhysicalPlanType.values()[i], @@ -78,20 +77,20 @@ public void serializeDeserializeTest() throws IOException { "123456", Collections.singleton(1), false, - Collections.singletonList(path)), + Collections.singletonList(new PartialPath("root.t1"))), datanodes, false); proc.serialize(outputStream); - ByteBuffer buffer = + final ByteBuffer buffer = ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size()); - AuthOperationProcedure proc2 = + final AuthOperationProcedure proc2 = (AuthOperationProcedure) ProcedureFactory.getInstance().create(buffer); - Assert.assertTrue(proc.equals(proc2)); + Assert.assertEquals(proc, proc2); buffer.clear(); byteArrayOutputStream.reset(); } - } catch (Exception e) { + } catch (final Exception e) { e.printStackTrace(); fail(); } diff --git a/iotdb-core/consensus/pom.xml b/iotdb-core/consensus/pom.xml index 2768096316a99..ef26bce86f8de 100644 --- a/iotdb-core/consensus/pom.xml +++ b/iotdb-core/consensus/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-core - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT iotdb-consensus IoTDB: Core: Consensus @@ -39,32 +39,32 @@ org.apache.iotdb 
node-commons - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb metrics-interface - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-thrift-commons - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-thrift-consensus - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb service-rpc - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb pipe-api - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.ratis diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/IConsensus.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/IConsensus.java index 7dc3fb6e94d3e..8f49af524bc01 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/IConsensus.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/IConsensus.java @@ -37,6 +37,7 @@ import java.io.IOException; import java.util.List; +import java.util.Map; /** Consensus module base interface. */ @ThreadSafe @@ -145,17 +146,26 @@ public interface IConsensus { */ void removeRemotePeer(ConsensusGroupId groupId, Peer peer) throws ConsensusException; + /** + * Record the correct peer list (likely got from the ConfigNode) for future use in resetPeerList. + * Only use this method if necessary. If it is called, it should be called before {@link + * #start()}. + * + * @param correctPeerList The correct consensus group member list + */ + void recordCorrectPeerListBeforeStarting(Map> correctPeerList); + /** * Reset the peer list of the corresponding consensus group. Currently only used in the automatic * cleanup of region migration as a rollback for {@link #addRemotePeer(ConsensusGroupId, Peer)}, * so it will only be less but not more. * * @param groupId the consensus group - * @param peers the new peer list + * @param correctPeers the correct peer list * @throws ConsensusException when resetPeerList doesn't success with other reasons * @throws ConsensusGroupNotExistException when the specified consensus group doesn't exist */ - void resetPeerList(ConsensusGroupId groupId, List peers) throws ConsensusException; + void resetPeerList(ConsensusGroupId groupId, List correctPeers) throws ConsensusException; // management API @@ -226,17 +236,6 @@ public interface IConsensus { */ List getAllConsensusGroupIds(); - /** - * Return all consensus group ids from disk. - * - *
<p>
We need to parse all the RegionGroupIds from the disk directory before starting the - * consensus layer, and {@link #getAllConsensusGroupIds()} returns an empty list, so we need to - * add a new interface. - * - * @return consensusGroupId list - */ - List getAllConsensusGroupIdsWithoutStarting(); - /** * Return the region directory of the corresponding consensus group. * diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/IStateMachine.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/IStateMachine.java index d9db3a836b2e6..692b60beb2a2d 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/IStateMachine.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/IStateMachine.java @@ -67,6 +67,15 @@ default boolean isReadOnly() { */ DataSet read(IConsensusRequest request); + /** + * Release all resources related to the region. Currently, we only check pipe related resources. + * + * @return true if all resources are released successfully + */ + default boolean hasReleaseAllRegionRelatedResource(ConsensusGroupId groupId) { + return true; + } + /** * Take a snapshot of current statemachine. All files are required to be stored under snapshotDir, * which is a subdirectory of the StorageDir in Consensus @@ -149,6 +158,11 @@ default void notifyConfigurationChanged(long term, long index, List newCon default void notifyLeaderReady() { // do nothing default } + + /** Notify the {@link IStateMachine} that this server is no longer the leader. */ + default void notifyNotLeader() { + // do nothing default + } } /** diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/Peer.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/Peer.java index e60efc6565f57..bc6ec923aaf9e 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/Peer.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/Peer.java @@ -35,14 +35,15 @@ import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.Comparator; import java.util.Objects; -public class Peer { +public class Peer implements Comparable { private final Logger logger = LoggerFactory.getLogger(Peer.class); private final ConsensusGroupId groupId; - private final TEndPoint endpoint; private final int nodeId; + private final TEndPoint endpoint; public Peer(ConsensusGroupId groupId, int nodeId, TEndPoint endpoint) { this.groupId = groupId; @@ -105,6 +106,14 @@ public String toString() { return "Peer{" + "groupId=" + groupId + ", endpoint=" + endpoint + ", nodeId=" + nodeId + '}'; } + @Override + public int compareTo(Peer peer) { + return Comparator.comparing(Peer::getGroupId) + .thenComparingInt(Peer::getNodeId) + .thenComparing(Peer::getEndpoint) + .compare(this, peer); + } + public static Peer valueOf( TConsensusGroupId consensusGroupId, TDataNodeLocation dataNodeLocation) { if (consensusGroupId.getType() == TConsensusGroupType.SchemaRegion) { diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/request/IConsensusRequest.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/request/IConsensusRequest.java index 6cd7f370a6baf..cae54c2bcbcbc 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/request/IConsensusRequest.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/request/IConsensusRequest.java @@ -36,6 +36,11 @@ public interface 
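The Comparable implementation added to Peer above chains a Comparator over group id, then node id, then endpoint. A hedged sketch of the same idiom on a hypothetical PeerLike class follows (its int and String fields are simplifications of ConsensusGroupId and TEndPoint); the resulting total order is what later lets IoTConsensusServerImpl keep its configuration in a TreeSet.

import java.util.Arrays;
import java.util.Comparator;
import java.util.TreeSet;

public class PeerOrderSketch {
  // Hypothetical stand-in for Peer.
  static final class PeerLike {
    final int groupId;
    final int nodeId;
    final String endpoint;

    PeerLike(int groupId, int nodeId, String endpoint) {
      this.groupId = groupId;
      this.nodeId = nodeId;
      this.endpoint = endpoint;
    }

    @Override
    public String toString() {
      return groupId + "/" + nodeId + "@" + endpoint;
    }
  }

  // Same shape as Peer.compareTo: group id first, then node id, then endpoint.
  static final Comparator<PeerLike> ORDER =
      Comparator.comparingInt((PeerLike p) -> p.groupId)
          .thenComparingInt(p -> p.nodeId)
          .thenComparing(p -> p.endpoint);

  public static void main(String[] args) {
    // A TreeSet built on this order deduplicates and sorts members.
    TreeSet<PeerLike> configuration = new TreeSet<>(ORDER);
    configuration.addAll(
        Arrays.asList(
            new PeerLike(2, 1, "dn-a:10760"),
            new PeerLike(1, 2, "dn-b:10760"),
            new PeerLike(1, 1, "dn-c:10760"),
            new PeerLike(1, 1, "dn-c:10760"))); // duplicate is ignored
    System.out.println(configuration); // [1/1@dn-c:10760, 1/2@dn-b:10760, 2/1@dn-a:10760]
  }
}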
IConsensusRequest { */ ByteBuffer serializeToByteBuffer(); + default long getMemorySize() { + // return 0 by default + return 0; + } + default void markAsGeneratedByRemoteConsensusLeader() { // do nothing by default } diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/request/IndexedConsensusRequest.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/request/IndexedConsensusRequest.java index 58789dbd0ed13..1147abc049ef6 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/request/IndexedConsensusRequest.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/request/IndexedConsensusRequest.java @@ -33,7 +33,7 @@ public class IndexedConsensusRequest implements IConsensusRequest { private final long syncIndex; private final List requests; private final List serializedRequests; - private long serializedSize = 0; + private long memorySize = 0; public IndexedConsensusRequest(long searchIndex, List requests) { this.searchIndex = searchIndex; @@ -55,7 +55,7 @@ public void buildSerializedRequests() { r -> { ByteBuffer buffer = r.serializeToByteBuffer(); this.serializedRequests.add(buffer); - this.serializedSize += buffer.capacity(); + this.memorySize += Long.max(buffer.capacity(), r.getMemorySize()); }); } @@ -72,8 +72,8 @@ public List getSerializedRequests() { return serializedRequests; } - public long getSerializedSize() { - return serializedSize; + public long getMemorySize() { + return memorySize; } public long getSearchIndex() { diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/config/PipeConsensusConfig.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/config/PipeConsensusConfig.java index bec2c7d8e1e00..2cb149b601b01 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/config/PipeConsensusConfig.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/config/PipeConsensusConfig.java @@ -19,7 +19,7 @@ package org.apache.iotdb.consensus.config; -import org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; import org.apache.iotdb.consensus.pipe.consensuspipe.ConsensusPipeDispatcher; import org.apache.iotdb.consensus.pipe.consensuspipe.ConsensusPipeGuardian; import org.apache.iotdb.consensus.pipe.consensuspipe.ConsensusPipeReceiver; diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/config/RatisConfig.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/config/RatisConfig.java index 9b43bc956bd8e..3bb4e64e4900f 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/config/RatisConfig.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/config/RatisConfig.java @@ -541,8 +541,6 @@ public ThreadPool.Builder setClientSize(int clientSize) { public static class Log { private final boolean useMemory; - private final int queueElementLimit; - private final SizeInBytes queueByteLimit; private final int purgeGap; private final boolean purgeUptoSnapshotIndex; private final long preserveNumsWhenPurge; @@ -555,8 +553,6 @@ public static class Log { private Log( boolean useMemory, - int queueElementLimit, - SizeInBytes queueByteLimit, int purgeGap, boolean purgeUptoSnapshotIndex, long preserveNumsWhenPurge, @@ -567,8 +563,6 @@ private Log( int forceSyncNum, boolean unsafeFlushEnabled) { this.useMemory = useMemory; - this.queueElementLimit = 
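On the IndexedConsensusRequest change above: the field renamed to memorySize now charges, per request, the larger of the serialized buffer capacity and the self-reported footprint from the new IConsensusRequest.getMemorySize() default. A small sketch of that accounting rule, where RequestLike is an illustrative stand-in for the real interface:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class MemoryAccountingSketch {
  interface RequestLike {
    ByteBuffer serializeToByteBuffer();

    // Same default as the new IConsensusRequest.getMemorySize().
    default long getMemorySize() {
      return 0;
    }
  }

  public static void main(String[] args) {
    // One request only has its wire size; the other knows its heap footprint
    // is larger than its serialized form (e.g. object overhead).
    RequestLike compact = () -> ByteBuffer.wrap("abcdefgh".getBytes(StandardCharsets.UTF_8));
    RequestLike heavy =
        new RequestLike() {
          @Override
          public ByteBuffer serializeToByteBuffer() {
            return ByteBuffer.wrap("abcd".getBytes(StandardCharsets.UTF_8));
          }

          @Override
          public long getMemorySize() {
            return 1024;
          }
        };

    long memorySize = 0;
    for (RequestLike r : Arrays.asList(compact, heavy)) {
      ByteBuffer buffer = r.serializeToByteBuffer();
      // The rule from buildSerializedRequests(): take the larger estimate.
      memorySize += Long.max(buffer.capacity(), r.getMemorySize());
    }
    System.out.println(memorySize); // 8 + 1024 = 1032
  }
}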
queueElementLimit; - this.queueByteLimit = queueByteLimit; this.purgeGap = purgeGap; this.purgeUptoSnapshotIndex = purgeUptoSnapshotIndex; this.preserveNumsWhenPurge = preserveNumsWhenPurge; @@ -584,14 +578,6 @@ public boolean isUseMemory() { return useMemory; } - public int getQueueElementLimit() { - return queueElementLimit; - } - - public SizeInBytes getQueueByteLimit() { - return queueByteLimit; - } - public int getPurgeGap() { return purgeGap; } @@ -635,8 +621,6 @@ public static Log.Builder newBuilder() { public static class Builder { private boolean useMemory = false; - private int queueElementLimit = 4096; - private SizeInBytes queueByteLimit = SizeInBytes.valueOf("64MB"); private int purgeGap = 1024; private boolean purgeUptoSnapshotIndex = true; private long preserveNumsWhenPurge = 1000; @@ -650,8 +634,6 @@ public static class Builder { public Log build() { return new Log( useMemory, - queueElementLimit, - queueByteLimit, purgeGap, purgeUptoSnapshotIndex, preserveNumsWhenPurge, @@ -668,16 +650,6 @@ public Log.Builder setUseMemory(boolean useMemory) { return this; } - public Log.Builder setQueueElementLimit(int queueElementLimit) { - this.queueElementLimit = queueElementLimit; - return this; - } - - public Log.Builder setQueueByteLimit(SizeInBytes queueByteLimit) { - this.queueByteLimit = queueByteLimit; - return this; - } - public Log.Builder setPurgeGap(int purgeGap) { this.purgeGap = purgeGap; return this; @@ -908,6 +880,7 @@ public static class Impl { private final int retryTimesMax; private final long retryWaitMillis; + private final long retryMaxWaitMillis; private final long checkAndTakeSnapshotInterval; private final long raftLogSizeMaxThreshold; @@ -917,11 +890,13 @@ public static class Impl { public Impl( int retryTimesMax, long retryWaitMillis, + long retryMaxWaitMillis, long checkAndTakeSnapshotInterval, long raftLogSizeMaxThreshold, long forceSnapshotInterval) { this.retryTimesMax = retryTimesMax; this.retryWaitMillis = retryWaitMillis; + this.retryMaxWaitMillis = retryMaxWaitMillis; this.checkAndTakeSnapshotInterval = checkAndTakeSnapshotInterval; this.raftLogSizeMaxThreshold = raftLogSizeMaxThreshold; this.forceSnapshotInterval = forceSnapshotInterval; @@ -947,14 +922,19 @@ public long getForceSnapshotInterval() { return forceSnapshotInterval; } + public long getRetryMaxWaitMillis() { + return retryMaxWaitMillis; + } + public static Impl.Builder newBuilder() { return new Builder(); } public static class Builder { - private int retryTimesMax = 3; - private long retryWaitMillis = 500; + private int retryTimesMax = 10; + private long retryWaitMillis = 100; + private long retryMaxWaitMillis = 5000; // 120s private long checkAndTakeSnapshotInterval = 120; @@ -967,6 +947,7 @@ public Impl build() { return new Impl( retryTimesMax, retryWaitMillis, + retryMaxWaitMillis, checkAndTakeSnapshotInterval, raftLogSizeMaxThreshold, forceSnapshotInterval); @@ -996,6 +977,11 @@ public Impl.Builder setForceSnapshotInterval(long forceSnapshotInterval) { this.forceSnapshotInterval = forceSnapshotInterval; return this; } + + public Impl.Builder setRetryMaxWaitMillis(long retryMaxWaitTimeMillis) { + this.retryMaxWaitMillis = retryMaxWaitTimeMillis; + return this; + } } } diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/IoTConsensus.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/IoTConsensus.java index b659db9e3db10..d15d6e365a77f 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/IoTConsensus.java +++ 
b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/IoTConsensus.java @@ -61,6 +61,7 @@ import org.apache.iotdb.rpc.RpcUtils; import org.apache.iotdb.rpc.TSStatusCode; +import com.google.common.collect.ImmutableList; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -70,14 +71,18 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Future; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiConsumer; +import java.util.stream.Collectors; public class IoTConsensus implements IConsensus { @@ -97,6 +102,7 @@ public class IoTConsensus implements IConsensus { private final IClientManager syncClientManager; private final ScheduledExecutorService backgroundTaskService; private Future updateReaderFuture; + private Map> correctPeerListBeforeStart = null; public IoTConsensus(ConsensusConfig config, Registry registry) { this.thisNode = config.getThisNodeEndPoint(); @@ -169,17 +175,39 @@ private void initAndRecover() throws IOException { new IoTConsensusServerImpl( path.toString(), new Peer(consensusGroupId, thisNodeId, thisNode), - new ArrayList<>(), + new TreeSet<>(), registry.apply(consensusGroupId), backgroundTaskService, clientManager, syncClientManager, config); stateMachineMap.put(consensusGroupId, consensus); - consensus.start(); } } } + if (correctPeerListBeforeStart != null) { + BiConsumer> resetPeerListWithoutThrow = + (consensusGroupId, peers) -> { + try { + resetPeerListImpl(consensusGroupId, peers, false); + } catch (ConsensusGroupNotExistException ignore) { + + } catch (Exception e) { + logger.warn("Failed to reset peer list while start", e); + } + }; + // make peers which are in list correct + correctPeerListBeforeStart.forEach(resetPeerListWithoutThrow); + // clear peers which are not in the list + stateMachineMap.keySet().stream() + .filter(consensusGroupId -> !correctPeerListBeforeStart.containsKey(consensusGroupId)) + // copy to a new list to avoid concurrent modification + .collect(Collectors.toList()) + .forEach( + consensusGroupId -> + resetPeerListWithoutThrow.accept(consensusGroupId, Collections.emptyList())); + } + stateMachineMap.values().forEach(IoTConsensusServerImpl::start); } @Override @@ -207,9 +235,11 @@ public TSStatus write(ConsensusGroupId groupId, IConsensusRequest request) if (impl.isReadOnly()) { return StatusUtils.getStatus(TSStatusCode.SYSTEM_READ_ONLY); } else if (!impl.isActive()) { - return RpcUtils.getStatus( - TSStatusCode.WRITE_PROCESS_REJECT, - "peer is inactive and not ready to receive sync log request."); + String message = + String.format( + "Peer is inactive and not ready to write request, %s, DataNode Id: %s", + groupId.toString(), impl.getThisNode().getNodeId()); + return RpcUtils.getStatus(TSStatusCode.WRITE_PROCESS_REJECT, message); } else { return impl.write(request); } @@ -252,7 +282,7 @@ public void createLocalPeer(ConsensusGroupId groupId, List peers) new IoTConsensusServerImpl( path, new Peer(groupId, thisNodeId, thisNode), - peers, + new TreeSet<>(peers), registry.apply(groupId), backgroundTaskService, clientManager, @@ -294,54 +324,55 @@ public void addRemotePeer(ConsensusGroupId groupId, Peer peer) throws ConsensusE IoTConsensusServerImpl impl = 
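Usage note for the correctPeerListBeforeStart field introduced above: per the IConsensus javadoc, the ConfigNode-provided view must be recorded before start(), and initAndRecover() then reconciles every recovered group against it. A hedged caller-side sketch under that assumption; String keys stand in for ConsensusGroupId and Peer, and the input map is hypothetical.

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class StartupReconcileSketch {
  // Minimal mirror of the IConsensus surface used here.
  interface ConsensusLike {
    void recordCorrectPeerListBeforeStarting(Map<String, List<String>> correctPeerList);

    void start();
  }

  public static void main(String[] args) {
    ConsensusLike consensus =
        new ConsensusLike() {
          private Map<String, List<String>> recorded = Collections.emptyMap();

          @Override
          public void recordCorrectPeerListBeforeStarting(
              Map<String, List<String>> correctPeerList) {
            this.recorded = correctPeerList; // only stored; applied later in start()
          }

          @Override
          public void start() {
            // In IoTConsensus, start() resets every recovered group against the
            // recorded list and clears groups that are absent from it.
            System.out.println("starting, reconciling against: " + recorded);
          }
        };

    // Hypothetical input: on a DataNode this map would come from the ConfigNode.
    Map<String, List<String>> correct = new HashMap<>();
    correct.put("DataRegion-1", Arrays.asList("dn-10", "dn-11"));

    consensus.recordCorrectPeerListBeforeStarting(correct); // must precede start()
    consensus.start();
  }
}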
Optional.ofNullable(stateMachineMap.get(groupId)) .orElseThrow(() -> new ConsensusGroupNotExistException(groupId)); - if (impl.getConfiguration().contains(peer)) { - throw new PeerAlreadyInConsensusGroupException(groupId, peer); - } - try { - // step 1: inactive new Peer to prepare for following steps - logger.info("[IoTConsensus] inactivate new peer: {}", peer); - impl.inactivePeer(peer, false); + synchronized (impl) { + if (impl.getConfiguration().contains(peer)) { + throw new PeerAlreadyInConsensusGroupException(groupId, peer); + } + try { + // step 1: inactivate new Peer to prepare for following steps + logger.info("[IoTConsensus] inactivate new peer: {}", peer); + impl.inactivatePeer(peer, false); - // step 2: take snapshot - logger.info("[IoTConsensus] start to take snapshot..."); - impl.checkAndLockSafeDeletedSearchIndex(); - impl.takeSnapshot(); + // step 2: notify all the other Peers to build the sync connection to newPeer + logger.info("[IoTConsensus] notify current peers to build sync log..."); + impl.notifyPeersToBuildSyncLogChannel(peer); - // step 3: transit snapshot - logger.info("[IoTConsensus] start to transmit snapshot..."); - impl.transmitSnapshot(peer); + // step 3: take snapshot + logger.info("[IoTConsensus] start to take snapshot..."); - // step 4: let the new peer load snapshot - logger.info("[IoTConsensus] trigger new peer to load snapshot..."); - impl.triggerSnapshotLoad(peer); - KillPoint.setKillPoint(DataNodeKillPoints.COORDINATOR_ADD_PEER_TRANSITION); + impl.takeSnapshot(); - // step 5: notify all the other Peers to build the sync connection to newPeer - logger.info("[IoTConsensus] notify current peers to build sync log..."); - impl.notifyPeersToBuildSyncLogChannel(peer); + // step 4: transit snapshot + logger.info("[IoTConsensus] start to transmit snapshot..."); + impl.transmitSnapshot(peer); - // step 6: active new Peer - logger.info("[IoTConsensus] activate new peer..."); - impl.activePeer(peer); + // step 5: let the new peer load snapshot + logger.info("[IoTConsensus] trigger new peer to load snapshot..."); + impl.triggerSnapshotLoad(peer); + KillPoint.setKillPoint(DataNodeKillPoints.COORDINATOR_ADD_PEER_TRANSITION); + + // step 6: activate new Peer + logger.info("[IoTConsensus] activate new peer..."); + impl.activePeer(peer); + + // step 7: notify remote peer to clean up transferred snapshot + logger.info("[IoTConsensus] clean up remote snapshot..."); + try { + impl.cleanupRemoteSnapshot(peer); + } catch (ConsensusGroupModifyPeerException e) { + logger.warn("[IoTConsensus] failed to cleanup remote snapshot", e); + } + KillPoint.setKillPoint(DataNodeKillPoints.COORDINATOR_ADD_PEER_DONE); - // step 7: notify remote peer to clean up transferred snapshot - logger.info("[IoTConsensus] clean up remote snapshot..."); - try { - impl.cleanupRemoteSnapshot(peer); } catch (ConsensusGroupModifyPeerException e) { - logger.warn("[IoTConsensus] failed to cleanup remote snapshot", e); + logger.info("[IoTConsensus] add remote peer failed, automatically cleaning up side effects..."); + // try to clean up the sync log channel + impl.notifyPeersToRemoveSyncLogChannel(peer); + throw new ConsensusException(e); + } finally { + logger.info("[IoTConsensus] clean up local snapshot..."); + impl.cleanupLocalSnapshot(); } - KillPoint.setKillPoint(DataNodeKillPoints.COORDINATOR_ADD_PEER_DONE); - - } catch (ConsensusGroupModifyPeerException e) { - logger.info("[IoTConsensus] add remote peer failed, automatic cleanup side effects..."); - // try to clean up the sync log channel -
impl.notifyPeersToRemoveSyncLogChannel(peer); - throw new ConsensusException(e); - } finally { - impl.checkAndUnlockSafeDeletedSearchIndex(); - logger.info("[IoTConsensus] clean up local snapshot..."); - impl.cleanupLocalSnapshot(); } } @@ -351,27 +382,32 @@ public void removeRemotePeer(ConsensusGroupId groupId, Peer peer) throws Consens Optional.ofNullable(stateMachineMap.get(groupId)) .orElseThrow(() -> new ConsensusGroupNotExistException(groupId)); - if (!impl.getConfiguration().contains(peer)) { - throw new PeerNotInConsensusGroupException(groupId, peer.toString()); - } + synchronized (impl) { + if (!impl.getConfiguration().contains(peer)) { + throw new PeerNotInConsensusGroupException(groupId, peer.toString()); + } - KillPoint.setKillPoint(IoTConsensusRemovePeerCoordinatorKillPoints.INIT); + KillPoint.setKillPoint(IoTConsensusRemovePeerCoordinatorKillPoints.INIT); - // let other peers remove the sync channel with target peer - impl.notifyPeersToRemoveSyncLogChannel(peer); - KillPoint.setKillPoint( - IoTConsensusRemovePeerCoordinatorKillPoints.AFTER_NOTIFY_PEERS_TO_REMOVE_SYNC_LOG_CHANNEL); + // let other peers remove the sync channel with target peer + impl.notifyPeersToRemoveSyncLogChannel(peer); + KillPoint.setKillPoint( + IoTConsensusRemovePeerCoordinatorKillPoints + .AFTER_NOTIFY_PEERS_TO_REMOVE_SYNC_LOG_CHANNEL); - try { - // let target peer reject new write - impl.inactivePeer(peer, true); - KillPoint.setKillPoint(IoTConsensusRemovePeerCoordinatorKillPoints.AFTER_INACTIVE_PEER); - // wait its SyncLog to complete - impl.waitTargetPeerUntilSyncLogCompleted(peer); - } catch (ConsensusGroupModifyPeerException e) { - throw new ConsensusException(e.getMessage()); + try { + // let target peer reject new write + impl.inactivatePeer(peer, true); + KillPoint.setKillPoint(IoTConsensusRemovePeerCoordinatorKillPoints.AFTER_INACTIVE_PEER); + // wait for its SyncLog to complete + impl.waitTargetPeerUntilSyncLogCompleted(peer); + // wait for its region-related resources to be released + impl.waitReleaseAllRegionRelatedResource(peer); + } catch (ConsensusGroupModifyPeerException e) { + throw new ConsensusException(e.getMessage()); + } + KillPoint.setKillPoint(IoTConsensusRemovePeerCoordinatorKillPoints.FINISH); } - KillPoint.setKillPoint(IoTConsensusRemovePeerCoordinatorKillPoints.FINISH); } @Override @@ -427,33 +463,6 @@ public List<ConsensusGroupId> getAllConsensusGroupIds() { return new ArrayList<>(stateMachineMap.keySet()); } - @Override - public List<ConsensusGroupId> getAllConsensusGroupIdsWithoutStarting() { - return getConsensusGroupIdsFromDir(storageDir, logger); - } - - public static List<ConsensusGroupId> getConsensusGroupIdsFromDir(File storageDir, Logger logger) { - List<ConsensusGroupId> consensusGroupIds = new ArrayList<>(); - try (DirectoryStream<Path> stream = Files.newDirectoryStream(storageDir.toPath())) { - for (Path path : stream) { - try { - String[] items = path.getFileName().toString().split("_"); - ConsensusGroupId consensusGroupId = - ConsensusGroupId.Factory.create( - Integer.parseInt(items[0]), Integer.parseInt(items[1])); - consensusGroupIds.add(consensusGroupId); - } catch (Exception e) { - logger.info( - "The directory {} is not a group directory;" + " ignoring it. ", - path.getFileName().toString()); - } - } - } catch (IOException e) { - logger.error("Failed to get all consensus group ids from disk", e); - } - return consensusGroupIds; - } - @Override public String getRegionDirFromConsensusGroupId(ConsensusGroupId groupId) { return buildPeerDir(storageDir, groupId); @@ -472,38 +481,69 @@ public void reloadConsensusConfig(ConsensusConfig consensusConfig) { .init(config.getReplication().getRegionMigrationSpeedLimitBytesPerSecond()); } + @Override + public void recordCorrectPeerListBeforeStarting( + Map<ConsensusGroupId, List<Peer>> correctPeerList) { + logger.info("Record correct peer list: {}", correctPeerList); + this.correctPeerListBeforeStart = correctPeerList; + } + @Override public void resetPeerList(ConsensusGroupId groupId, List<Peer> correctPeers) throws ConsensusException { + resetPeerListImpl(groupId, correctPeers, true); + } + + private void resetPeerListImpl( + ConsensusGroupId groupId, List<Peer> correctPeers, boolean startNow) + throws ConsensusException { IoTConsensusServerImpl impl = Optional.ofNullable(stateMachineMap.get(groupId)) .orElseThrow(() -> new ConsensusGroupNotExistException(groupId)); + Peer localPeer = new Peer(groupId, thisNodeId, thisNode); if (!correctPeers.contains(localPeer)) { - logger.warn( - "[RESET PEER LIST] Local peer is not in the correct configuration, delete local peer {}", + logger.info( + "[RESET PEER LIST] {} Local peer is not in the correct configuration, delete it.", groupId); deleteLocalPeer(groupId); return; } - String previousPeerListStr = impl.getConfiguration().toString(); - for (Peer peer : impl.getConfiguration()) { - if (!correctPeers.contains(peer)) { - if (!impl.removeSyncLogChannel(peer)) { - logger.error( - "[RESET PEER LIST] Failed to remove peer {}'s sync log channel from group {}", - peer, - groupId); + + synchronized (impl) { + // remove invalid peers + ImmutableList<Peer> currentMembers = ImmutableList.copyOf(impl.getConfiguration()); + String previousPeerListStr = currentMembers.toString(); + for (Peer peer : currentMembers) { + if (!correctPeers.contains(peer)) { + if (!impl.removeSyncLogChannel(peer)) { + logger.error( + "[RESET PEER LIST] {} Failed to remove sync channel with: {}", groupId, peer); + } else { + logger.info("[RESET PEER LIST] {} Removed sync channel with: {}", groupId, peer); + } } } - } - logger.info( - "[RESET PEER LIST] Local peer list has been reset: {} -> {}", - previousPeerListStr, - impl.getConfiguration()); - for (Peer peer : correctPeers) { - if (!impl.getConfiguration().contains(peer)) { - logger.warn("[RESET PEER LIST] \"Correct peer\" {} is not in local peer list", peer); + // add missing correct peers + for (Peer peer : correctPeers) { + if (!impl.getConfiguration().contains(peer)) { + impl.buildSyncLogChannel(peer, startNow); + logger.info("[RESET PEER LIST] {} Built sync channel with: {}", groupId, peer); + } + } + // show result + String newPeerListStr = impl.getConfiguration().toString(); + if (!previousPeerListStr.equals(newPeerListStr)) { + logger.info( + "[RESET PEER LIST] {} Local peer list has been reset: {} -> {}", + groupId, + previousPeerListStr, + newPeerListStr); + } else { + logger.info( + "[RESET PEER LIST] {} The current peer list is correct, nothing needs to be reset: {}", + groupId, + previousPeerListStr); } } } diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/IoTConsensusServerImpl.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/IoTConsensusServerImpl.java index ef35ae62c993b..3002b018e3ea8 100644 ---
a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/IoTConsensusServerImpl.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/IoTConsensusServerImpl.java @@ -23,6 +23,7 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.client.IClientManager; import org.apache.iotdb.commons.client.exception.ClientManagerException; +import org.apache.iotdb.commons.consensus.ConsensusGroupId; import org.apache.iotdb.commons.consensus.index.ComparableConsensusRequest; import org.apache.iotdb.commons.consensus.index.impl.IoTProgressIndex; import org.apache.iotdb.commons.service.metric.MetricService; @@ -59,6 +60,8 @@ import org.apache.iotdb.consensus.iot.thrift.TSendSnapshotFragmentRes; import org.apache.iotdb.consensus.iot.thrift.TTriggerSnapshotLoadReq; import org.apache.iotdb.consensus.iot.thrift.TTriggerSnapshotLoadRes; +import org.apache.iotdb.consensus.iot.thrift.TWaitReleaseAllRegionRelatedResourceReq; +import org.apache.iotdb.consensus.iot.thrift.TWaitReleaseAllRegionRelatedResourceRes; import org.apache.iotdb.consensus.iot.thrift.TWaitSyncLogCompleteReq; import org.apache.iotdb.consensus.iot.thrift.TWaitSyncLogCompleteRes; import org.apache.iotdb.rpc.RpcUtils; @@ -70,11 +73,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.DataOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; -import java.io.UncheckedIOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.file.Files; @@ -82,12 +83,12 @@ import java.nio.file.Paths; import java.util.ArrayList; import java.util.Collections; -import java.util.HashSet; -import java.util.Iterator; import java.util.LinkedList; import java.util.List; +import java.util.Objects; import java.util.PriorityQueue; -import java.util.Set; +import java.util.TreeSet; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -96,15 +97,11 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.regex.Pattern; -import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.apache.iotdb.commons.utils.FileUtils.humanReadableByteCountSI; public class IoTConsensusServerImpl { - private static final String CONFIGURATION_FILE_NAME = "configuration.dat"; - private static final String CONFIGURATION_TMP_FILE_NAME = "configuration.dat.tmp"; public static final String SNAPSHOT_DIR_NAME = "snapshot"; private static final Pattern SNAPSHOT_INDEX_PATTEN = Pattern.compile(".*[^\\d](?=(\\d+))"); private static final PerformanceOverviewMetrics PERFORMANCE_OVERVIEW_METRICS = @@ -116,7 +113,7 @@ public class IoTConsensusServerImpl { private final Lock stateMachineLock = new ReentrantLock(); private final Condition stateMachineCondition = stateMachineLock.newCondition(); private final String storageDir; - private final List configuration; + private final TreeSet configuration; private final AtomicLong searchIndex; private final LogDispatcher logDispatcher; private IoTConsensusConfig config; @@ -129,13 +126,11 @@ public class IoTConsensusServerImpl { private final ScheduledExecutorService backgroundTaskService; private final IoTConsensusRateLimiter ioTConsensusRateLimiter = IoTConsensusRateLimiter.getInstance(); - private volatile long lastPinnedSearchIndexForMigration = -1; - private volatile long lastPinnedSafeDeletedIndexForMigration = 
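The resetPeerListImpl flow above reduces to a set reconciliation: delete the local replica if the local peer is absent from the correct list; otherwise remove members that are not in the correct list and build sync channels for correct members that are missing. A plain-strings sketch of that flow, where set mutations stand in for removeSyncLogChannel and buildSyncLogChannel:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.TreeSet;

public class ResetPeerListSketch {
  public static void main(String[] args) {
    String localPeer = "dn-1";
    // Current membership as recovered from disk; dn-9 is stale.
    TreeSet<String> configuration = new TreeSet<>(Arrays.asList("dn-1", "dn-2", "dn-9"));
    // Correct membership, e.g. as reported by the ConfigNode.
    List<String> correctPeers = Arrays.asList("dn-1", "dn-2", "dn-3");

    if (!correctPeers.contains(localPeer)) {
      System.out.println("local peer not in correct configuration -> deleteLocalPeer");
      return;
    }
    // Remove invalid peers (stands in for impl.removeSyncLogChannel(peer)).
    for (String peer : new ArrayList<>(configuration)) {
      if (!correctPeers.contains(peer)) {
        configuration.remove(peer);
        System.out.println("removed sync channel with " + peer);
      }
    }
    // Add missing correct peers (stands in for impl.buildSyncLogChannel(peer, startNow)).
    for (String peer : correctPeers) {
      if (configuration.add(peer)) {
        System.out.println("built sync channel with " + peer);
      }
    }
    System.out.println("reconciled configuration: " + configuration); // [dn-1, dn-2, dn-3]
  }
}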
-1; public IoTConsensusServerImpl( String storageDir, Peer thisNode, - List configuration, + TreeSet configuration, IStateMachine stateMachine, ScheduledExecutorService backgroundTaskService, IClientManager clientManager, @@ -148,25 +143,14 @@ public IoTConsensusServerImpl( this.cacheQueueMap = new ConcurrentHashMap<>(); this.syncClientManager = syncClientManager; this.configuration = configuration; - if (configuration.isEmpty()) { - recoverConfiguration(); - } else { - persistConfiguration(); - } this.backgroundTaskService = backgroundTaskService; this.config = config; this.consensusGroupId = thisNode.getGroupId().toString(); - consensusReqReader = (ConsensusReqReader) stateMachine.read(new GetConsensusReqReaderPlan()); + this.consensusReqReader = + (ConsensusReqReader) stateMachine.read(new GetConsensusReqReaderPlan()); this.searchIndex = new AtomicLong(consensusReqReader.getCurrentSearchIndex()); this.ioTConsensusServerMetrics = new IoTConsensusServerMetrics(this); this.logDispatcher = new LogDispatcher(this, clientManager); - // Since the underlying wal does not persist safelyDeletedSearchIndex, IoTConsensus needs to - // update wal with its syncIndex recovered from the consensus layer when initializing. - // This prevents wal from being piled up if the safelyDeletedSearchIndex is not updated after - // the restart and Leader migration occurs - checkAndUpdateSafeDeletedSearchIndex(); - // see message in logs for details - checkAndUpdateSearchIndex(); } public IStateMachine getStateMachine() { @@ -174,6 +158,7 @@ public IStateMachine getStateMachine() { } public void start() { + checkAndUpdateIndex(); MetricService.getInstance().addMetricSet(this.ioTConsensusServerMetrics); stateMachine.start(); logDispatcher.start(); @@ -275,10 +260,9 @@ public DataSet read(IConsensusRequest request) { public void takeSnapshot() throws ConsensusGroupModifyPeerException { try { - long newSnapshotIndex = getLatestSnapshotIndex() + 1; newSnapshotDirName = String.format( - "%s_%s_%d", SNAPSHOT_DIR_NAME, thisNode.getGroupId().getId(), newSnapshotIndex); + "%s_%s_%s", SNAPSHOT_DIR_NAME, thisNode.getGroupId().getId(), UUID.randomUUID()); File snapshotDir = new File(storageDir, newSnapshotDirName); if (snapshotDir.exists()) { FileUtils.deleteDirectory(snapshotDir); @@ -370,18 +354,18 @@ public void transmitSnapshot(Peer targetPeer) throws ConsensusGroupModifyPeerExc } public void receiveSnapshotFragment( - String snapshotId, String originalFilePath, ByteBuffer fileChunk) + String snapshotId, String originalFilePath, ByteBuffer fileChunk, long fileOffset) throws ConsensusGroupModifyPeerException { try { String targetFilePath = calculateSnapshotPath(snapshotId, originalFilePath); - File targetFile = new File(storageDir, targetFilePath); + File targetFile = getSnapshotPath(targetFilePath); Path parentDir = Paths.get(targetFile.getParent()); if (!Files.exists(parentDir)) { Files.createDirectories(parentDir); } try (FileOutputStream fos = new FileOutputStream(targetFile.getAbsolutePath(), true); FileChannel channel = fos.getChannel()) { - channel.write(fileChunk.slice()); + channel.write(fileChunk.slice(), fileOffset); } } catch (IOException e) { throw new ConsensusGroupModifyPeerException( @@ -399,22 +383,6 @@ private String calculateSnapshotPath(String snapshotId, String originalFilePath) return originalFilePath.substring(originalFilePath.indexOf(snapshotId)); } - private long getLatestSnapshotIndex() { - long snapShotIndex = 0; - File directory = new File(storageDir); - File[] versionFiles = 
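On the receiveSnapshotFragment change above: writing each chunk at its explicit fileOffset via FileChannel.write(ByteBuffer, long), instead of appending, makes fragment delivery idempotent, because a retransmitted chunk lands at the same position. A minimal sketch of that positional-write behavior, using a temporary file:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class PositionalWriteSketch {
  public static void main(String[] args) throws IOException {
    Path target = Files.createTempFile("snapshot-fragment-", ".dat");
    try (FileChannel channel =
        FileChannel.open(target, StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
      ByteBuffer first = ByteBuffer.wrap("hello ".getBytes(StandardCharsets.UTF_8));
      ByteBuffer second = ByteBuffer.wrap("world".getBytes(StandardCharsets.UTF_8));
      // Positional writes: each fragment carries its own offset, so replaying
      // a fragment (e.g. after a retry) rewrites the same region harmlessly.
      channel.write(first.slice(), 0);
      channel.write(second.slice(), 6);
      channel.write(second.slice(), 6); // idempotent retransmission
    }
    System.out.println(new String(Files.readAllBytes(target), StandardCharsets.UTF_8));
    Files.delete(target); // prints "hello world"
  }
}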
directory.listFiles((dir, name) -> name.startsWith(SNAPSHOT_DIR_NAME)); - if (versionFiles == null || versionFiles.length == 0) { - return snapShotIndex; - } - for (File file : versionFiles) { - snapShotIndex = - Math.max( - snapShotIndex, - Long.parseLong(SNAPSHOT_INDEX_PATTEN.matcher(file.getName()).replaceAll(""))); - } - return snapShotIndex; - } - private void clearOldSnapshot() { File directory = new File(storageDir); File[] versionFiles = directory.listFiles((dir, name) -> name.startsWith(SNAPSHOT_DIR_NAME)); @@ -437,7 +405,23 @@ public void loadSnapshot(String snapshotId) { // TODO: (xingtanzjr) throw exception if the snapshot load failed - stateMachine.loadSnapshot(new File(storageDir, snapshotId)); + stateMachine.loadSnapshot(getSnapshotPath(snapshotId)); + } + + private File getSnapshotPath(String snapshotRelativePath) { + File storageDirFile = new File(storageDir); + File snapshotDir = new File(storageDir, snapshotRelativePath); + try { + if (!snapshotDir + .getCanonicalFile() + .toPath() + .startsWith(storageDirFile.getCanonicalFile().toPath())) { + throw new IllegalArgumentException("Invalid snapshotRelativePath: " + snapshotRelativePath); + } + } catch (IOException e) { + throw new IllegalArgumentException(e); + } + return snapshotDir; } @FunctionalInterface @@ -445,26 +429,38 @@ public interface ThrowableFunction<T, R> { R apply(T t) throws Exception; } - public void inactivePeer(Peer peer, boolean forDeletionPurpose) + public void inactivatePeer(Peer peer, boolean forDeletionPurpose) throws ConsensusGroupModifyPeerException { - try (SyncIoTConsensusServiceClient client = - syncClientManager.borrowClient(peer.getEndpoint())) { - try { - TInactivatePeerRes res = - client.inactivatePeer( - new TInactivatePeerReq(peer.getGroupId().convertToTConsensusGroupId()) - .setForDeletionPurpose(forDeletionPurpose)); - if (!isSuccess(res.status)) { - throw new ConsensusGroupModifyPeerException( - String.format("error when inactivating %s. %s", peer, res.getStatus())); + ConsensusGroupModifyPeerException lastException = null; + // In region migration, if the target node restarts before the “addRegionPeer” phase within 1 + // minute, + // the client in the ClientManager will become invalid. + // This PR adds 1 retry at this point to ensure that region migration can still proceed + // correctly in such cases. + for (int i = 0; i < 2; i++) { + try (SyncIoTConsensusServiceClient client = + syncClientManager.borrowClient(peer.getEndpoint())) { + try { + TInactivatePeerRes res = + client.inactivatePeer( + new TInactivatePeerReq(peer.getGroupId().convertToTConsensusGroupId()) + .setForDeletionPurpose(forDeletionPurpose)); + if (isSuccess(res.status)) { + return; + } + lastException = + new ConsensusGroupModifyPeerException( + String.format("error when inactivating %s.
%s", peer, res.getStatus())); + } catch (Exception e) { + lastException = + new ConsensusGroupModifyPeerException( + String.format("error when inactivating %s", peer), e); } - } catch (Exception e) { - throw new ConsensusGroupModifyPeerException( - String.format("error when inactivating %s", peer), e); + } catch (ClientManagerException e) { + lastException = new ConsensusGroupModifyPeerException(e); } - } catch (ClientManagerException e) { - throw new ConsensusGroupModifyPeerException(e); } + throw lastException; } public void triggerSnapshotLoad(Peer peer) throws ConsensusGroupModifyPeerException { @@ -513,7 +509,7 @@ public void notifyPeersToBuildSyncLogChannel(Peer targetPeer) if (peer.equals(thisNode)) { // use searchIndex for thisNode as the initialSyncIndex because targetPeer will load the // snapshot produced by thisNode - buildSyncLogChannel(targetPeer, lastPinnedSearchIndexForMigration); + buildSyncLogChannel(targetPeer, true); } else { // use RPC to tell other peers to build sync log channel to target peer try (SyncIoTConsensusServiceClient client = @@ -619,33 +615,64 @@ public void waitTargetPeerUntilSyncLogCompleted(Peer targetPeer) } } + public boolean hasReleaseAllRegionRelatedResource(ConsensusGroupId groupId) { + return stateMachine.hasReleaseAllRegionRelatedResource(groupId); + } + + public void waitReleaseAllRegionRelatedResource(Peer targetPeer) + throws ConsensusGroupModifyPeerException { + long checkIntervalInMs = 10_000L; + try (SyncIoTConsensusServiceClient client = + syncClientManager.borrowClient(targetPeer.getEndpoint())) { + while (true) { + TWaitReleaseAllRegionRelatedResourceRes res = + client.waitReleaseAllRegionRelatedResource( + new TWaitReleaseAllRegionRelatedResourceReq( + targetPeer.getGroupId().convertToTConsensusGroupId())); + if (res.releaseAllResource) { + logger.info("[WAIT RELEASE] {} has released all region related resource", targetPeer); + return; + } + logger.info("[WAIT RELEASE] {} is still releasing all region related resource", targetPeer); + Thread.sleep(checkIntervalInMs); + } + } catch (ClientManagerException | TException e) { + throw new ConsensusGroupModifyPeerException( + String.format( + "error when waiting %s to release all region related resource. %s", + targetPeer, e.getMessage()), + e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new ConsensusGroupModifyPeerException( + String.format( + "thread interrupted when waiting %s to release all region related resource. %s", + targetPeer, e.getMessage()), + e); + } + } + private boolean isSuccess(TSStatus status) { return status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode(); } - /** - * build SyncLog channel with safeIndex as the default initial sync index. - * - * @throws ConsensusGroupModifyPeerException - */ - public void buildSyncLogChannel(Peer targetPeer) throws ConsensusGroupModifyPeerException { - buildSyncLogChannel(targetPeer, getMinSyncIndex()); + /** build SyncLog channel with safeIndex as the default initial sync index. 
*/ + public void buildSyncLogChannel(Peer targetPeer, boolean startNow) { + buildSyncLogChannel(targetPeer, getMinSyncIndex(), startNow); } - public void buildSyncLogChannel(Peer targetPeer, long initialSyncIndex) - throws ConsensusGroupModifyPeerException { + public void buildSyncLogChannel(Peer targetPeer, long initialSyncIndex, boolean startNow) { KillPoint.setKillPoint(DataNodeKillPoints.ORIGINAL_ADD_PEER_DONE); - // step 1, build sync channel in LogDispatcher + configuration.add(targetPeer); + if (Objects.equals(targetPeer, thisNode)) { + return; + } + logDispatcher.addLogDispatcherThread(targetPeer, initialSyncIndex, startNow); logger.info( - "[IoTConsensus] build sync log channel to {} with initialSyncIndex {}", + "[IoTConsensus] Successfully built sync log channel to {} with initialSyncIndex {}. {}", targetPeer, - initialSyncIndex); - logDispatcher.addLogDispatcherThread(targetPeer, initialSyncIndex); - // step 2, update configuration - configuration.add(targetPeer); - // step 3, persist configuration - persistConfiguration(); - logger.info("[IoTConsensus] persist new configuration: {}", configuration); + initialSyncIndex, + startNow ? "Sync log channel has started." : "Sync log channel may start later."); } /** @@ -657,6 +684,7 @@ public boolean removeSyncLogChannel(Peer targetPeer) { String suggestion = ""; try { logDispatcher.removeLogDispatcherThread(targetPeer); + logger.info("[IoTConsensus] log dispatcher to {} removed and cleaned up", targetPeer); } catch (Exception e) { logger.warn( "[IoTConsensus] Exception happened during removing log dispatcher thread, but configuration.dat will still be removed.", e); @@ -671,92 +699,13 @@ // step 2, update configuration configuration.remove(targetPeer); checkAndUpdateSafeDeletedSearchIndex(); - // step 3, persist configuration - persistConfiguration(); - logger.info("[IoTConsensus] Configuration updated to {}. {}", this.configuration, suggestion); + logger.info( + "[IoTConsensus Configuration] Configuration updated to {}.
{}", + this.configuration, + suggestion); return !exceptionHappened; } - public void persistConfiguration() { - try { - removeDuplicateConfiguration(); - renameTmpConfigurationFileToRemoveSuffix(); - serializeConfigurationAndFsyncToDisk(); - deleteConfiguration(); - renameTmpConfigurationFileToRemoveSuffix(); - } catch (IOException e) { - // TODO: (xingtanzjr) need to handle the IOException because the IoTConsensus won't - // work expectedly - // if the exception occurs - logger.error("Unexpected error occurs when persisting configuration", e); - } - } - - public void recoverConfiguration() { - try { - Path tmpConfigurationPath = - Paths.get(new File(storageDir, CONFIGURATION_TMP_FILE_NAME).getAbsolutePath()); - Path configurationPath = - Paths.get(new File(storageDir, CONFIGURATION_FILE_NAME).getAbsolutePath()); - // If the tmpConfigurationPath exists, it means the `persistConfigurationUpdate` is - // interrupted - // unexpectedly, we need substitute configuration with tmpConfiguration file - if (Files.exists(tmpConfigurationPath)) { - Files.deleteIfExists(configurationPath); - Files.move(tmpConfigurationPath, configurationPath); - } - if (Files.exists(configurationPath)) { - recoverFromOldConfigurationFile(configurationPath); - } else { - // recover from split configuration file - Path dirPath = Paths.get(storageDir); - List tmpPeerList = getConfiguration(dirPath, CONFIGURATION_TMP_FILE_NAME); - configuration.addAll(tmpPeerList); - List peerList = getConfiguration(dirPath, CONFIGURATION_FILE_NAME); - for (Peer peer : peerList) { - if (!configuration.contains(peer)) { - configuration.add(peer); - } - } - persistConfiguration(); - } - logger.info("Recover IoTConsensus server Impl, configuration: {}", configuration); - } catch (IOException e) { - logger.error("Unexpected error occurs when recovering configuration", e); - } - } - - // @Compatibility - private void recoverFromOldConfigurationFile(Path oldConfigurationPath) throws IOException { - // recover from old configuration file - ByteBuffer buffer = ByteBuffer.wrap(Files.readAllBytes(oldConfigurationPath)); - int size = buffer.getInt(); - for (int i = 0; i < size; i++) { - configuration.add(Peer.deserialize(buffer)); - } - persistConfiguration(); - } - - public static String generateConfigurationDatFileName(int nodeId, String suffix) { - return nodeId + "_" + suffix; - } - - private List getConfiguration(Path dirPath, String configurationFileName) - throws IOException { - ByteBuffer buffer; - List tmpConfiguration = new ArrayList<>(); - Path[] files = - Files.walk(dirPath) - .filter(Files::isRegularFile) - .filter(filePath -> filePath.getFileName().toString().contains(configurationFileName)) - .toArray(Path[]::new); - for (Path file : files) { - buffer = ByteBuffer.wrap(Files.readAllBytes(file)); - tmpConfiguration.add(Peer.deserialize(buffer)); - } - return tmpConfiguration; - } - public IndexedConsensusRequest buildIndexedConsensusRequestForLocalRequest( IConsensusRequest request) { if (request instanceof ComparableConsensusRequest) { @@ -782,9 +731,7 @@ public long getMinSyncIndex() { } public long getMinFlushedSyncIndex() { - return lastPinnedSafeDeletedIndexForMigration == -1 - ? 
logDispatcher.getMinFlushedSyncIndex().orElseGet(searchIndex::get) - : lastPinnedSafeDeletedIndexForMigration; + return logDispatcher.getMinFlushedSyncIndex().orElseGet(searchIndex::get); } public String getStorageDir() { @@ -796,7 +743,7 @@ public Peer getThisNode() { } public List getConfiguration() { - return configuration; + return new ArrayList<>(configuration); } public long getSearchIndex() { @@ -885,7 +832,7 @@ public void cleanupRemoteSnapshot(Peer targetPeer) throws ConsensusGroupModifyPe } public void cleanupSnapshot(String snapshotId) throws ConsensusGroupModifyPeerException { - File snapshotDir = new File(storageDir, snapshotId); + File snapshotDir = getSnapshotPath(snapshotId); if (snapshotDir.exists()) { try { FileUtils.deleteDirectory(snapshotDir); @@ -907,31 +854,25 @@ public void cleanupLocalSnapshot() { } } - /** - * We should set safelyDeletedSearchIndex to searchIndex before addPeer to avoid potential data - * lost. - */ - public void checkAndLockSafeDeletedSearchIndex() { - lastPinnedSearchIndexForMigration = searchIndex.get(); - lastPinnedSafeDeletedIndexForMigration = getMinFlushedSyncIndex(); - consensusReqReader.setSafelyDeletedSearchIndex(getMinFlushedSyncIndex()); - } - - /** - * We should unlock safelyDeletedSearchIndex after addPeer to avoid potential data accumulation. - */ - public void checkAndUnlockSafeDeletedSearchIndex() { - lastPinnedSearchIndexForMigration = -1; - lastPinnedSafeDeletedIndexForMigration = -1; + void checkAndUpdateIndex() { + // Since the underlying wal does not persist safelyDeletedSearchIndex, IoTConsensus needs to + // update wal with its syncIndex recovered from the consensus layer when initializing. + // This prevents wal from being piled up if the safelyDeletedSearchIndex is not updated after + // the restart and Leader migration occurs checkAndUpdateSafeDeletedSearchIndex(); + // see message in logs for details + checkAndUpdateSearchIndex(); } /** - * If there is only one replica, set it to Long.MAX_VALUE.、 If there are multiple replicas, get - * the latest SafelyDeletedSearchIndex again. This enables wal to be deleted in a timely manner. + * If there is only one replica, set it to Long.MAX_VALUE. If there are multiple replicas, get the + * latest SafelyDeletedSearchIndex again. This enables wal to be deleted in a timely manner. */ - public void checkAndUpdateSafeDeletedSearchIndex() { - if (configuration.size() == 1) { + void checkAndUpdateSafeDeletedSearchIndex() { + if (configuration.isEmpty()) { + logger.error( + "Configuration is empty, which is unexpected. 
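The rule encoded by `checkAndUpdateSafeDeletedSearchIndex` is worth spelling out: with a single replica every WAL entry is immediately safe to delete, while with multiple replicas deletion is bounded by the slowest sync channel. A compact sketch under those assumptions (illustrative names, not the IoTDB API):

```java
import java.util.Collection;
import java.util.OptionalLong;

final class SafeDeleteIndex {
  static long compute(
      int replicaCount, Collection<Long> flushedSyncIndexes, long localSearchIndex) {
    // Single replica: no follower ever needs old entries, so signal the WAL
    // with the "delete everything" sentinel.
    if (replicaCount <= 1) {
      return Long.MAX_VALUE;
    }
    // Multiple replicas: only entries already flushed by the slowest sync
    // channel are safe; fall back to the local index when no channel exists.
    OptionalLong min = flushedSyncIndexes.stream().mapToLong(Long::longValue).min();
    return min.orElse(localSearchIndex);
  }
}
```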
Safe deleted search index won't be updated this time."); + } else if (configuration.size() == 1) { consensusReqReader.setSafelyDeletedSearchIndex(Long.MAX_VALUE); } else { consensusReqReader.setSafelyDeletedSearchIndex(getMinFlushedSyncIndex()); @@ -967,87 +908,6 @@ public String getConsensusGroupId() { return consensusGroupId; } - private void serializeConfigurationAndFsyncToDisk() throws IOException { - for (Peer peer : configuration) { - String peerConfigurationFileName = - generateConfigurationDatFileName(peer.getNodeId(), CONFIGURATION_TMP_FILE_NAME); - FileOutputStream fileOutputStream = - new FileOutputStream(new File(storageDir, peerConfigurationFileName)); - try (DataOutputStream outputStream = new DataOutputStream(fileOutputStream)) { - peer.serialize(outputStream); - } finally { - try { - fileOutputStream.flush(); - fileOutputStream.getFD().sync(); - } catch (IOException ignore) { - // ignore sync exception - } - } - } - } - - private void renameTmpConfigurationFileToRemoveSuffix() throws IOException { - try (Stream stream = Files.list(Paths.get(storageDir))) { - List paths = - stream - .filter(Files::isRegularFile) - .filter( - filePath -> - filePath.getFileName().toString().endsWith(CONFIGURATION_TMP_FILE_NAME)) - .collect(Collectors.toList()); - for (Path filePath : paths) { - String targetPath = - filePath.toString().replace(CONFIGURATION_TMP_FILE_NAME, CONFIGURATION_FILE_NAME); - File targetFile = new File(targetPath); - if (targetFile.exists()) { - try { - Files.delete(targetFile.toPath()); - } catch (IOException e) { - logger.error("Unexpected error occurs when delete file: {}", targetPath, e); - } - } - if (!filePath.toFile().renameTo(targetFile)) { - logger.error("Unexpected error occurs when rename file: {} -> {}", filePath, targetPath); - } - } - } catch (UncheckedIOException e) { - throw e.getCause(); - } - } - - private void deleteConfiguration() throws IOException { - try (Stream stream = Files.list(Paths.get(storageDir))) { - stream - .filter(Files::isRegularFile) - .filter(filePath -> filePath.getFileName().toString().endsWith(CONFIGURATION_FILE_NAME)) - .forEach( - filePath -> { - try { - Files.delete(filePath); - } catch (IOException e) { - logger.error( - "Unexpected error occurs when deleting old configuration file {}", - filePath, - e); - } - }); - } catch (UncheckedIOException e) { - throw e.getCause(); - } - } - - public void removeDuplicateConfiguration() { - Set seen = new HashSet<>(); - Iterator it = configuration.iterator(); - - while (it.hasNext()) { - Peer peer = it.next(); - if (!seen.add(peer)) { - it.remove(); - } - } - } - /** This method is used for hot reload of IoTConsensusConfig. 
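Since the configuration serializer disappears from this file, a brief reminder of the flush-then-sync idiom it used: flushing pushes bytes to the OS, while `getFD().sync()` forces them onto stable storage. A simplified sketch (error handling reduced; the removed code synced in a `finally` block and swallowed sync failures):

```java
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

final class DurableWrite {
  static void write(File target, byte[] payload) throws IOException {
    FileOutputStream fos = new FileOutputStream(target);
    try (DataOutputStream out = new DataOutputStream(fos)) {
      out.write(payload);
      out.flush();
      fos.getFD().sync(); // durability point: data survives power loss
    }
  }
}
```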
*/ public void reloadConsensusConfig(IoTConsensusConfig config) { this.config = config; @@ -1142,6 +1002,7 @@ private TSStatus cacheAndInsertLatestNode(DeserializedBatchIndexedConsensusReque request.getStartSyncIndex(), e); Thread.currentThread().interrupt(); + break; } } long sortTime = System.nanoTime(); diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/DispatchLogHandler.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/DispatchLogHandler.java index 0e03b97a111da..9b0979bf6be24 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/DispatchLogHandler.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/DispatchLogHandler.java @@ -74,6 +74,19 @@ public void onComplete(TSyncLogEntriesRes response) { messages); sleepCorrespondingTimeAndRetryAsynchronous(); } else { + if (logger.isDebugEnabled()) { + boolean containsError = + response.getStatuses().stream() + .anyMatch( + status -> status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()); + if (containsError) { + logger.debug( + "Send {} to peer {} complete but contains unsuccessful status: {}", + batch, + thread.getPeer(), + response.getStatuses()); + } + } completeBatch(batch); } logDispatcherThreadMetrics.recordSyncLogTimePerRequest(System.nanoTime() - createTime); @@ -88,21 +101,19 @@ public static boolean needRetry(int statusCode) { @Override public void onError(Exception exception) { ++retryCount; - if (logger.isWarnEnabled()) { - Throwable rootCause = ExceptionUtils.getRootCause(exception); - logger.warn( - "Can not send {} to peer for {} times {} because {}", - batch, - thread.getPeer(), - retryCount, - rootCause.toString()); - // skip TApplicationException caused by follower - if (rootCause instanceof TApplicationException) { - completeBatch(batch); - logger.warn("Skip retrying this Batch {} because of TApplicationException.", batch); - logDispatcherThreadMetrics.recordSyncLogTimePerRequest(System.nanoTime() - createTime); - return; - } + Throwable rootCause = ExceptionUtils.getRootCause(exception); + logger.warn( + "Can not send {} to peer for {} times {} because {}", + batch, + thread.getPeer(), + retryCount, + rootCause.toString()); + // skip TApplicationException caused by follower + if (rootCause instanceof TApplicationException) { + completeBatch(batch); + logger.warn("Skip retrying this Batch {} because of TApplicationException.", batch); + logDispatcherThreadMetrics.recordSyncLogTimePerRequest(System.nanoTime() - createTime); + return; } sleepCorrespondingTimeAndRetryAsynchronous(); } diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/IoTConsensusClientPool.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/IoTConsensusClientPool.java index 4f6c182ab1d80..307ef6eb8034c 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/IoTConsensusClientPool.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/IoTConsensusClientPool.java @@ -29,7 +29,6 @@ import org.apache.iotdb.commons.concurrent.ThreadName; import org.apache.iotdb.consensus.config.IoTConsensusConfig; -import org.apache.commons.pool2.KeyedObjectPool; import org.apache.commons.pool2.impl.GenericKeyedObjectPool; public class IoTConsensusClientPool { @@ -48,7 +47,7 @@ public SyncIoTConsensusServiceClientPoolFactory(IoTConsensusConfig config) { } @Override - public KeyedObjectPool createClientPool( + public 
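The `isDebugEnabled()` guard added in `DispatchLogHandler.onComplete` above is the standard way to avoid paying for a stream scan when debug logging is off. A self-contained sketch of the pattern; the status codes and class name are illustrative:

```java
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class DebugGuardExample {
  private static final Logger LOG = LoggerFactory.getLogger(DebugGuardExample.class);
  private static final int SUCCESS = 200;

  static void logPartialFailures(List<Integer> statusCodes) {
    // The scan over statuses is only worth doing when debug is active.
    if (LOG.isDebugEnabled()) {
      boolean containsError = statusCodes.stream().anyMatch(code -> code != SUCCESS);
      if (containsError) {
        LOG.debug("Batch completed with unsuccessful statuses: {}", statusCodes);
      }
    }
  }
}
```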
GenericKeyedObjectPool createClientPool( ClientManager manager) { GenericKeyedObjectPool clientPool = new GenericKeyedObjectPool<>( @@ -84,7 +83,7 @@ public AsyncIoTConsensusServiceClientPoolFactory(IoTConsensusConfig config) { } @Override - public KeyedObjectPool createClientPool( + public GenericKeyedObjectPool createClientPool( ClientManager manager) { GenericKeyedObjectPool clientPool = new GenericKeyedObjectPool<>( diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/LogDispatcher.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/LogDispatcher.java index 9ffb29e77f3ce..2d4e7cd5e011e 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/LogDispatcher.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/LogDispatcher.java @@ -46,8 +46,8 @@ import java.util.OptionalLong; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; -import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; @@ -98,13 +98,14 @@ private void initLogSyncThreadPool() { public synchronized void start() { if (!threads.isEmpty()) { - threads.forEach(executorService::submit); + threads.forEach(logDispatcherThread -> executorService.submit(logDispatcherThread)); } } public synchronized void stop() { if (!threads.isEmpty()) { - threads.forEach(LogDispatcherThread::stop); + threads.forEach(LogDispatcherThread::setStopped); + threads.forEach(LogDispatcherThread::processStopped); executorService.shutdownNow(); int timeout = 10; try { @@ -119,7 +120,8 @@ public synchronized void stop() { stopped = true; } - public synchronized void addLogDispatcherThread(Peer peer, long initialSyncIndex) { + public synchronized void addLogDispatcherThread( + Peer peer, long initialSyncIndex, boolean startNow) { if (stopped) { return; } @@ -130,7 +132,9 @@ public synchronized void addLogDispatcherThread(Peer peer, long initialSyncIndex if (this.executorService == null) { initLogSyncThreadPool(); } - executorService.submit(thread); + if (startNow) { + executorService.submit(thread); + } } public synchronized void removeLogDispatcherThread(Peer peer) throws IOException { @@ -228,7 +232,7 @@ public class LogDispatcherThread implements Runnable { private final LogDispatcherThreadMetrics logDispatcherThreadMetrics; - private Semaphore threadSemaphore = new Semaphore(0); + private final CountDownLatch runFinished = new CountDownLatch(1); public LogDispatcherThread(Peer peer, IoTConsensusConfig config, long initialSyncIndex) { this.peer = peer; @@ -276,7 +280,7 @@ public int getBufferRequestSize() { /** try to offer a request into queue with memory control. 
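Two changes in `LogDispatcher` above are easy to miss: `stop()` now flags every thread before waiting on any of them, and a one-shot `CountDownLatch` replaces the `Semaphore` handshake. A sketch of both, with illustrative names:

```java
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

class Worker implements Runnable {
  private final CountDownLatch runFinished = new CountDownLatch(1);
  private volatile boolean stopped = false;

  @Override
  public void run() {
    try {
      while (!stopped) {
        // ... replicate pending entries ...
      }
    } finally {
      runFinished.countDown(); // signal exactly once, even on exceptions
    }
  }

  void setStopped() {
    stopped = true;
  }

  void awaitStopped() throws InterruptedException {
    if (!runFinished.await(30, TimeUnit.SECONDS)) {
      System.err.println("worker did not stop within 30s");
    }
  }

  // Two-phase stop: flag every worker first so they all wind down in
  // parallel, then wait on each, instead of flag-and-wait serially.
  static void stopAll(List<Worker> workers) throws InterruptedException {
    workers.forEach(Worker::setStopped);
    for (Worker w : workers) {
      w.awaitStopped();
    }
  }
}
```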
*/ public boolean offer(IndexedConsensusRequest indexedConsensusRequest) { - if (!iotConsensusMemoryManager.reserve(indexedConsensusRequest.getSerializedSize(), true)) { + if (!iotConsensusMemoryManager.reserve(indexedConsensusRequest.getMemorySize(), true)) { return false; } boolean success; @@ -284,25 +288,33 @@ public boolean offer(IndexedConsensusRequest indexedConsensusRequest) { success = pendingEntries.offer(indexedConsensusRequest); } catch (Throwable t) { // If exception occurs during request offer, the reserved memory should be released - iotConsensusMemoryManager.free(indexedConsensusRequest.getSerializedSize(), true); + iotConsensusMemoryManager.free(indexedConsensusRequest.getMemorySize(), true); throw t; } if (!success) { // If offer failed, the reserved memory should be released - iotConsensusMemoryManager.free(indexedConsensusRequest.getSerializedSize(), true); + iotConsensusMemoryManager.free(indexedConsensusRequest.getMemorySize(), true); } return success; } /** try to remove a request from queue with memory control. */ private void releaseReservedMemory(IndexedConsensusRequest indexedConsensusRequest) { - iotConsensusMemoryManager.free(indexedConsensusRequest.getSerializedSize(), true); + iotConsensusMemoryManager.free(indexedConsensusRequest.getMemorySize(), true); } public void stop() { + setStopped(); + processStopped(); + } + + private void setStopped() { stopped = true; + } + + private void processStopped() { try { - if (!threadSemaphore.tryAcquire(30, TimeUnit.SECONDS)) { + if (!runFinished.await(30, TimeUnit.SECONDS)) { logger.info("{}: Dispatcher for {} didn't stop after 30s.", impl.getThisNode(), peer); } } catch (InterruptedException e) { @@ -310,13 +322,13 @@ public void stop() { } long requestSize = 0; for (IndexedConsensusRequest indexedConsensusRequest : pendingEntries) { - requestSize += indexedConsensusRequest.getSerializedSize(); + requestSize += indexedConsensusRequest.getMemorySize(); } pendingEntries.clear(); iotConsensusMemoryManager.free(requestSize, true); requestSize = 0; for (IndexedConsensusRequest indexedConsensusRequest : bufferedEntries) { - requestSize += indexedConsensusRequest.getSerializedSize(); + requestSize += indexedConsensusRequest.getMemorySize(); } iotConsensusMemoryManager.free(requestSize, true); syncStatus.free(); @@ -377,7 +389,7 @@ public void run() { } catch (Exception e) { logger.error("Unexpected error in logDispatcher for peer {}", peer, e); } - threadSemaphore.release(); + runFinished.countDown(); logger.info("{}: Dispatcher for {} exits", impl.getThisNode(), peer); } diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/service/IoTConsensusRPCServiceProcessor.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/service/IoTConsensusRPCServiceProcessor.java index 6201ec06ef676..2bac66738fddb 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/service/IoTConsensusRPCServiceProcessor.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/service/IoTConsensusRPCServiceProcessor.java @@ -50,6 +50,8 @@ import org.apache.iotdb.consensus.iot.thrift.TSyncLogEntriesRes; import org.apache.iotdb.consensus.iot.thrift.TTriggerSnapshotLoadReq; import org.apache.iotdb.consensus.iot.thrift.TTriggerSnapshotLoadRes; +import org.apache.iotdb.consensus.iot.thrift.TWaitReleaseAllRegionRelatedResourceReq; +import org.apache.iotdb.consensus.iot.thrift.TWaitReleaseAllRegionRelatedResourceRes; import 
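The `offer` path above frees the reservation on every failure branch (exception and full queue), which is what keeps the memory accounting leak-free. A minimal sketch of the same discipline using a plain `AtomicLong` budget; this is illustrative, not the `IoTConsensusMemoryManager` API:

```java
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.atomic.AtomicLong;

class AccountedQueue<T> {
  private final BlockingQueue<T> queue;
  private final AtomicLong reservedBytes = new AtomicLong();
  private final long limitBytes;

  AccountedQueue(BlockingQueue<T> queue, long limitBytes) {
    this.queue = queue;
    this.limitBytes = limitBytes;
  }

  boolean offer(T item, long memorySize) {
    if (reservedBytes.addAndGet(memorySize) > limitBytes) {
      reservedBytes.addAndGet(-memorySize); // reservation refused
      return false;
    }
    boolean success;
    try {
      success = queue.offer(item);
    } catch (Throwable t) {
      reservedBytes.addAndGet(-memorySize); // roll back on exception
      throw t;
    }
    if (!success) {
      reservedBytes.addAndGet(-memorySize); // roll back on full queue
    }
    return success;
  }
}
```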
org.apache.iotdb.consensus.iot.thrift.TWaitSyncLogCompleteReq; import org.apache.iotdb.consensus.iot.thrift.TWaitSyncLogCompleteRes; import org.apache.iotdb.rpc.TSStatusCode; @@ -95,8 +97,12 @@ public TSyncLogEntriesRes syncLogEntries(TSyncLogEntriesReq req) { return new TSyncLogEntriesRes(Collections.singletonList(status)); } if (!impl.isActive()) { + String message = + String.format( + "Peer is inactive and not ready to receive sync log request, %s, DataNode Id: %s", + groupId, impl.getThisNode().getNodeId()); TSStatus status = new TSStatus(TSStatusCode.WRITE_PROCESS_REJECT.getStatusCode()); - status.setMessage("peer is inactive and not ready to receive sync log request"); + status.setMessage(message); return new TSyncLogEntriesRes(Collections.singletonList(status)); } BatchIndexedConsensusRequest logEntriesInThisBatch = @@ -183,13 +189,8 @@ public TBuildSyncLogChannelRes buildSyncLogChannel(TBuildSyncLogChannelReq req) return new TBuildSyncLogChannelRes(status); } TSStatus responseStatus; - try { - impl.buildSyncLogChannel(new Peer(groupId, req.nodeId, req.endPoint)); - responseStatus = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - } catch (ConsensusGroupModifyPeerException e) { - responseStatus = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()); - responseStatus.setMessage(e.getMessage()); - } + impl.buildSyncLogChannel(new Peer(groupId, req.nodeId, req.endPoint), true); + responseStatus = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); return new TBuildSyncLogChannelRes(responseStatus); } @@ -227,8 +228,6 @@ public TWaitSyncLogCompleteRes waitSyncLogComplete(TWaitSyncLogCompleteReq req) String message = String.format("unexpected consensusGroupId %s for waitSyncLogComplete request", groupId); LOGGER.error(message); - TSStatus status = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()); - status.setMessage(message); return new TWaitSyncLogCompleteRes(true, 0, 0); } long searchIndex = impl.getSearchIndex(); @@ -236,6 +235,24 @@ public TWaitSyncLogCompleteRes waitSyncLogComplete(TWaitSyncLogCompleteReq req) return new TWaitSyncLogCompleteRes(searchIndex == safeIndex, searchIndex, safeIndex); } + @Override + public TWaitReleaseAllRegionRelatedResourceRes waitReleaseAllRegionRelatedResource( + TWaitReleaseAllRegionRelatedResourceReq req) throws TException { + ConsensusGroupId groupId = + ConsensusGroupId.Factory.createFromTConsensusGroupId(req.getConsensusGroupId()); + IoTConsensusServerImpl impl = consensus.getImpl(groupId); + if (impl == null) { + String message = + String.format( + "unexpected consensusGroupId %s for TWaitReleaseAllRegionRelatedResourceRes request", + groupId); + LOGGER.error(message); + return new TWaitReleaseAllRegionRelatedResourceRes(true); + } + return new TWaitReleaseAllRegionRelatedResourceRes( + impl.hasReleaseAllRegionRelatedResource(groupId)); + } + @Override public TSendSnapshotFragmentRes sendSnapshotFragment(TSendSnapshotFragmentReq req) throws TException { @@ -252,7 +269,7 @@ public TSendSnapshotFragmentRes sendSnapshotFragment(TSendSnapshotFragmentReq re } TSStatus responseStatus; try { - impl.receiveSnapshotFragment(req.snapshotId, req.filePath, req.fileChunk); + impl.receiveSnapshotFragment(req.snapshotId, req.filePath, req.fileChunk, req.offset); responseStatus = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } catch (ConsensusGroupModifyPeerException e) { responseStatus = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()); diff --git 
a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/snapshot/SnapshotFragment.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/snapshot/SnapshotFragment.java index a249bad90ff32..67fcb220c180f 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/snapshot/SnapshotFragment.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/snapshot/SnapshotFragment.java @@ -50,6 +50,7 @@ public TSendSnapshotFragmentReq toTSendSnapshotFragmentReq() { TSendSnapshotFragmentReq req = new TSendSnapshotFragmentReq(); req.setSnapshotId(snapshotId); req.setFilePath(filePath); + req.setOffset(startOffset); req.setChunkLength(fragmentSize); req.setFileChunk(fileChunk); return req; diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/snapshot/SnapshotFragmentReader.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/snapshot/SnapshotFragmentReader.java index ca79b8f3c955c..3331829d243f3 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/snapshot/SnapshotFragmentReader.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/snapshot/SnapshotFragmentReader.java @@ -46,12 +46,12 @@ public SnapshotFragmentReader(String snapshotId, Path path) throws IOException { public boolean hasNext() throws IOException { buf.clear(); - int actualReadSize = fileChannel.read(buf); + int readSize = fileChannel.read(buf); buf.flip(); - if (actualReadSize > 0) { + if (readSize > 0) { cachedSnapshotFragment = - new SnapshotFragment(snapshotId, filePath, fileSize, totalReadSize, actualReadSize, buf); - totalReadSize += actualReadSize; + new SnapshotFragment(snapshotId, filePath, fileSize, totalReadSize, readSize, buf); + totalReadSize += readSize; return true; } return false; diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/PipeConsensus.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/PipeConsensus.java index 7cd0470f97721..86e8fec7bd8e1 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/PipeConsensus.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/PipeConsensus.java @@ -27,7 +27,7 @@ import org.apache.iotdb.commons.client.sync.SyncPipeConsensusServiceClient; import org.apache.iotdb.commons.consensus.ConsensusGroupId; import org.apache.iotdb.commons.exception.StartupException; -import org.apache.iotdb.commons.pipe.task.meta.PipeStatus; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus; import org.apache.iotdb.commons.service.RegisterManager; import org.apache.iotdb.commons.utils.FileUtils; import org.apache.iotdb.commons.utils.StatusUtils; @@ -55,6 +55,7 @@ import org.apache.iotdb.rpc.RpcUtils; import org.apache.iotdb.rpc.TSStatusCode; +import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -65,16 +66,16 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.ReentrantLock; +import java.util.function.BiConsumer; import java.util.stream.Collectors; -import static org.apache.iotdb.consensus.iot.IoTConsensus.getConsensusGroupIdsFromDir; - public class PipeConsensus implements 
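`SnapshotFragment` now carries the start offset on the wire (`req.setOffset(startOffset)`), so the receiver can position each chunk explicitly instead of relying on arrival order. A sketch of a reader producing offset-tagged fragments, mirroring `SnapshotFragmentReader` above; the shipping step is left as a comment:

```java
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

final class FragmentScan {
  static void scan(Path file) throws IOException {
    ByteBuffer buf = ByteBuffer.allocate(64 * 1024);
    long offset = 0;
    try (FileChannel channel = FileChannel.open(file, StandardOpenOption.READ)) {
      int readSize;
      while ((readSize = channel.read(buf)) > 0) {
        buf.flip();
        // ship (file, offset, readSize, buf) as one fragment here; the
        // receiver can then write with FileChannel.write(buf, offset)
        offset += readSize;
        buf.clear();
      }
    }
  }
}
```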
IConsensus { private static final String CONSENSUS_PIPE_GUARDIAN_TASK_ID = "consensus_pipe_guardian"; private static final String CLASS_NAME = PipeConsensus.class.getSimpleName(); @@ -94,6 +95,7 @@ public class PipeConsensus implements IConsensus { private final ConsensusPipeGuardian consensusPipeGuardian; private final IClientManager asyncClientManager; private final IClientManager syncClientManager; + private Map> correctPeerListBeforeStart = null; public PipeConsensus(ConsensusConfig config, IStateMachine.Registry registry) { this.thisNode = config.getThisNodeEndPoint(); @@ -138,33 +140,64 @@ private void initAndRecover() throws IOException { } } else { // asynchronously recover, retry logic is implemented at PipeConsensusImpl - CompletableFuture.runAsync( - () -> { - try (DirectoryStream stream = Files.newDirectoryStream(storageDir.toPath())) { - for (Path path : stream) { - ConsensusGroupId consensusGroupId = - parsePeerFileName(path.getFileName().toString()); - PipeConsensusServerImpl consensus = - new PipeConsensusServerImpl( - new Peer(consensusGroupId, thisNodeId, thisNode), - registry.apply(consensusGroupId), - path.toString(), - new ArrayList<>(), - config, - consensusPipeManager, - syncClientManager); - stateMachineMap.put(consensusGroupId, consensus); - consensus.start(true); - } - } catch (Exception e) { - LOGGER.error("Failed to recover consensus from {}", storageDir, e); - } - }) - .exceptionally( - e -> { - LOGGER.error("Failed to recover consensus from {}", storageDir, e); - return null; - }); + CompletableFuture future = + CompletableFuture.runAsync( + () -> { + try (DirectoryStream stream = + Files.newDirectoryStream(storageDir.toPath())) { + for (Path path : stream) { + ConsensusGroupId consensusGroupId = + parsePeerFileName(path.getFileName().toString()); + PipeConsensusServerImpl consensus = + new PipeConsensusServerImpl( + new Peer(consensusGroupId, thisNodeId, thisNode), + registry.apply(consensusGroupId), + path.toString(), + new ArrayList<>(), + config, + consensusPipeManager, + syncClientManager); + stateMachineMap.put(consensusGroupId, consensus); + checkPeerListAndStartIfEligible(consensusGroupId, consensus); + } + } catch (Exception e) { + LOGGER.error("Failed to recover consensus from {}", storageDir, e); + } + }) + .exceptionally( + e -> { + LOGGER.error("Failed to recover consensus from {}", storageDir, e); + return null; + }); + } + } + + private void checkPeerListAndStartIfEligible( + ConsensusGroupId consensusGroupId, PipeConsensusServerImpl consensus) throws IOException { + BiConsumer> resetPeerListWithoutThrow = + (dataRegionId, peers) -> { + try { + resetPeerList(dataRegionId, peers); + } catch (ConsensusGroupNotExistException ignore) { + + } catch (Exception e) { + LOGGER.warn("Failed to reset peer list while start", e); + } + }; + + if (correctPeerListBeforeStart != null) { + if (correctPeerListBeforeStart.containsKey(consensusGroupId)) { + // make peers which are in list correct + resetPeerListWithoutThrow.accept( + consensusGroupId, correctPeerListBeforeStart.get(consensusGroupId)); + consensus.start(true); + } else { + // clear peers which are not in the list + resetPeerListWithoutThrow.accept(consensusGroupId, Collections.emptyList()); + } + + } else { + consensus.start(true); } } @@ -372,12 +405,20 @@ public void removeRemotePeer(ConsensusGroupId groupId, Peer peer) throws Consens } } + @Override + public void recordCorrectPeerListBeforeStarting( + Map> correctPeerList) { + LOGGER.info("Record correct peer list: {}", correctPeerList); + 
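`checkPeerListAndStartIfEligible` gates recovery on a peer list recorded before startup: listed groups are reconciled and started, unlisted ones are reset to an empty peer set and thereby removed. A condensed sketch of that decision with stand-in types:

```java
import java.util.Collections;
import java.util.List;
import java.util.Map;

final class StartGate {
  interface Region {
    void resetPeers(List<String> correctPeers);
    void start();
  }

  static void startIfEligible(
      String groupId, Region region, Map<String, List<String>> recorded) {
    if (recorded == null) {
      region.start(); // nothing was recorded: trust the local state
      return;
    }
    List<String> peers = recorded.get(groupId);
    if (peers != null) {
      region.resetPeers(peers); // fix up membership, then start
      region.start();
    } else {
      // Group is unknown to the recorded topology: clearing its peers
      // effectively deletes the stale local replica.
      region.resetPeers(Collections.emptyList());
    }
  }
}
```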
this.correctPeerListBeforeStart = correctPeerList; + } + @Override public void resetPeerList(ConsensusGroupId groupId, List correctPeers) throws ConsensusException { PipeConsensusServerImpl impl = Optional.ofNullable(stateMachineMap.get(groupId)) .orElseThrow(() -> new ConsensusGroupNotExistException(groupId)); + if (!correctPeers.contains(new Peer(groupId, thisNodeId, thisNode))) { LOGGER.warn( "[RESET PEER LIST] Local peer is not in the correct configuration, delete local peer {}", @@ -385,29 +426,43 @@ public void resetPeerList(ConsensusGroupId groupId, List correctPeers) deleteLocalPeer(groupId); return; } + + ImmutableList currentPeers = ImmutableList.copyOf(impl.getPeers()); String previousPeerListStr = impl.getPeers().toString(); - for (Peer peer : impl.getPeers()) { + // remove invalid peer + for (Peer peer : currentPeers) { if (!correctPeers.contains(peer)) { try { impl.dropConsensusPipeToTargetPeer(peer); + LOGGER.info("[RESET PEER LIST] Remove sync channel with: {}", peer); } catch (ConsensusGroupModifyPeerException e) { - LOGGER.error( - "[RESET PEER LIST] Failed to remove peer {}'s consensus pipe from group {}", - peer, - groupId, - e); + LOGGER.error("[RESET PEER LIST] Failed to remove sync channel with: {}", peer, e); } } } - LOGGER.info( - "[RESET PEER LIST] Local peer list has been reset: {} -> {}", - previousPeerListStr, - impl.getPeers()); + // add correct peer for (Peer peer : correctPeers) { - if (!impl.containsPeer(peer)) { - LOGGER.warn("[RESET PEER LIST] \"Correct peer\" {} is not in local peer list", peer); + if (!impl.containsPeer(peer) && peer.getNodeId() != this.thisNodeId) { + try { + impl.createConsensusPipeToTargetPeer(peer); + LOGGER.info("[RESET PEER LIST] Build sync channel with: {}", peer); + } catch (ConsensusGroupModifyPeerException e) { + LOGGER.warn("[RESET PEER LIST] Failed to build sync channel with: {}", peer, e); + } } } + // show result + String currentPeerListStr = impl.getPeers().toString(); + if (!previousPeerListStr.equals(currentPeerListStr)) { + LOGGER.info( + "[RESET PEER LIST] Local peer list has been reset: {} -> {}", + previousPeerListStr, + impl.getPeers()); + } else { + LOGGER.info( + "[RESET PEER LIST] The current peer list is correct, nothing need to be reset: {}", + previousPeerListStr); + } } @Override @@ -458,11 +513,6 @@ public List getAllConsensusGroupIds() { return new ArrayList<>(stateMachineMap.keySet()); } - @Override - public List getAllConsensusGroupIdsWithoutStarting() { - return getConsensusGroupIdsFromDir(storageDir, LOGGER); - } - @Override public String getRegionDirFromConsensusGroupId(ConsensusGroupId groupId) { return getPeerDir(groupId); diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/PipeConsensusServerImpl.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/PipeConsensusServerImpl.java index d4b5552b7ec84..b6b6829894af1 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/PipeConsensusServerImpl.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/PipeConsensusServerImpl.java @@ -27,7 +27,7 @@ import org.apache.iotdb.commons.consensus.index.ComparableConsensusRequest; import org.apache.iotdb.commons.consensus.index.ProgressIndex; import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex; -import org.apache.iotdb.commons.pipe.task.meta.PipeStatus; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus; import org.apache.iotdb.commons.service.metric.MetricService; import 
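The reworked `resetPeerList` above is a two-pass reconciliation: drop channels to peers missing from the correct list, then build channels to peers the local replica lacks, never to itself. Schematically, with illustrative types and callbacks standing in for the consensus-pipe operations:

```java
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Consumer;

final class PeerReconciler {
  static void reconcile(
      Set<String> current,
      List<String> correct,
      String self,
      Consumer<String> dropChannel,
      Consumer<String> buildChannel) {
    // Pass 1: remove peers no longer in the group. Iterate over a copy,
    // since the callbacks may mutate the underlying membership set.
    for (String peer : new HashSet<>(current)) {
      if (!correct.contains(peer)) {
        dropChannel.accept(peer);
      }
    }
    // Pass 2: add peers the local replica does not know yet, never itself.
    for (String peer : correct) {
      if (!current.contains(peer) && !peer.equals(self)) {
        buildChannel.accept(peer);
      }
    }
  }
}
```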
org.apache.iotdb.commons.service.metric.PerformanceOverviewMetrics; import org.apache.iotdb.consensus.IStateMachine; diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeManager.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeManager.java index eab94de768f04..321c9053301da 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeManager.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeManager.java @@ -19,7 +19,7 @@ package org.apache.iotdb.consensus.pipe.consensuspipe; -import org.apache.iotdb.commons.pipe.task.meta.PipeStatus; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus; import org.apache.iotdb.consensus.common.Peer; import org.apache.iotdb.consensus.config.PipeConsensusConfig; import org.apache.iotdb.consensus.config.PipeConsensusConfig.ReplicateMode; @@ -28,19 +28,19 @@ import java.util.Map; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_CONSENSUS_GROUP_ID_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_CONSENSUS_PIPE_NAME; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_IP_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_PARALLEL_TASKS_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_PORT_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_REALTIME_FIRST_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_CONSENSUS_GROUP_ID_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_CONSENSUS_RECEIVER_DATANODE_ID_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_CONSENSUS_SENDER_DATANODE_ID_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_MODE_KEY; import static org.apache.iotdb.commons.pipe.config.constant.PipeProcessorConstant.PROCESSOR_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_CONSENSUS_GROUP_ID_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_CONSENSUS_PIPE_NAME; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_IP_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_PARALLEL_TASKS_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_PORT_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_REALTIME_FIRST_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_CONSENSUS_GROUP_ID_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_CONSENSUS_RECEIVER_DATANODE_ID_KEY; +import static 
org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_CONSENSUS_SENDER_DATANODE_ID_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_MODE_KEY; public class ConsensusPipeManager { private final PipeConsensusConfig.Pipe config; diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeName.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeName.java index c653b0fee37ba..1beabf2b3bd93 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeName.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeName.java @@ -20,7 +20,7 @@ package org.apache.iotdb.consensus.pipe.consensuspipe; import org.apache.iotdb.commons.consensus.ConsensusGroupId; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; import org.apache.iotdb.consensus.common.Peer; import java.util.Objects; diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeSelector.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeSelector.java index 1b130a0ea95cb..6af75b5718e65 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeSelector.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeSelector.java @@ -19,7 +19,7 @@ package org.apache.iotdb.consensus.pipe.consensuspipe; -import org.apache.iotdb.commons.pipe.task.meta.PipeStatus; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus; import java.util.Map; diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeConnector.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeSink.java similarity index 95% rename from iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeConnector.java rename to iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeSink.java index 6f1396db97229..6b20a8f3abd98 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeConnector.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeSink.java @@ -18,7 +18,7 @@ */ package org.apache.iotdb.consensus.pipe.consensuspipe; -public interface ConsensusPipeConnector { +public interface ConsensusPipeSink { long getConsensusPipeCommitProgress(); long getConsensusPipeReplicateProgress(); diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/metric/PipeConsensusSyncLagManager.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/metric/PipeConsensusSyncLagManager.java index 6ff754e02ef7a..0f8631514cac2 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/metric/PipeConsensusSyncLagManager.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/metric/PipeConsensusSyncLagManager.java @@ -19,7 +19,7 @@ package org.apache.iotdb.consensus.pipe.metric; -import 
org.apache.iotdb.consensus.pipe.consensuspipe.ConsensusPipeConnector; +import org.apache.iotdb.consensus.pipe.consensuspipe.ConsensusPipeSink; import java.util.List; import java.util.Map; @@ -35,33 +35,32 @@ public class PipeConsensusSyncLagManager { long userWriteProgress = 0; long minReplicateProgress = Long.MAX_VALUE; - List consensusPipeConnectorList = new CopyOnWriteArrayList<>(); + List consensusPipeSinkList = new CopyOnWriteArrayList<>(); private void updateReplicateProgress() { minReplicateProgress = Long.MAX_VALUE; // if there isn't a consensus pipe task, replicate progress is Long.MAX_VALUE. - if (consensusPipeConnectorList.isEmpty()) { + if (consensusPipeSinkList.isEmpty()) { return; } // else we find the minimum progress in all consensus pipe task. - consensusPipeConnectorList.forEach( - consensusPipeConnector -> + consensusPipeSinkList.forEach( + consensusPipeSink -> minReplicateProgress = Math.min( - minReplicateProgress, - consensusPipeConnector.getConsensusPipeReplicateProgress())); + minReplicateProgress, consensusPipeSink.getConsensusPipeReplicateProgress())); } private void updateUserWriteProgress() { // if there isn't a consensus pipe task, user write progress is 0. - if (consensusPipeConnectorList.isEmpty()) { + if (consensusPipeSinkList.isEmpty()) { userWriteProgress = 0; return; } // since the user write progress of different consensus pipes on the same DataRegion is the // same, we only need to take out one Connector to calculate try { - ConsensusPipeConnector connector = consensusPipeConnectorList.get(0); + ConsensusPipeSink connector = consensusPipeSinkList.get(0); userWriteProgress = connector.getConsensusPipeCommitProgress(); } catch (Exception e) { // if removing the last connector happens after empty check, we may encounter @@ -70,12 +69,12 @@ private void updateUserWriteProgress() { } } - public void addConsensusPipeConnector(ConsensusPipeConnector consensusPipeConnector) { - consensusPipeConnectorList.add(consensusPipeConnector); + public void addConsensusPipeConnector(ConsensusPipeSink consensusPipeSink) { + consensusPipeSinkList.add(consensusPipeSink); } - public void removeConsensusPipeConnector(ConsensusPipeConnector connector) { - consensusPipeConnectorList.remove(connector); + public void removeConsensusPipeConnector(ConsensusPipeSink connector) { + consensusPipeSinkList.remove(connector); } /** diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/ApplicationStateMachineProxy.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/ApplicationStateMachineProxy.java index e0e6b542deef9..12095189c522d 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/ApplicationStateMachineProxy.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/ApplicationStateMachineProxy.java @@ -54,6 +54,7 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.StandardCopyOption; +import java.util.Collection; import java.util.concurrent.CompletableFuture; import java.util.function.BiConsumer; @@ -296,6 +297,11 @@ public void notifyLeaderReady() { applicationStateMachine.event().notifyLeaderReady(); } + @Override + public void notifyNotLeader(Collection pendingEntries) throws IOException { + applicationStateMachine.event().notifyNotLeader(); + } + @Override public void notifyConfigurationChanged( long term, long index, RaftConfigurationProto newRaftConfiguration) { diff --git 
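For orientation: the manager above tracks the minimum replicate progress across all consensus pipes and the local user write progress, and the sync lag it reports is presumably the gap between the two. A sketch under that assumption; the clamp to zero and the empty-list default are this sketch's choices, mirroring the defaults visible above (empty list means no lag to report):

```java
import java.util.List;

final class SyncLag {
  static long compute(long userWriteProgress, List<Long> replicateProgress) {
    if (replicateProgress.isEmpty()) {
      return 0; // no pipe tasks: nothing is lagging
    }
    long minReplicate = Long.MAX_VALUE;
    for (long p : replicateProgress) {
      minReplicate = Math.min(minReplicate, p); // slowest pipe bounds the lag
    }
    return Math.max(0, userWriteProgress - minReplicate);
  }
}
```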
a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisClient.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisClient.java index 21f3f2881a2ed..43f0a82561c87 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisClient.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisClient.java @@ -29,8 +29,14 @@ import org.apache.ratis.client.RaftClientRpc; import org.apache.ratis.conf.RaftProperties; import org.apache.ratis.protocol.RaftGroup; +import org.apache.ratis.protocol.exceptions.LeaderSteppingDownException; +import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.apache.ratis.protocol.exceptions.RaftException; +import org.apache.ratis.protocol.exceptions.ReconfigurationInProgressException; +import org.apache.ratis.protocol.exceptions.ServerNotReadyException; +import org.apache.ratis.protocol.exceptions.TimeoutIOException; import org.apache.ratis.retry.ExponentialBackoffRetry; +import org.apache.ratis.retry.RetryPolicies; import org.apache.ratis.retry.RetryPolicy; import org.apache.ratis.thirdparty.io.grpc.StatusRuntimeException; import org.apache.ratis.util.TimeDuration; @@ -120,6 +126,48 @@ public boolean validateObject(RaftGroup key, PooledObject pooledObj } } + static class EndlessRetryFactory extends BaseClientFactory { + + private final RaftProperties raftProperties; + private final RaftClientRpc clientRpc; + private final RatisConfig.Client config; + + public EndlessRetryFactory( + ClientManager clientManager, + RaftProperties raftProperties, + RaftClientRpc clientRpc, + RatisConfig.Client config) { + super(clientManager); + this.raftProperties = raftProperties; + this.clientRpc = clientRpc; + this.config = config; + } + + @Override + public void destroyObject(RaftGroup key, PooledObject pooledObject) { + pooledObject.getObject().invalidate(); + } + + @Override + public PooledObject makeObject(RaftGroup group) { + return new DefaultPooledObject<>( + new RatisClient( + group, + RaftClient.newBuilder() + .setProperties(raftProperties) + .setRaftGroup(group) + .setRetryPolicy(new RatisEndlessRetryPolicy(config)) + .setClientRpc(clientRpc) + .build(), + clientManager)); + } + + @Override + public boolean validateObject(RaftGroup key, PooledObject pooledObject) { + return true; + } + } + /** * RatisRetryPolicy is similar to ExceptionDependentRetry 1. By default, use * ExponentialBackoffRetry to handle request failure 2. 
If unexpected IOException is caught, @@ -167,4 +215,34 @@ public Action handleAttemptFailure(Event event) { return defaultPolicy.handleAttemptFailure(event); } } + + /** This policy is used to raft configuration change */ + private static class RatisEndlessRetryPolicy implements RetryPolicy { + + private static final Logger logger = LoggerFactory.getLogger(RatisEndlessRetryPolicy.class); + // for reconfiguration request, we use different retry policy + private final RetryPolicy endlessPolicy; + private final RetryPolicy defaultPolicy; + + RatisEndlessRetryPolicy(RatisConfig.Client config) { + endlessPolicy = + RetryPolicies.retryForeverWithSleep(TimeDuration.valueOf(2, TimeUnit.SECONDS)); + defaultPolicy = new RatisRetryPolicy(config); + } + + @Override + public Action handleAttemptFailure(Event event) { + Throwable cause = event.getCause(); + if (cause == null + || cause instanceof ReconfigurationInProgressException + || cause instanceof TimeoutIOException + || cause instanceof LeaderSteppingDownException + || cause instanceof ServerNotReadyException + || cause instanceof NotLeaderException) { + return endlessPolicy.handleAttemptFailure(event); + } + + return defaultPolicy.handleAttemptFailure(event); + } + } } diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisConsensus.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisConsensus.java index 9f65281e2040a..384730a974210 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisConsensus.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisConsensus.java @@ -53,7 +53,6 @@ import org.apache.iotdb.consensus.ratis.utils.Utils; import org.apache.iotdb.rpc.TSStatusCode; -import org.apache.commons.pool2.KeyedObjectPool; import org.apache.commons.pool2.impl.GenericKeyedObjectPool; import org.apache.ratis.client.RaftClientRpc; import org.apache.ratis.conf.Parameters; @@ -73,11 +72,15 @@ import org.apache.ratis.protocol.SnapshotManagementRequest; import org.apache.ratis.protocol.exceptions.AlreadyExistsException; import org.apache.ratis.protocol.exceptions.GroupMismatchException; +import org.apache.ratis.protocol.exceptions.LeaderNotReadyException; +import org.apache.ratis.protocol.exceptions.LeaderSteppingDownException; import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.apache.ratis.protocol.exceptions.RaftException; import org.apache.ratis.protocol.exceptions.ReadException; import org.apache.ratis.protocol.exceptions.ReadIndexException; import org.apache.ratis.protocol.exceptions.ResourceUnavailableException; +import org.apache.ratis.protocol.exceptions.ServerNotReadyException; +import org.apache.ratis.protocol.exceptions.StateMachineException; import org.apache.ratis.server.DivisionInfo; import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.RaftServerConfigKeys; @@ -91,19 +94,18 @@ import java.io.File; import java.io.IOException; -import java.nio.file.DirectoryStream; -import java.nio.file.Files; -import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.UUID; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.BiConsumer; import 
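`RatisEndlessRetryPolicy` below dispatches on the failure's cause: reconfiguration, timeout, leadership, and readiness exceptions go to a retry-forever policy, everything else to the default `RatisRetryPolicy`. The shape of that dispatch, generalized with simplified stand-ins for the Ratis interfaces:

```java
import java.util.Set;

final class ExceptionRoutedRetry {
  interface Policy {
    boolean retry(int attempt);
  }

  private final Set<Class<? extends Throwable>> transientTypes;
  private final Policy endless;
  private final Policy fallback;

  ExceptionRoutedRetry(
      Set<Class<? extends Throwable>> transientTypes, Policy endless, Policy fallback) {
    this.transientTypes = transientTypes;
    this.endless = endless;
    this.fallback = fallback;
  }

  boolean shouldRetry(Throwable cause, int attempt) {
    // A null cause is treated as transient, matching the policy above.
    if (cause == null || transientTypes.stream().anyMatch(t -> t.isInstance(cause))) {
      return endless.retry(attempt);
    }
    return fallback.retry(attempt);
  }
}
```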
java.util.function.BooleanSupplier; import java.util.stream.Collectors; @@ -122,6 +124,7 @@ class RatisConsensus implements IConsensus { private final RaftClientRpc clientRpc; private final IClientManager clientManager; + private final IClientManager reconfigurationClientManager; private final DiskGuardian diskGuardian; @@ -142,6 +145,8 @@ class RatisConsensus implements IConsensus { private final RatisMetricSet ratisMetricSet; private final TConsensusGroupType consensusGroupType; + private Map> correctPeerListBeforeStart = null; + private final ConcurrentHashMap canServeStaleRead; public RatisConsensus(ConsensusConfig config, IStateMachine.Registry registry) { @@ -172,6 +177,10 @@ public RatisConsensus(ConsensusConfig config, IStateMachine.Registry registry) { .setWaitTime( TimeDuration.valueOf( this.config.getImpl().getRetryWaitMillis(), TimeUnit.MILLISECONDS)) + .setMaxWaitTime( + TimeDuration.valueOf( + this.config.getImpl().getRetryMaxWaitMillis(), TimeUnit.MILLISECONDS)) + .setExponentialBackoff(true) .build(); this.writeRetryPolicy = RetryPolicy.newBuilder() @@ -179,18 +188,28 @@ public RatisConsensus(ConsensusConfig config, IStateMachine.Registry registry) { .setRetryHandler( reply -> !reply.isSuccess() - && (reply.getException() instanceof ResourceUnavailableException)) + && ((reply.getException() instanceof ResourceUnavailableException) + || reply.getException() instanceof LeaderNotReadyException + || reply.getException() instanceof LeaderSteppingDownException + || reply.getException() instanceof StateMachineException)) .setMaxAttempts(this.config.getImpl().getRetryTimesMax()) .setWaitTime( TimeDuration.valueOf( this.config.getImpl().getRetryWaitMillis(), TimeUnit.MILLISECONDS)) + .setMaxWaitTime( + TimeDuration.valueOf( + this.config.getImpl().getRetryMaxWaitMillis(), TimeUnit.MILLISECONDS)) + .setExponentialBackoff(true) .build(); this.diskGuardian = new DiskGuardian(() -> this, this.config); clientManager = new IClientManager.Factory() - .createClientManager(new RatisClientPoolFactory()); + .createClientManager(new RatisClientPoolFactory(false)); + reconfigurationClientManager = + new IClientManager.Factory() + .createClientManager(new RatisClientPoolFactory(true)); clientRpc = new GrpcFactory(new Parameters()).newRaftClientRpc(ClientId.randomId(), properties); @@ -217,6 +236,27 @@ public synchronized void start() throws IOException { MetricService.getInstance().addMetricSet(this.ratisMetricSet); server.get().start(); registerAndStartDiskGuardian(); + + if (correctPeerListBeforeStart != null) { + BiConsumer> resetPeerListWithoutThrow = + (consensusGroupId, peers) -> { + try { + resetPeerList(consensusGroupId, peers); + } catch (ConsensusGroupNotExistException ignore) { + + } catch (Exception e) { + logger.warn("Failed to reset peer list while start", e); + } + }; + // make peers which are in list correct + correctPeerListBeforeStart.forEach(resetPeerListWithoutThrow); + // clear peers which are not in the list + getAllConsensusGroupIds().stream() + .filter(consensusGroupId -> !correctPeerListBeforeStart.containsKey(consensusGroupId)) + .forEach( + consensusGroupId -> + resetPeerListWithoutThrow.accept(consensusGroupId, Collections.emptyList())); + } } @Override @@ -228,13 +268,15 @@ public synchronized void stop() throws IOException { Thread.currentThread().interrupt(); } finally { clientManager.close(); + reconfigurationClientManager.close(); server.get().close(); MetricService.getInstance().removeMetricSet(this.ratisMetricSet); } } /** Launch a consensus write with retry 
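Both retry policies above now opt into `setExponentialBackoff(true)` with a `setMaxWaitTime` cap. The resulting wait sequence is easiest to see numerically; a tiny worked example, where the `base * 2^attempt` formula and the zero-means-uncapped convention are taken from the `RetryPolicy.getWaitTime` change at the end of this diff:

```java
final class Backoff {
  // wait = base * 2^attempt, clamped to maxMillis; 0 means "no cap".
  static long waitMillis(long baseMillis, long maxMillis, int attempt) {
    double sleep = baseMillis * Math.pow(2, attempt);
    return (maxMillis != 0 && sleep > maxMillis) ? maxMillis : (long) sleep;
  }

  public static void main(String[] args) {
    // base 100 ms, cap 2 s -> 100 200 400 800 1600 2000 2000
    for (int attempt = 0; attempt < 7; attempt++) {
      System.out.print(waitMillis(100, 2000, attempt) + " ");
    }
  }
}
```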
mechanism */ - private RaftClientReply writeWithRetry(CheckedSupplier caller) + private RaftClientReply writeWithRetry( + CheckedSupplier caller, RaftGroupId groupId) throws IOException { RaftClientReply reply = null; try { @@ -246,6 +288,9 @@ private RaftClientReply writeWithRetry(CheckedSupplier server.get().submitClientRequest(request)); + private RaftClientReply writeLocallyWithRetry(RaftClientRequest request, RaftGroupId groupId) + throws IOException { + return writeWithRetry(() -> server.get().submitClientRequest(request), groupId); } - private RaftClientReply writeRemotelyWithRetry(RatisClient client, Message message) - throws IOException { - return writeWithRetry(() -> client.getRaftClient().io().send(message)); + private RaftClientReply writeRemotelyWithRetry( + RatisClient client, Message message, RaftGroupId groupId) throws IOException { + return writeWithRetry(() -> client.getRaftClient().io().send(message), groupId); } /** @@ -282,7 +328,7 @@ public TSStatus write(ConsensusGroupId groupId, IConsensusRequest request) try { forceStepDownLeader(raftGroup); } catch (Exception e) { - logger.warn("leader {} read only, force step down failed due to {}", myself, e); + logger.warn("leader {} read only, force step down failed due to, ", myself, e); } return StatusUtils.getStatus(TSStatusCode.SYSTEM_READ_ONLY); } @@ -303,7 +349,7 @@ public TSStatus write(ConsensusGroupId groupId, IConsensusRequest request) && waitUntilLeaderReady(raftGroupId)) { try (AutoCloseable ignored = RatisMetricsManager.getInstance().startWriteLocallyTimer(consensusGroupType)) { - RaftClientReply localServerReply = writeLocallyWithRetry(clientRequest); + RaftClientReply localServerReply = writeLocallyWithRetry(clientRequest, raftGroupId); if (localServerReply.isSuccess()) { ResponseMessage responseMessage = (ResponseMessage) localServerReply.getMessage(); return (TSStatus) responseMessage.getContentHolder(); @@ -312,6 +358,8 @@ && waitUntilLeaderReady(raftGroupId)) { if (ex != null) { suggestedLeader = ex.getSuggestedLeader(); } + } catch (GroupMismatchException e) { + throw new ConsensusGroupNotExistException(groupId); } catch (Exception e) { throw new RatisRequestFailedException(e); } @@ -322,11 +370,13 @@ && waitUntilLeaderReady(raftGroupId)) { try (AutoCloseable ignored = RatisMetricsManager.getInstance().startWriteRemotelyTimer(consensusGroupType); RatisClient client = getRaftClient(raftGroup)) { - RaftClientReply reply = writeRemotelyWithRetry(client, message); + RaftClientReply reply = writeRemotelyWithRetry(client, message, raftGroupId); if (!reply.isSuccess()) { throw new RatisRequestFailedException(reply.getException()); } writeResult = Utils.deserializeFrom(reply.getMessage().getContent().asReadOnlyByteBuffer()); + } catch (GroupMismatchException e) { + throw new ConsensusGroupNotExistException(groupId); } catch (Exception e) { throw new RatisRequestFailedException(e); } @@ -374,6 +424,19 @@ public DataSet read(ConsensusGroupId groupId, IConsensusRequest request) } else { throw new RatisRequestFailedException(e); } + } catch (GroupMismatchException e) { + throw new ConsensusGroupNotExistException(groupId); + } catch (IllegalStateException e) { + if (e.getMessage() != null && e.getMessage().contains("ServerNotReadyException")) { + ServerNotReadyException serverNotReadyException = + new ServerNotReadyException(e.getMessage()); + throw new RatisReadUnavailableException(serverNotReadyException); + } else { + throw new RatisRequestFailedException(e); + } + } catch (InterruptedException e) { + 
Thread.currentThread().interrupt(); + throw new RatisReadUnavailableException(e); } catch (Exception e) { throw new RatisRequestFailedException(e); } @@ -556,20 +619,65 @@ public void removeRemotePeer(ConsensusGroupId groupId, Peer peer) throws Consens } @Override - public void resetPeerList(ConsensusGroupId groupId, List peers) throws ConsensusException { + public void recordCorrectPeerListBeforeStarting( + Map> correctPeerList) { + logger.info("Record correct peer list: {}", correctPeerList); + this.correctPeerListBeforeStart = correctPeerList; + } + + @Override + public void resetPeerList(ConsensusGroupId groupId, List correctPeers) + throws ConsensusException { final RaftGroupId raftGroupId = Utils.fromConsensusGroupIdToRaftGroupId(groupId); final RaftGroup group = getGroupInfo(raftGroupId); - // pre-conditions: group exists and myself in this group - if (group == null || !group.getPeers().contains(myself)) { + if (group == null) { throw new ConsensusGroupNotExistException(groupId); } + boolean myselfInCorrectPeers = + correctPeers.stream() + .map( + peer -> + Utils.fromNodeInfoAndPriorityToRaftPeer( + peer.getNodeId(), peer.getEndpoint(), DEFAULT_PRIORITY)) + .anyMatch( + raftPeer -> + myself.getId().equals(raftPeer.getId()) + && myself.getAddress().equals(raftPeer.getAddress())); + if (!myselfInCorrectPeers) { + logger.info( + "[RESET PEER LIST] Local peer is not in the correct peer list, delete local peer {}", + groupId); + deleteLocalPeer(groupId); + return; + } + final List newGroupPeers = - Utils.fromPeersAndPriorityToRaftPeers(peers, DEFAULT_PRIORITY); + Utils.fromPeersAndPriorityToRaftPeers(correctPeers, DEFAULT_PRIORITY); final RaftGroup newGroup = RaftGroup.valueOf(raftGroupId, newGroupPeers); - sendReconfiguration(newGroup); + Set localRaftPeerSet = new HashSet<>(group.getPeers()); + Set correctRaftPeerSet = new HashSet<>(newGroupPeers); + if (localRaftPeerSet.equals(correctRaftPeerSet)) { + // configurations are the same + logger.info( + "[RESET PEER LIST] The current peer list is correct, nothing need to be reset: {}", + localRaftPeerSet); + return; + } + + logger.info( + "[RESET PEER LIST] Peer list will be reset from {} to {}", + localRaftPeerSet, + correctRaftPeerSet); + RaftClientReply reply = sendReconfiguration(newGroup); + if (reply.isSuccess()) { + logger.info("[RESET PEER LIST] Peer list has been reset to {}", newGroupPeers); + } else { + logger.warn( + "[RESET PEER LIST] Peer list failed to reset to {}, reply is {}", newGroup, reply); + } } /** NOTICE: transferLeader *does not guarantee* the leader be transferred to newLeader. 
*/ @@ -580,9 +688,7 @@ public void transferLeader(ConsensusGroupId groupId, Peer newLeader) throws Cons final RaftGroup raftGroup = Optional.ofNullable(getGroupInfo(raftGroupId)) .orElseThrow(() -> new ConsensusGroupNotExistException(groupId)); - final RaftPeer newRaftLeader = Utils.fromPeerAndPriorityToRaftPeer(newLeader, DEFAULT_PRIORITY); - final RaftClientReply reply; try { reply = transferLeader(raftGroup, newRaftLeader); @@ -732,27 +838,6 @@ public List getAllConsensusGroupIds() { } } - @Override - public List getAllConsensusGroupIdsWithoutStarting() { - List consensusGroupIds = new ArrayList<>(); - try (DirectoryStream stream = Files.newDirectoryStream(storageDir.toPath())) { - for (Path path : stream) { - try { - RaftGroupId raftGroupId = - RaftGroupId.valueOf(UUID.fromString(path.getFileName().toString())); - consensusGroupIds.add(Utils.fromRaftGroupIdToConsensusGroupId(raftGroupId)); - } catch (Exception e) { - logger.info( - "The directory {} is not a group directory;" + " ignoring it. ", - path.getFileName().toString()); - } - } - } catch (IOException e) { - logger.error("Failed to get all consensus group ids from disk", e); - } - return consensusGroupIds; - } - @Override public String getRegionDirFromConsensusGroupId(ConsensusGroupId consensusGroupId) { RaftGroupId raftGroupId = Utils.fromConsensusGroupIdToRaftGroupId(consensusGroupId); @@ -838,6 +923,7 @@ private RaftGroup getGroupInfo(RaftGroupId raftGroupId) { if (lastSeenGroup != null && !lastSeenGroup.equals(raftGroup)) { // delete the pooled raft-client of the out-dated group and cache the latest clientManager.clear(lastSeenGroup); + reconfigurationClientManager.clear(lastSeenGroup); lastSeen.put(raftGroupId, raftGroup); } } catch (IOException e) { @@ -862,11 +948,21 @@ private RatisClient getRaftClient(RaftGroup group) throws ClientManagerException } } + private RatisClient getConfigurationRaftClient(RaftGroup group) throws ClientManagerException { + try { + return reconfigurationClientManager.borrowClient(group); + } catch (ClientManagerException e) { + logger.error("Borrow client from pool for group {} failed.", group, e); + // rethrow the exception + throw e; + } + } + private RaftClientReply sendReconfiguration(RaftGroup newGroupConf) throws RatisRequestFailedException { // notify the group leader of configuration change RaftClientReply reply; - try (RatisClient client = getRaftClient(newGroupConf)) { + try (RatisClient client = getConfigurationRaftClient(newGroupConf)) { reply = client.getRaftClient().admin().setConfiguration(new ArrayList<>(newGroupConf.getPeers())); if (!reply.isSuccess()) { @@ -899,12 +995,21 @@ public void allowStaleRead(ConsensusGroupId consensusGroupId) { private class RatisClientPoolFactory implements IClientPoolFactory { + private final boolean isReconfiguration; + + RatisClientPoolFactory(boolean isReconfiguration) { + this.isReconfiguration = isReconfiguration; + } + @Override - public KeyedObjectPool createClientPool( + public GenericKeyedObjectPool createClientPool( ClientManager manager) { GenericKeyedObjectPool clientPool = new GenericKeyedObjectPool<>( - new RatisClient.Factory(manager, properties, clientRpc, config.getClient()), + isReconfiguration + ? 
new RatisClient.EndlessRetryFactory( + manager, properties, clientRpc, config.getClient()) + : new RatisClient.Factory(manager, properties, clientRpc, config.getClient()), new ClientPoolProperty.Builder() .setMaxClientNumForEachNode(config.getClient().getMaxClientNumForEachNode()) .build() diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/utils/Retriable.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/utils/Retriable.java index e3c42d30adb9b..1436a0f21de38 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/utils/Retriable.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/utils/Retriable.java @@ -19,23 +19,20 @@ package org.apache.iotdb.consensus.ratis.utils; -import org.apache.ratis.util.Preconditions; import org.apache.ratis.util.TimeDuration; import org.apache.ratis.util.function.CheckedSupplier; import org.slf4j.Logger; import java.util.Objects; import java.util.function.BooleanSupplier; -import java.util.function.Predicate; import java.util.function.Supplier; public class Retriable { + /** - * Attempt the given operation {@param supplier}. If the result is not expected (as indicated via - * {@param shouldRetry}), then retry this operation. + * Attempt the given operation {@param supplier}. May retry several times according to the given + retry policy {@param policy} + * - * @param maxAttempts max retry attempts. *-1 indicates for retrying indefinitely.* - * @param sleepTime sleep time during each retry. * @param name the operation's name. * @param log the logger to print messages. * @throws InterruptedException if the sleep is interrupted. @@ -44,26 +41,22 @@ public class Retriable { */ public static RETURN attempt( CheckedSupplier supplier, - Predicate shouldRetry, - int maxAttempts, - TimeDuration sleepTime, + RetryPolicy policy, Supplier name, Logger log) throws THROWABLE, InterruptedException { Objects.requireNonNull(supplier, "supplier == null"); - Objects.requireNonNull(shouldRetry, "shouldRetry == null"); - Preconditions.assertTrue(maxAttempts == -1 || maxAttempts > 0); - Preconditions.assertTrue(!sleepTime.isNegative(), () -> "sleepTime = " + sleepTime + " < 0"); - - for (int i = 1; /* Forever Loop */ ; i++) { + for (int attempt = 0; ; attempt++) { try { final RETURN ret = supplier.get(); // if we should retry and the total attempt doesn't reach max allowed attempts - if (shouldRetry.test(ret) && (maxAttempts == -1 || i <= maxAttempts)) { + if (policy.shouldRetry(ret) && policy.shouldAttempt(attempt)) { + TimeDuration waitTime = policy.getWaitTime(attempt); if (log != null && log.isDebugEnabled()) { - log.debug("Failed {}, attempt #{}, sleep {} and then retry", name.get(), i, sleepTime); + log.debug( + "Failed {}, attempt #{}, sleep {} and then retry", name.get(), attempt, waitTime); } - sleepTime.sleep(); + waitTime.sleep(); continue; } return ret; @@ -91,26 +84,14 @@ public static void attemptUntilTrue( BooleanSupplier condition, int maxAttempts, TimeDuration sleepTime, String name, Logger log) throws InterruptedException { Objects.requireNonNull(condition, "condition == null"); - attempt(() -> null, ret -> !condition.getAsBoolean(), maxAttempts, sleepTime, () -> name, log); - } - - /** - * * Attempt the given operation {@param supplier}. May retry several times according to the given - * retry policy {@param policy} - * - * @param name the operation's name. - * @param logger the logger to print messages. 
- * @throws InterruptedException if the sleep is interrupted. - * @throws THROWABLE if the operation throws a pre-defined error. - * @return the result of given operation if it executes successfully - */ - public static RETURN attempt( - CheckedSupplier supplier, - RetryPolicy policy, - Supplier name, - Logger logger) - throws THROWABLE, InterruptedException { - return attempt( - supplier, policy::shouldRetry, policy.getMaxAttempts(), policy.getWaitTime(), name, logger); + attempt( + () -> null, + RetryPolicy.newBuilder() + .setRetryHandler(ret -> !condition.getAsBoolean()) + .setWaitTime(sleepTime) + .setMaxAttempts(maxAttempts) + .build(), + () -> name, + log); } } diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/utils/RetryPolicy.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/utils/RetryPolicy.java index 6320ef823c3e5..2f2ccc7828c31 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/utils/RetryPolicy.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/utils/RetryPolicy.java @@ -29,23 +29,43 @@ public class RetryPolicy { /** -1 means retry indefinitely */ private final int maxAttempts; + private final boolean exponentialBackoff; private final TimeDuration waitTime; - - public RetryPolicy(Function retryHandler, int maxAttempts, TimeDuration waitTime) { + private final TimeDuration maxWaitTime; + private org.apache.ratis.retry.RetryPolicy retryPolicy; + + public RetryPolicy( + Function retryHandler, + int maxAttempts, + TimeDuration waitTime, + TimeDuration maxWaitTime, + boolean exponentialBackoff) { this.retryHandler = retryHandler; this.maxAttempts = maxAttempts; this.waitTime = waitTime; + this.maxWaitTime = maxWaitTime; + this.exponentialBackoff = exponentialBackoff; } boolean shouldRetry(RESP resp) { return retryHandler.apply(resp); } + boolean shouldAttempt(int attempt) { + return maxAttempts == -1 || attempt < maxAttempts; + } + public int getMaxAttempts() { return maxAttempts; } - public TimeDuration getWaitTime() { + public TimeDuration getWaitTime(int attempt) { + if (exponentialBackoff) { + TimeDuration sleepTime = waitTime.multiply(Math.pow(2, attempt)); + return maxWaitTime.getDuration() != 0 && sleepTime.compareTo(maxWaitTime) > 0 + ? 
maxWaitTime + : sleepTime; + } return waitTime; } @@ -56,7 +76,9 @@ public static RetryPolicyBuilder newBuilder() { public static class RetryPolicyBuilder { private Function retryHandler = (r) -> false; private int maxAttempts = 0; + public boolean exponentialBackoff = false; private TimeDuration waitTime = TimeDuration.ZERO; + private TimeDuration maxWaitTime = TimeDuration.ZERO; public RetryPolicyBuilder setRetryHandler(Function retryHandler) { this.retryHandler = retryHandler; @@ -73,8 +95,19 @@ public RetryPolicyBuilder setWaitTime(TimeDuration waitTime) { return this; } + public RetryPolicyBuilder setMaxWaitTime(TimeDuration maxWaitTime) { + this.maxWaitTime = maxWaitTime; + return this; + } + + public RetryPolicyBuilder setExponentialBackoff(boolean exponentialBackoff) { + this.exponentialBackoff = exponentialBackoff; + return this; + } + public RetryPolicy build() { - return new RetryPolicy<>(retryHandler, maxAttempts, waitTime); + return new RetryPolicy<>( + retryHandler, maxAttempts, waitTime, maxWaitTime, exponentialBackoff); } } } diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/utils/Utils.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/utils/Utils.java index 9f25e83792479..07dac6f8cf2e5 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/utils/Utils.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/utils/Utils.java @@ -56,6 +56,7 @@ import java.util.stream.Collectors; public class Utils { + private static final int TEMP_BUFFER_SIZE = 1024; private static final byte PADDING_MAGIC = 0x47; private static final String DATA_REGION_GROUP = "group-0001"; @@ -252,6 +253,7 @@ public static void initRatisConfig(RaftProperties properties, RatisConfig config GrpcConfigKeys.Server.setLeaderOutstandingAppendsMax( properties, config.getGrpc().getLeaderOutstandingAppendsMax()); + RaftServerConfigKeys.setStagingTimeout(properties, TimeDuration.valueOf(240, TimeUnit.SECONDS)); RaftServerConfigKeys.Rpc.setSlownessTimeout(properties, config.getRpc().getSlownessTimeout()); RaftServerConfigKeys.Rpc.setTimeoutMin(properties, config.getRpc().getTimeoutMin()); RaftServerConfigKeys.Rpc.setTimeoutMax(properties, config.getRpc().getTimeoutMax()); @@ -288,9 +290,6 @@ public static void initRatisConfig(RaftProperties properties, RatisConfig config properties, config.getThreadPool().getServerSize()); RaftServerConfigKeys.Log.setUseMemory(properties, config.getLog().isUseMemory()); - RaftServerConfigKeys.Log.setQueueElementLimit( - properties, config.getLog().getQueueElementLimit()); - RaftServerConfigKeys.Log.setQueueByteLimit(properties, config.getLog().getQueueByteLimit()); RaftServerConfigKeys.Log.setPurgeGap(properties, config.getLog().getPurgeGap()); RaftServerConfigKeys.Log.setPurgeUptoSnapshotIndex( properties, config.getLog().isPurgeUptoSnapshotIndex()); @@ -302,15 +301,21 @@ public static void initRatisConfig(RaftProperties properties, RatisConfig config RaftServerConfigKeys.Log.setSegmentCacheSizeMax( properties, config.getLog().getSegmentCacheSizeMax()); RaftServerConfigKeys.Log.setPreallocatedSize(properties, config.getLog().getPreallocatedSize()); - final SizeInBytes writeBufferSize = - SizeInBytes.valueOf(config.getLeaderLogAppender().getBufferByteLimit().getSizeInt() + 8L); - RaftServerConfigKeys.Log.setWriteBufferSize(properties, writeBufferSize); + RaftServerConfigKeys.Log.setWriteBufferSize( + properties, + 
SizeInBytes.valueOf(config.getLeaderLogAppender().getBufferByteLimit().getSizeInt() + 8L)); RaftServerConfigKeys.Log.setForceSyncNum(properties, config.getLog().getForceSyncNum()); RaftServerConfigKeys.Log.setUnsafeFlushEnabled( properties, config.getLog().isUnsafeFlushEnabled()); RaftServerConfigKeys.Log.setCorruptionPolicy( properties, RaftServerConfigKeys.Log.CorruptionPolicy.WARN_AND_RETURN); + RaftServerConfigKeys.Write.setByteLimit( + properties, + SizeInBytes.valueOf(config.getLeaderLogAppender().getBufferByteLimit().getSize() * 10)); + + RaftServerConfigKeys.Log.setQueueByteLimit( + properties, config.getLeaderLogAppender().getBufferByteLimit()); RaftServerConfigKeys.Log.Appender.setBufferByteLimit( properties, config.getLeaderLogAppender().getBufferByteLimit()); RaftServerConfigKeys.Log.Appender.setSnapshotChunkSizeMax( @@ -331,8 +336,12 @@ public static void initRatisConfig(RaftProperties properties, RatisConfig config RaftServerConfigKeys.Read.setTimeout(properties, config.getRead().getReadTimeout()); RaftServerConfigKeys.setSleepDeviationThreshold( - properties, config.getUtils().getSleepDeviationThresholdMs()); - RaftServerConfigKeys.setCloseThreshold(properties, config.getUtils().getCloseThresholdMs()); + properties, + TimeDuration.valueOf( + config.getUtils().getSleepDeviationThresholdMs(), TimeUnit.MILLISECONDS)); + RaftServerConfigKeys.setCloseThreshold( + properties, + TimeDuration.valueOf(config.getUtils().getCloseThresholdMs(), TimeUnit.MILLISECONDS)); final TimeDuration clientMaxRetryGap = getMaxRetrySleepTime(config.getClient()); RaftServerConfigKeys.RetryCache.setExpiryTime(properties, clientMaxRetryGap); diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/simple/SimpleConsensus.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/simple/SimpleConsensus.java index 8547b52b7c453..5800e76b0081e 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/simple/SimpleConsensus.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/simple/SimpleConsensus.java @@ -38,7 +38,6 @@ import org.apache.iotdb.consensus.exception.ConsensusGroupNotExistException; import org.apache.iotdb.consensus.exception.IllegalPeerEndpointException; import org.apache.iotdb.consensus.exception.IllegalPeerNumException; -import org.apache.iotdb.consensus.iot.IoTConsensus; import org.apache.iotdb.rpc.TSStatusCode; import org.slf4j.Logger; @@ -211,6 +210,12 @@ public void removeRemotePeer(ConsensusGroupId groupId, Peer peer) throws Consens throw new ConsensusException("SimpleConsensus does not support membership changes"); } + @Override + public void recordCorrectPeerListBeforeStarting( + Map> correctPeerList) { + logger.info("SimpleConsensus will do nothing when calling recordCorrectPeerListBeforeStarting"); + } + @Override public void transferLeader(ConsensusGroupId groupId, Peer newLeader) throws ConsensusException { throw new ConsensusException("SimpleConsensus does not support leader transfer"); @@ -254,11 +259,6 @@ public List getAllConsensusGroupIds() { return new ArrayList<>(stateMachineMap.keySet()); } - @Override - public List getAllConsensusGroupIdsWithoutStarting() { - return IoTConsensus.getConsensusGroupIdsFromDir(storageDir, logger); - } - @Override public String getRegionDirFromConsensusGroupId(ConsensusGroupId groupId) { return buildPeerDir(groupId); @@ -270,7 +270,8 @@ public void reloadConsensusConfig(ConsensusConfig consensusConfig) { } @Override - public void resetPeerList(ConsensusGroupId groupId, List 
peers) throws ConsensusException { + public void resetPeerList(ConsensusGroupId groupId, List correctPeers) + throws ConsensusException { throw new ConsensusException("SimpleConsensus does not support reset peer list"); } diff --git a/iotdb-core/consensus/src/test/java/org/apache/iotdb/consensus/iot/ReplicateTest.java b/iotdb-core/consensus/src/test/java/org/apache/iotdb/consensus/iot/ReplicateTest.java index 8072ab100660e..f22d3fdadcd99 100644 --- a/iotdb-core/consensus/src/test/java/org/apache/iotdb/consensus/iot/ReplicateTest.java +++ b/iotdb-core/consensus/src/test/java/org/apache/iotdb/consensus/iot/ReplicateTest.java @@ -46,8 +46,8 @@ import java.net.ServerSocket; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; -import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; public class ReplicateTest { @@ -57,12 +57,6 @@ public class ReplicateTest { private final ConsensusGroupId gid = new DataRegionId(1); - private static final long timeout = TimeUnit.SECONDS.toMillis(300); - - private static final String CONFIGURATION_FILE_NAME = "configuration.dat"; - - private static final String CONFIGURATION_TMP_FILE_NAME = "configuration.dat.tmp"; - private int basePort = 9000; private final List peers = @@ -73,9 +67,9 @@ public class ReplicateTest { private final List peersStorage = Arrays.asList( - new File("target" + java.io.File.separator + "1"), - new File("target" + java.io.File.separator + "2"), - new File("target" + java.io.File.separator + "3")); + new File("target" + File.separator + "1"), + new File("target" + File.separator + "2"), + new File("target" + File.separator + "3")); private final ConsensusGroup group = new ConsensusGroup(gid, peers); private final List servers = new ArrayList<>(); @@ -120,6 +114,9 @@ private void initServer() throws IOException { String.format( ConsensusFactory.CONSTRUCT_FAILED_MSG, ConsensusFactory.IOT_CONSENSUS)))); + servers.get(i).recordCorrectPeerListBeforeStarting(Collections.singletonMap(gid, peers)); + } + for (int i = 0; i < peers.size(); i++) { servers.get(i).start(); } } catch (IOException e) { @@ -187,23 +184,9 @@ public void replicateUsingQueueTest() stopServer(); initServer(); - Assert.assertEquals( - peers.stream().map(Peer::getNodeId).collect(Collectors.toSet()), - servers.get(0).getImpl(gid).getConfiguration().stream() - .map(Peer::getNodeId) - .collect(Collectors.toSet())); - - Assert.assertEquals( - peers.stream().map(Peer::getNodeId).collect(Collectors.toSet()), - servers.get(1).getImpl(gid).getConfiguration().stream() - .map(Peer::getNodeId) - .collect(Collectors.toSet())); - - Assert.assertEquals( - peers.stream().map(Peer::getNodeId).collect(Collectors.toSet()), - servers.get(2).getImpl(gid).getConfiguration().stream() - .map(Peer::getNodeId) - .collect(Collectors.toSet())); + checkPeerList(servers.get(0).getImpl(gid)); + checkPeerList(servers.get(1).getImpl(gid)); + checkPeerList(servers.get(2).getImpl(gid)); Assert.assertEquals(CHECK_POINT_GAP, servers.get(0).getImpl(gid).getSearchIndex()); Assert.assertEquals(CHECK_POINT_GAP, servers.get(1).getImpl(gid).getSearchIndex()); @@ -264,23 +247,9 @@ public void replicateUsingWALTest() throws IOException, InterruptedException, Co initServer(); servers.get(2).createLocalPeer(group.getGroupId(), group.getPeers()); - Assert.assertEquals( - peers.stream().map(Peer::getNodeId).collect(Collectors.toSet()), - servers.get(0).getImpl(gid).getConfiguration().stream() - .map(Peer::getNodeId) - .collect(Collectors.toSet())); - - 
Assert.assertEquals( - peers.stream().map(Peer::getNodeId).collect(Collectors.toSet()), - servers.get(1).getImpl(gid).getConfiguration().stream() - .map(Peer::getNodeId) - .collect(Collectors.toSet())); - - Assert.assertEquals( - peers.stream().map(Peer::getNodeId).collect(Collectors.toSet()), - servers.get(2).getImpl(gid).getConfiguration().stream() - .map(Peer::getNodeId) - .collect(Collectors.toSet())); + checkPeerList(servers.get(0).getImpl(gid)); + checkPeerList(servers.get(1).getImpl(gid)); + checkPeerList(servers.get(2).getImpl(gid)); Assert.assertEquals(CHECK_POINT_GAP, servers.get(0).getImpl(gid).getSearchIndex()); Assert.assertEquals(CHECK_POINT_GAP, servers.get(1).getImpl(gid).getSearchIndex()); @@ -324,10 +293,6 @@ public void parsingAndConstructIDTest() throws Exception { for (int i = 0; i < CHECK_POINT_GAP; i++) { servers.get(0).write(gid, new TestEntry(i, peers.get(0))); } - List ids = servers.get(0).getAllConsensusGroupIdsWithoutStarting(); - - Assert.assertEquals(1, ids.size()); - Assert.assertEquals(gid, ids.get(0)); String regionDir = servers.get(0).getRegionDirFromConsensusGroupId(gid); try { @@ -349,4 +314,10 @@ private boolean checkPortAvailable() { } return true; } + + private void checkPeerList(IoTConsensusServerImpl iotServerImpl) { + Assert.assertEquals( + peers.stream().map(Peer::getNodeId).collect(Collectors.toSet()), + iotServerImpl.getConfiguration().stream().map(Peer::getNodeId).collect(Collectors.toSet())); + } } diff --git a/iotdb-core/consensus/src/test/java/org/apache/iotdb/consensus/iot/StabilityTest.java b/iotdb-core/consensus/src/test/java/org/apache/iotdb/consensus/iot/StabilityTest.java index 43e328e383351..5147632431f22 100644 --- a/iotdb-core/consensus/src/test/java/org/apache/iotdb/consensus/iot/StabilityTest.java +++ b/iotdb-core/consensus/src/test/java/org/apache/iotdb/consensus/iot/StabilityTest.java @@ -23,13 +23,12 @@ import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.commons.consensus.ConsensusGroupId; import org.apache.iotdb.commons.consensus.DataRegionId; +import org.apache.iotdb.commons.exception.StartupException; import org.apache.iotdb.consensus.ConsensusFactory; -import org.apache.iotdb.consensus.IConsensus; import org.apache.iotdb.consensus.common.Peer; import org.apache.iotdb.consensus.config.ConsensusConfig; import org.apache.iotdb.consensus.exception.ConsensusException; import org.apache.iotdb.consensus.exception.ConsensusGroupAlreadyExistException; -import org.apache.iotdb.consensus.exception.ConsensusGroupModifyPeerException; import org.apache.iotdb.consensus.exception.ConsensusGroupNotExistException; import org.apache.iotdb.consensus.exception.IllegalPeerEndpointException; import org.apache.iotdb.consensus.exception.IllegalPeerNumException; @@ -40,45 +39,55 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.File; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import static org.junit.Assert.assertTrue; public class StabilityTest { + private static Logger LOGGER = LoggerFactory.getLogger(StabilityTest.class); + private final ConsensusGroupId dataRegionId = new DataRegionId(1); private final File storageDir = new File("target" + java.io.File.separator + "stability"); - private IConsensus consensusImpl; + private IoTConsensus consensusImpl; private final int basePort = 6667; public void 
constructConsensus() throws IOException { consensusImpl = - ConsensusFactory.getConsensusImpl( - ConsensusFactory.IOT_CONSENSUS, - ConsensusConfig.newBuilder() - .setThisNodeId(1) - .setThisNode(new TEndPoint("0.0.0.0", basePort)) - .setStorageDir(storageDir.getAbsolutePath()) - .setConsensusGroupType(TConsensusGroupType.DataRegion) - .build(), - gid -> new TestStateMachine()) - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - ConsensusFactory.CONSTRUCT_FAILED_MSG, - ConsensusFactory.IOT_CONSENSUS))); + (IoTConsensus) + ConsensusFactory.getConsensusImpl( + ConsensusFactory.IOT_CONSENSUS, + ConsensusConfig.newBuilder() + .setThisNodeId(1) + .setThisNode(new TEndPoint("0.0.0.0", basePort)) + .setStorageDir(storageDir.getAbsolutePath()) + .setConsensusGroupType(TConsensusGroupType.DataRegion) + .build(), + gid -> new TestStateMachine()) + .orElseThrow( + () -> + new IllegalArgumentException( + String.format( + ConsensusFactory.CONSTRUCT_FAILED_MSG, + ConsensusFactory.IOT_CONSENSUS))); consensusImpl.start(); } @Before public void setUp() throws Exception { + FileUtils.deleteFully(storageDir); constructConsensus(); } @@ -95,7 +104,6 @@ public void allTest() throws Exception { peerTest(); transferLeader(); snapshotTest(); - snapshotUpgradeTest(); } public void addConsensusGroup() { @@ -213,37 +221,74 @@ public void snapshotTest() throws ConsensusException { consensusImpl.deleteLocalPeer(dataRegionId); } - public void snapshotUpgradeTest() throws Exception { - consensusImpl.createLocalPeer( - dataRegionId, - Collections.singletonList(new Peer(dataRegionId, 1, new TEndPoint("0.0.0.0", basePort)))); - consensusImpl.triggerSnapshot(dataRegionId, false); - long oldSnapshotIndex = System.currentTimeMillis(); - String oldSnapshotDirName = - String.format( - "%s_%s_%d", - IoTConsensusServerImpl.SNAPSHOT_DIR_NAME, dataRegionId.getId(), oldSnapshotIndex); - File regionDir = new File(storageDir, "1_1"); - File oldSnapshotDir = new File(regionDir, oldSnapshotDirName); - if (oldSnapshotDir.exists()) { - FileUtils.deleteFully(oldSnapshotDir); + @Test + public void recordAndResetPeerListTest() throws Exception { + try { + Assert.assertEquals(0, consensusImpl.getReplicationNum(dataRegionId)); + consensusImpl.createLocalPeer( + dataRegionId, + Collections.singletonList(new Peer(dataRegionId, 1, new TEndPoint("0.0.0.0", basePort)))); + Assert.assertEquals(1, consensusImpl.getReplicationNum(dataRegionId)); + Assert.assertEquals(1, consensusImpl.getImpl(dataRegionId).getConfiguration().size()); + } catch (ConsensusException e) { + Assert.fail(); } - if (!oldSnapshotDir.mkdirs()) { - throw new ConsensusGroupModifyPeerException( - String.format("%s: cannot mkdir for snapshot", dataRegionId)); + consensusImpl.stop(); + + // test add sync channel + Map> correctPeers = new HashMap<>(); + List peerList1And2 = new ArrayList<>(); + peerList1And2.add(new Peer(dataRegionId, 1, new TEndPoint("0.0.0.0", basePort))); + peerList1And2.add(new Peer(dataRegionId, 2, new TEndPoint("0.0.0.0", basePort))); + correctPeers.put(dataRegionId, peerList1And2); + consensusImpl.recordCorrectPeerListBeforeStarting(correctPeers); + try { + consensusImpl.start(); + Assert.assertEquals(2, consensusImpl.getImpl(dataRegionId).getConfiguration().size()); + consensusImpl.stop(); + } catch (IOException e) { + if (e.getCause() instanceof StartupException) { + LOGGER.info("Cannot start IoTConsensus because", e); + } else { + LOGGER.error("Failed because", e); + Assert.fail(e.getMessage()); + } } - 
consensusImpl.triggerSnapshot(dataRegionId, false); - Assert.assertFalse(oldSnapshotDir.exists()); - File dataDir = new File(IoTConsensus.buildPeerDir(storageDir, dataRegionId)); + // test remove sync channel + List peerList1 = new ArrayList<>(); + peerList1.add(new Peer(dataRegionId, 1, new TEndPoint("0.0.0.0", basePort))); + correctPeers.put(dataRegionId, peerList1); + consensusImpl.recordCorrectPeerListBeforeStarting(correctPeers); + try { + consensusImpl.start(); + Assert.assertEquals(1, consensusImpl.getImpl(dataRegionId).getConfiguration().size()); + consensusImpl.stop(); + } catch (IOException e) { + if (e.getCause() instanceof StartupException) { + LOGGER.info("Cannot start IoTConsensus because", e); + } else { + LOGGER.error("Failed because", e); + Assert.fail(e.getMessage()); + } + } - File[] snapshotFiles = - dataDir.listFiles((dir, name) -> name.startsWith(IoTConsensusServerImpl.SNAPSHOT_DIR_NAME)); - Assert.assertNotNull(snapshotFiles); - Assert.assertEquals(1, snapshotFiles.length); - Assert.assertEquals( - oldSnapshotIndex + 1, - Long.parseLong(snapshotFiles[0].getName().replaceAll(".*[^\\d](?=(\\d+))", ""))); - consensusImpl.deleteLocalPeer(dataRegionId); + // test remove invalid peer + List peerList2 = new ArrayList<>(); + peerList2.add(new Peer(dataRegionId, 2, new TEndPoint("0.0.0.0", basePort))); + correctPeers.put(dataRegionId, peerList2); + consensusImpl.recordCorrectPeerListBeforeStarting(correctPeers); + try { + consensusImpl.start(); + Assert.assertNull(consensusImpl.getImpl(dataRegionId)); + consensusImpl.stop(); + } catch (IOException e) { + if (e.getCause() instanceof StartupException) { + LOGGER.info("Cannot start IoTConsensus because", e); + } else { + LOGGER.error("Failed because", e); + Assert.fail(e.getMessage()); + } + } } } diff --git a/iotdb-core/consensus/src/test/java/org/apache/iotdb/consensus/ratis/RatisConsensusTest.java b/iotdb-core/consensus/src/test/java/org/apache/iotdb/consensus/ratis/RatisConsensusTest.java index 685f580e4baee..842ea78af7234 100644 --- a/iotdb-core/consensus/src/test/java/org/apache/iotdb/consensus/ratis/RatisConsensusTest.java +++ b/iotdb-core/consensus/src/test/java/org/apache/iotdb/consensus/ratis/RatisConsensusTest.java @@ -302,10 +302,6 @@ public void parsingAndConstructIDs() throws Exception { servers.get(0).createLocalPeer(gid, peers.subList(0, 1)); doConsensus(0, 10, 10); - List ids = servers.get(0).getAllConsensusGroupIdsWithoutStarting(); - Assert.assertEquals(1, ids.size()); - Assert.assertEquals(gid, ids.get(0)); - String regionDir = servers.get(0).getRegionDirFromConsensusGroupId(gid); try { File regionDirFile = new File(regionDir); diff --git a/iotdb-core/datanode/pom.xml b/iotdb-core/datanode/pom.xml index bbc40a0852011..b97a532249a66 100644 --- a/iotdb-core/datanode/pom.xml +++ b/iotdb-core/datanode/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-core - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT iotdb-server IoTDB: Core: Data-Node (Server) @@ -37,12 +37,12 @@ org.apache.iotdb service-rpc - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-consensus - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.tsfile @@ -57,72 +57,77 @@ org.apache.iotdb external-api - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb openapi - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb node-commons - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb isession - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-antlr - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-thrift-commons - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb 
iotdb-thrift-consensus - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb udf-api - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb trigger-api - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb metrics-interface - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-thrift - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-thrift-confignode - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT + + + org.apache.iotdb + iotdb-thrift-ainode + 1.3.4-SNAPSHOT org.apache.iotdb pipe-api - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.iotdb iotdb-session - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.apache.commons @@ -160,6 +165,14 @@ org.slf4j slf4j-api + + net.java.dev.jna + jna + + + net.java.dev.jna + jna-platform + io.jsonwebtoken jjwt-api @@ -168,6 +181,10 @@ org.eclipse.milo stack-core + + org.eclipse.milo + sdk-core + commons-io commons-io @@ -245,7 +262,7 @@ jersey-container-servlet-core - io.moquette + com.github.moquette-io.moquette moquette-broker @@ -295,7 +312,7 @@ org.apache.iotdb metrics-core - 1.3.3-SNAPSHOT + 1.3.4-SNAPSHOT org.mockito @@ -359,6 +376,12 @@ awaitility test + + com.tngtech.archunit + archunit + 1.3.0 + test + diff --git a/iotdb-core/datanode/src/assembly/resources/conf/datanode-env.sh b/iotdb-core/datanode/src/assembly/resources/conf/datanode-env.sh index ac637869a6b91..b1f3c68732668 100755 --- a/iotdb-core/datanode/src/assembly/resources/conf/datanode-env.sh +++ b/iotdb-core/datanode/src/assembly/resources/conf/datanode-env.sh @@ -143,6 +143,45 @@ calculate_memory_sizes() } +# find first dir of dn_data_dirs from properties file +get_first_data_dir() { + local config_file="$1" + local data_dir_value="" + + data_dir_value=`sed '/^dn_data_dirs=/!d;s/.*=//' ${IOTDB_CONF}/${config_file} | tail -n 1` + + if [ -z "$data_dir_value" ]; then + echo "" + return 0 + fi + + if [[ "$data_dir_value" == *";"* ]]; then + data_dir_value=$(echo "$data_dir_value" | cut -d';' -f1) + fi + if [[ "$data_dir_value" == *","* ]]; then + data_dir_value=$(echo "$data_dir_value" | cut -d',' -f1) + fi + + if [[ "$data_dir_value" == /* ]]; then + echo "$data_dir_value" + else + echo "$IOTDB_HOME/$data_dir_value" + fi +} + +if [ -f "${IOTDB_CONF}/iotdb-system.properties" ]; then + heap_dump_dir=$(get_first_data_dir "iotdb-system.properties") +else + heap_dump_dir=$(get_first_data_dir "iotdb-datanode.properties") +fi + +if [ -z "$heap_dump_dir" ]; then + heap_dump_dir="$IOTDB_HOME/data/datanode/data" +fi +if [ ! -d "$heap_dump_dir" ]; then + mkdir -p "$heap_dump_dir" +fi + # find java in JAVA_HOME if [ -n "$JAVA_HOME" ]; then for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do @@ -300,8 +339,8 @@ IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+SafepointTimeout" # NOTE: it may reduce memory utilization and trigger OOM killer when memory is tight. 
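# Illustration of the heap-dump directory resolution added above (the values
# below are hypothetical, not taken from this patch): for a properties line such as
#   dn_data_dirs=data/datanode/data1,data/datanode/data2;/mnt/cold/data
# get_first_data_dir keeps the first ';'-separated group, then the first
# ','-separated entry within it, and prefixes relative paths with IOTDB_HOME,
# yielding heap_dump_dir="$IOTDB_HOME/data/datanode/data1". An equivalent
# one-liner under the same assumptions:
#   sed '/^dn_data_dirs=/!d;s/.*=//' conf/iotdb-system.properties | tail -n 1 | cut -d';' -f1 | cut -d',' -f1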
# IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+AlwaysPreTouch" -# if you want to dump the heap memory while OOM happening, you can use the following command, remember to replace /tmp/heapdump.hprof with your own file path and the folder where this file is located needs to be created in advance -# IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp/datanode_heapdump.hprof" +# if you want to dump the heap memory while OOM happening, you can use the following command, remember to replace ${heap_dump_dir}/datanode_heapdump.hprof with your own file path and the folder where this file is located needs to be created in advance +# IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${heap_dump_dir}/datanode_heapdump.hprof" echo "DataNode on heap memory size = ${ON_HEAP_MEMORY}B, off heap memory size = ${OFF_HEAP_MEMORY}B" diff --git a/iotdb-core/datanode/src/assembly/resources/conf/logback-datanode.xml b/iotdb-core/datanode/src/assembly/resources/conf/logback-datanode.xml index 698bdfc3a825e..557fd8b160f51 100644 --- a/iotdb-core/datanode/src/assembly/resources/conf/logback-datanode.xml +++ b/iotdb-core/datanode/src/assembly/resources/conf/logback-datanode.xml @@ -177,6 +177,21 @@ INFO + + ${IOTDB_HOME}/logs/log_datanode_sampled_queries.log + + ${IOTDB_HOME}/logs/log-datanode-sampled-queries-%d{yyyyMMdd}.log.gz + 30 + + true + + %d %m %n + utf-8 + + + INFO + + ${IOTDB_HOME}/logs/log_datanode_compaction.log @@ -242,9 +257,12 @@ - + + + + @@ -261,7 +279,7 @@ - + diff --git a/iotdb-core/datanode/src/assembly/resources/sbin/remove-datanode.bat b/iotdb-core/datanode/src/assembly/resources/sbin/remove-datanode.bat deleted file mode 100644 index 1903e224d8e41..0000000000000 --- a/iotdb-core/datanode/src/assembly/resources/sbin/remove-datanode.bat +++ /dev/null @@ -1,121 +0,0 @@ -@REM -@REM Licensed to the Apache Software Foundation (ASF) under one -@REM or more contributor license agreements. See the NOTICE file -@REM distributed with this work for additional information -@REM regarding copyright ownership. The ASF licenses this file -@REM to you under the Apache License, Version 2.0 (the -@REM "License"); you may not use this file except in compliance -@REM with the License. You may obtain a copy of the License at -@REM -@REM http://www.apache.org/licenses/LICENSE-2.0 -@REM -@REM Unless required by applicable law or agreed to in writing, -@REM software distributed under the License is distributed on an -@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -@REM KIND, either express or implied. See the License for the -@REM specific language governing permissions and limitations -@REM under the License. -@REM - -@echo off - -IF "%~1"=="--help" ( - echo The script will remove a DataNode. - echo Before removing a DataNode, ensure that the cluster has at least the number of data/schema replicas DataNodes. 
- echo Usage: - echo Remove the DataNode with datanode_id - echo ./sbin/remove-datanode.bat [datanode_id] - echo Remove the DataNode with address:port - echo ./sbin/remove-datanode.bat [dn_rpc_address:dn_rpc_port] - EXIT /B 0 -) - -echo ```````````````````````` -echo Starting to remove a DataNode -echo ```````````````````````` - -PATH %PATH%;%JAVA_HOME%\bin\ -set "FULL_VERSION=" -set "MAJOR_VERSION=" -set "MINOR_VERSION=" - - -for /f tokens^=2-5^ delims^=.-_+^" %%j in ('java -fullversion 2^>^&1') do ( - set "FULL_VERSION=%%j-%%k-%%l-%%m" - IF "%%j" == "1" ( - set "MAJOR_VERSION=%%k" - set "MINOR_VERSION=%%l" - ) else ( - set "MAJOR_VERSION=%%j" - set "MINOR_VERSION=%%k" - ) -) - -set JAVA_VERSION=%MAJOR_VERSION% - -IF NOT %JAVA_VERSION% == 8 ( - IF NOT %JAVA_VERSION% == 11 ( - echo IoTDB only supports jdk8 or jdk11, please check your java version. - goto finally - ) -) - -if "%OS%" == "Windows_NT" setlocal - -pushd %~dp0.. -if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%cd% -popd - -set IOTDB_CONF=%IOTDB_HOME%\conf -set IOTDB_LOGS=%IOTDB_HOME%\logs - -@setlocal ENABLEDELAYEDEXPANSION ENABLEEXTENSIONS -set CONF_PARAMS=-r -set is_conf_path=false -for %%i in (%*) do ( - set CONF_PARAMS=!CONF_PARAMS! %%i -) - -if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.db.service.DataNode -if NOT DEFINED JAVA_HOME goto :err - -@REM ----------------------------------------------------------------------------- -@REM JVM Opts we'll use in legacy run or installation -set JAVA_OPTS=-ea^ - -Dlogback.configurationFile="%IOTDB_CONF%\logback-datanode.xml"^ - -DIOTDB_HOME="%IOTDB_HOME%"^ - -DTSFILE_HOME="%IOTDB_HOME%"^ - -DIOTDB_CONF="%IOTDB_CONF%" - -@REM ***** CLASSPATH library setting ***** -@REM Ensure that any user defined CLASSPATH variables are not used on startup -if EXIST "%IOTDB_HOME%\lib" (set CLASSPATH="%IOTDB_HOME%\lib\*") else set CLASSPATH="%IOTDB_HOME%\..\lib\*" - -@REM For each jar in the IOTDB_HOME lib directory call append to build the CLASSPATH variable. -set CLASSPATH=%CLASSPATH%;"%IOTDB_HOME%\lib\*" -set CLASSPATH=%CLASSPATH%;iotdb.db.service.DataNode -goto okClasspath - -:append -set CLASSPATH=%CLASSPATH%;%1 -goto :eof - -@REM ----------------------------------------------------------------------------- -:okClasspath - -rem echo CLASSPATH: %CLASSPATH% - -"%JAVA_HOME%\bin\java" %JAVA_OPTS% %IOTDB_HEAP_OPTS% -cp %CLASSPATH% %IOTDB_JMX_OPTS% %MAIN_CLASS% %CONF_PARAMS% -goto finally - -:err -echo JAVA_HOME environment variable must be set! -pause - - -@REM ----------------------------------------------------------------------------- -:finally - -pause - -ENDLOCAL \ No newline at end of file diff --git a/iotdb-core/datanode/src/assembly/resources/sbin/remove-datanode.sh b/iotdb-core/datanode/src/assembly/resources/sbin/remove-datanode.sh deleted file mode 100755 index ba27784ca3b3c..0000000000000 --- a/iotdb-core/datanode/src/assembly/resources/sbin/remove-datanode.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -if [ "$#" -eq 1 ] && [ "$1" == "--help" ]; then - echo "The script will remove a DataNode." - echo "Before removing a DataNode, ensure that the cluster has at least the number of data/schema replicas DataNodes." - echo "Usage:" - echo "Remove the DataNode with datanode_id" - echo "./sbin/remove-datanode.sh [datanode_id]" - echo "Remove the DataNode with address:port" - echo "./sbin/remove-datanode.sh [dn_rpc_address:dn_rpc_port]" - exit 0 -fi - -echo --------------------- -echo "Starting to remove a DataNode" -echo --------------------- - -source "$(dirname "$0")/iotdb-common.sh" - -#get_iotdb_include wil remove -D parameters -VARS=$(get_iotdb_include "$*") -checkAllVariables -eval set -- "$VARS" - -PARAMS="-r "$* - -#initEnv is in iotdb-common.sh -initEnv - -CLASSPATH="" -for f in ${IOTDB_HOME}/lib/*.jar; do - CLASSPATH=${CLASSPATH}":"$f -done - -classname=org.apache.iotdb.db.service.DataNode - -launch_service() -{ - class="$1" - iotdb_parms="-Dlogback.configurationFile=${IOTDB_LOG_CONFIG}" - iotdb_parms="$iotdb_parms -DIOTDB_HOME=${IOTDB_HOME}" - iotdb_parms="$iotdb_parms -DIOTDB_DATA_HOME=${IOTDB_DATA_HOME}" - iotdb_parms="$iotdb_parms -DTSFILE_HOME=${IOTDB_HOME}" - iotdb_parms="$iotdb_parms -DIOTDB_CONF=${IOTDB_CONF}" - iotdb_parms="$iotdb_parms -DTSFILE_CONF=${IOTDB_CONF}" - iotdb_parms="$iotdb_parms -Dname=iotdb\.IoTDB" - iotdb_parms="$iotdb_parms -DIOTDB_LOG_DIR=${IOTDB_LOG_DIR}" - - exec "$JAVA" $iotdb_parms $IOTDB_JMX_OPTS -cp "$CLASSPATH" "$class" $PARAMS - return $? -} - -# Start up the service -launch_service "$classname" - -exit $? - - diff --git a/iotdb-core/datanode/src/assembly/resources/sbin/start-datanode.bat b/iotdb-core/datanode/src/assembly/resources/sbin/start-datanode.bat index c6fd64e8fb34b..fcbce7ed4b01b 100755 --- a/iotdb-core/datanode/src/assembly/resources/sbin/start-datanode.bat +++ b/iotdb-core/datanode/src/assembly/resources/sbin/start-datanode.bat @@ -29,7 +29,7 @@ echo ```````````````````````` @REM ----------------------------------------------------------------------------- @REM SET JAVA -PATH %PATH%;%JAVA_HOME%\bin\ +set PATH="%JAVA_HOME%\bin\";%PATH% set "FULL_VERSION=" set "MAJOR_VERSION=" set "MINOR_VERSION=" diff --git a/iotdb-core/datanode/src/assembly/resources/sbin/stop-datanode.bat b/iotdb-core/datanode/src/assembly/resources/sbin/stop-datanode.bat index fec6924123462..251243e4c1e6d 100644 --- a/iotdb-core/datanode/src/assembly/resources/sbin/stop-datanode.bat +++ b/iotdb-core/datanode/src/assembly/resources/sbin/stop-datanode.bat @@ -44,8 +44,8 @@ for /f "eol=# tokens=2 delims==" %%i in ('findstr /i "^dn_rpc_port" ) if not defined dn_rpc_port ( - echo dn_rpc_port not found in the configuration file. Exiting. - exit /b 1 + echo "WARNING: dn_rpc_port not found in the configuration file. Using default value dn_rpc_port = 6667" + set dn_rpc_port=6667 ) echo Check whether the rpc_port is used..., port is %dn_rpc_port% @@ -56,8 +56,8 @@ for /f "eol=# tokens=2 delims==" %%i in ('findstr /i "dn_rpc_address" ) if not defined dn_rpc_address ( - echo dn_rpc_address not found in the configuration file. Exiting. 
- exit /b 1 + echo "WARNING: dn_rpc_address not found in the configuration file. Using default value dn_rpc_address = 0.0.0.0" + set dn_rpc_address=0.0.0.0 ) for /f "tokens=5" %%a in ('netstat /ano ^| findstr %dn_rpc_address%:%dn_rpc_port%') do ( diff --git a/iotdb-core/datanode/src/assembly/resources/sbin/stop-datanode.sh b/iotdb-core/datanode/src/assembly/resources/sbin/stop-datanode.sh index 94441cd6252a0..430367ec70850 100644 --- a/iotdb-core/datanode/src/assembly/resources/sbin/stop-datanode.sh +++ b/iotdb-core/datanode/src/assembly/resources/sbin/stop-datanode.sh @@ -27,6 +27,11 @@ else dn_rpc_port=`sed '/^dn_rpc_port=/!d;s/.*=//' ${DATANODE_CONF}/iotdb-datanode.properties` fi +if [ -z "$dn_rpc_port" ]; then + echo "WARNING: dn_rpc_port not found in the configuration file. Using default value dn_rpc_port=6667" + dn_rpc_port=6667 +fi + check_config_unique "dn_rpc_port" "$dn_rpc_port" force="" diff --git a/iotdb-core/datanode/src/assembly/resources/tools/tsfile/reset-resource-pipe-statistics.bat b/iotdb-core/datanode/src/assembly/resources/tools/tsfile/reset-resource-pipe-statistics.bat new file mode 100644 index 0000000000000..098072a4b4f19 --- /dev/null +++ b/iotdb-core/datanode/src/assembly/resources/tools/tsfile/reset-resource-pipe-statistics.bat @@ -0,0 +1,59 @@ +@REM +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM + +@echo off +echo ```````````````````````````````````````````````````````````````````````` +echo Starting Resetting the Pipe Related Statistics in TsFile Resources +echo ```````````````````````````````````````````````````````````````````````` + +if "%OS%" == "Windows_NT" setlocal + +pushd %~dp0..\.. +if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%CD% +popd + +if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.db.tools.validate.TsFileResourcePipeStatisticsSetTool +if NOT DEFINED JAVA_HOME goto :err + +@REM ----------------------------------------------------------------------------- +@REM ***** CLASSPATH library setting ***** +@REM Ensure that any user defined CLASSPATH variables are not used on startup +set CLASSPATH="%IOTDB_HOME%\lib\*" + +goto okClasspath + +:append +set CLASSPATH=%CLASSPATH%;%1 +goto :eof + +@REM ----------------------------------------------------------------------------- +:okClasspath + +"%JAVA_HOME%\bin\java" -cp "%CLASSPATH%" %MAIN_CLASS% %* + +goto finally + +:err +echo JAVA_HOME environment variable must be set! 
+pause + +@REM ----------------------------------------------------------------------------- +:finally + +ENDLOCAL \ No newline at end of file diff --git a/iotdb-core/datanode/src/assembly/resources/tools/tsfile/reset-resource-pipe-statistics.sh b/iotdb-core/datanode/src/assembly/resources/tools/tsfile/reset-resource-pipe-statistics.sh new file mode 100644 index 0000000000000..5dc3fab86999e --- /dev/null +++ b/iotdb-core/datanode/src/assembly/resources/tools/tsfile/reset-resource-pipe-statistics.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +echo ------------------------------------------------------------------------------------ +echo Starting Resetting the Pipe Related Statistics in TsFile Resources +echo ------------------------------------------------------------------------------------ + +source "$(dirname "$0")/../../sbin/iotdb-common.sh" +#get_iotdb_include and checkAllVariables is in iotdb-common.sh +VARS=$(get_iotdb_include "$*") +checkAllVariables +export IOTDB_HOME="${IOTDB_HOME}/.." +eval set -- "$VARS" + +if [ -n "$JAVA_HOME" ]; then + for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do + if [ -x "$java" ]; then + JAVA="$java" + break + fi + done +else + JAVA=java +fi + +CLASSPATH="" +for f in ${IOTDB_HOME}/lib/*.jar; do + CLASSPATH=${CLASSPATH}":"$f +done + +MAIN_CLASS=org.apache.iotdb.db.tools.validate.TsFileResourcePipeStatisticsSetTool + +"$JAVA" -cp "$CLASSPATH" "$MAIN_CLASS" "$@" +exit $? 
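The stop scripts above now fall back to defaults instead of aborting when the rpc keys are missing from the properties file. A minimal standalone sketch of that fallback pattern (the properties path is illustrative; the real scripts resolve it via ${DATANODE_CONF} first):

#!/bin/bash
# Keep only lines starting with dn_rpc_port= and strip the key, as in
# stop-datanode.sh; an empty result means the key is absent.
dn_rpc_port=$(sed '/^dn_rpc_port=/!d;s/.*=//' conf/iotdb-datanode.properties)
if [ -z "$dn_rpc_port" ]; then
  echo "WARNING: dn_rpc_port not found in the configuration file. Using default value dn_rpc_port=6667"
  dn_rpc_port=6667
fi
echo "Stopping DataNode listening on rpc port $dn_rpc_port"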
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/audit/AuditLogger.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/audit/AuditLogger.java index bb5f557ec0ef5..ed3efee9a1bfe 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/audit/AuditLogger.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/audit/AuditLogger.java @@ -210,6 +210,7 @@ private static AuditLogOperation judgeLogOperation(StatementType type) { case RENAME_LOGICAL_VIEW: case CREATE_TOPIC: case DROP_TOPIC: + case DROP_SUBSCRIPTION: return AuditLogOperation.DDL; case LOAD_DATA: case INSERT: diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/AuthorityChecker.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/AuthorityChecker.java index 395f1f42d8a01..aeb48149694c4 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/AuthorityChecker.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/AuthorityChecker.java @@ -209,6 +209,15 @@ public static boolean checkRole(String username, String rolename) { return authorityFetcher.get().checkRole(username, rolename); } + public static TSStatus checkSuperUserOrMaintain(String userName) { + if (AuthorityChecker.SUPER_USER.equals(userName)) { + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + return AuthorityChecker.getTSStatus( + AuthorityChecker.checkSystemPermission(userName, PrivilegeType.MAINTAIN.ordinal()), + PrivilegeType.MAINTAIN); + } + public static void buildTSBlock( TAuthorizerResp authResp, SettableFuture future) { List types = new ArrayList<>(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/ClusterAuthorityFetcher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/ClusterAuthorityFetcher.java index 107561bdca317..703330399574a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/ClusterAuthorityFetcher.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/ClusterAuthorityFetcher.java @@ -34,6 +34,7 @@ import org.apache.iotdb.commons.exception.MetadataException; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathPatternTree; +import org.apache.iotdb.commons.security.encrypt.AsymmetricEncrypt; import org.apache.iotdb.commons.utils.AuthUtils; import org.apache.iotdb.confignode.rpc.thrift.TAuthizedPatternTreeResp; import org.apache.iotdb.confignode.rpc.thrift.TAuthorizerReq; @@ -396,6 +397,10 @@ public TSStatus checkUser(String username, String password) { return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); } else if (password != null && AuthUtils.validatePassword(password, user.getPassword())) { return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); + } else if (password != null + && AuthUtils.validatePassword( + password, user.getPassword(), AsymmetricEncrypt.DigestAlgorithm.MD5)) { + return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); } else { return RpcUtils.getStatus(TSStatusCode.WRONG_LOGIN_PASSWORD, "Authentication failed."); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java index 4c1dddfa97a01..1a3f87a1a836d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java @@ -31,6 +31,7 @@ import org.apache.iotdb.db.audit.AuditLogStorage; import 
org.apache.iotdb.db.exception.LoadConfigurationException; import org.apache.iotdb.db.protocol.thrift.impl.ClientRPCServiceImpl; +import org.apache.iotdb.db.queryengine.plan.relational.metadata.fetcher.cache.LastCacheLoadStrategy; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.constant.CrossCompactionPerformer; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.constant.InnerSeqCompactionPerformer; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.constant.InnerUnseqCompactionPerformer; @@ -40,6 +41,7 @@ import org.apache.iotdb.db.storageengine.dataregion.compaction.selector.constant.InnerUnsequenceCompactionSelector; import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.TimeIndexLevel; import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALMode; +import org.apache.iotdb.db.storageengine.load.disk.ILoadDiskSelector.LoadDiskSelectorType; import org.apache.iotdb.db.utils.datastructure.TVListSortAlgorithm; import org.apache.iotdb.metrics.config.MetricConfigDescriptor; import org.apache.iotdb.metrics.metricsets.system.SystemMetrics; @@ -143,7 +145,7 @@ public class IoTDBConfig { private int rpcMinConcurrentClientNum = Runtime.getRuntime().availableProcessors(); /** Max concurrent client number */ - private int rpcMaxConcurrentClientNum = 65535; + private int rpcMaxConcurrentClientNum = 1000; /** Memory allocated for the write process */ private long allocateMemoryForStorageEngine = Runtime.getRuntime().maxMemory() * 3 / 10; @@ -160,6 +162,8 @@ public class IoTDBConfig { /** Memory allocated for the pipe */ private long allocateMemoryForPipe = Runtime.getRuntime().maxMemory() / 10; + private long allocateMemoryPerWalCache = 512 * 1024; + /** Ratio of memory allocated for buffered arrays */ private double bufferedArraysMemoryProportion = 0.6; @@ -175,6 +179,9 @@ public class IoTDBConfig { /** The proportion of write memory for compaction */ private double compactionProportion = 0.2; + /** The proportion of memtable memory for WAL queue */ + private double walBufferQueueProportion = 0.1; + /** The proportion of memtable memory for device path cache */ private double devicePathCacheProportion = 0.05; @@ -219,9 +226,6 @@ public class IoTDBConfig { /** max total direct buffer off heap memory size proportion */ private double maxDirectBufferOffHeapMemorySizeProportion = 0.8; - /** Blocking queue capacity of each wal buffer */ - private int walBufferQueueCapacity = 500; - /** Size threshold of each wal file. Unit: byte */ private volatile long walFileSizeThresholdInByte = 30 * 1024 * 1024L; @@ -244,6 +248,13 @@ public class IoTDBConfig { /** The period when outdated wal files are periodically deleted. Unit: millisecond */ private volatile long deleteWalFilesPeriodInMs = 20 * 1000L; + /** + * Enables or disables the automatic clearing of the WAL cache when a memory compaction is + * triggered. When enabled, the WAL cache will be cleared to release memory during the compaction + * process. 
+ */ + private volatile boolean WALCacheShrinkClearEnabled = true; + // endregion /** * The cycle when metadata log is periodically forced to be written to disk(in milliseconds) If @@ -303,6 +314,8 @@ public class IoTDBConfig { private String extPipeDir = IoTDBConstant.EXT_FOLDER_NAME + File.separator + IoTDBConstant.EXT_PIPE_FOLDER_NAME; + private int pipeTaskThreadCount = 5; + /** External lib directory for MQTT, stores user-uploaded JAR files */ private String mqttDir = IoTDBConstant.EXT_FOLDER_NAME + File.separator + IoTDBConstant.MQTT_FOLDER_NAME; @@ -420,8 +433,11 @@ public class IoTDBConfig { /** The sort algorithm used in TVList */ private TVListSortAlgorithm tvListSortAlgorithm = TVListSortAlgorithm.TIM; - /** When average series point number reaches this, flush the memtable to disk */ - private int avgSeriesPointNumberThreshold = 100000; + /** + * The threshold at which the working TVList is sorted and added into the immutable TVList list + * in the writable memtable + */ + private int tvListSortThreshold = 0; /** Enable inner space compaction for sequence files */ private volatile boolean enableSeqSpaceCompaction = true; @@ -432,21 +448,27 @@ public class IoTDBConfig { /** Compact the unsequence files into the overlapped sequence files */ private volatile boolean enableCrossSpaceCompaction = true; + /** Enable auto repair compaction */ + private volatile boolean enableAutoRepairCompaction = true; + /** The buffer for sort operation */ - private long sortBufferSize = 1024 * 1024L; + private long sortBufferSize = 32 * 1024 * 1024L; + + /** Mods cache size limit per FI */ + private long modsCacheSizeLimitPerFI = 32 * 1024 * 1024; /** * The strategy of inner space compaction task. There are just one inner space compaction strategy * SIZE_TIRED_COMPACTION: */ private InnerSequenceCompactionSelector innerSequenceCompactionSelector = - InnerSequenceCompactionSelector.SIZE_TIERED; + InnerSequenceCompactionSelector.SIZE_TIERED_MULTI_TARGET; private InnerSeqCompactionPerformer innerSeqCompactionPerformer = InnerSeqCompactionPerformer.READ_CHUNK; private InnerUnsequenceCompactionSelector innerUnsequenceCompactionSelector = - InnerUnsequenceCompactionSelector.SIZE_TIERED; + InnerUnsequenceCompactionSelector.SIZE_TIERED_MULTI_TARGET; private InnerUnseqCompactionPerformer innerUnseqCompactionPerformer = InnerUnseqCompactionPerformer.FAST; @@ -469,13 +491,19 @@ public class IoTDBConfig { private double chunkMetadataSizeProportion = 0.1; + private long innerCompactionTotalFileSizeThresholdInByte = 10737418240L; + + private int innerCompactionTotalFileNumThreshold = 100; + + private int maxLevelGapInInnerCompaction = 2; + /** The target tsfile size in compaction, 2 GB by default */ private long targetCompactionFileSize = 2147483648L; - /** The target chunk size in compaction. */ - private long targetChunkSize = 1048576L; + /** The target chunk size in compaction and flushing. */ + private long targetChunkSize = 1600000L; - /** The target chunk point num in compaction. */ + /** The target chunk point num in compaction and flushing. */ private long targetChunkPointNum = 100000L; /** @@ -496,8 +524,11 @@ */ private long compactionAcquireWriteLockTimeout = 60_000L; - /** The max candidate file num in one inner space compaction task */ - private volatile int fileLimitPerInnerTask = 30; + /** + * When the number of selected files reaches this value, the conditions for constructing a merge + * task are met. 
+ */ + private volatile int innerCompactionCandidateFileNum = 30; /** The max candidate file num in one cross space compaction task */ private volatile int fileLimitPerCrossTask = 500; @@ -517,9 +548,6 @@ public class IoTDBConfig { /** The interval of compaction task schedulation in each virtual database. The unit is ms. */ private long compactionScheduleIntervalInMs = 60_000L; - /** The interval of ttl check task in each database. The unit is ms. Default is 2 hours. */ - private long ttlCheckInterval = 7_200_000L; - /** The number of threads to be set up to check ttl. */ private int ttlCheckerNum = 1; @@ -562,9 +590,6 @@ public class IoTDBConfig { */ private volatile double innerCompactionTaskSelectionDiskRedundancy = 0.05; - /** The size of global compaction estimation file info cahce. */ - private int globalCompactionFileInfoCacheSize = 1000; - /** whether to cache meta data(ChunkMetaData and TsFileMetaData) or not. */ private boolean metaDataCacheEnable = true; @@ -718,6 +743,12 @@ public class IoTDBConfig { */ private int compactionThreadCount = 10; + /** + * How many chunk will be compact in aligned series compaction, 10 by default. Set to + * Integer.MAX_VALUE when less than or equal to 0. + */ + private int compactionMaxAlignedSeriesNumInOneBatch = 10; + /* * How many thread will be set up to perform continuous queries. When <= 0, use max(1, CPU core number / 2). */ @@ -1017,13 +1048,15 @@ public class IoTDBConfig { /** Policy of DataNodeSchemaCache eviction */ private String dataNodeSchemaCacheEvictionPolicy = "FIFO"; + private int schemaThreadCount = 5; + private String readConsistencyLevel = "strong"; /** Maximum execution time of a DriverTask */ private int driverTaskExecutionTimeSliceInMs = 200; /** Maximum size of wal buffer used in IoTConsensus. Unit: byte */ - private long throttleThreshold = 50 * 1024 * 1024 * 1024L; + private long throttleThreshold = 200 * 1024 * 1024 * 1024L; /** Maximum wait time of write cache in IoTConsensus. 
Unit: ms */ private long cacheWindowTimeInMs = 10 * 1000L; @@ -1103,7 +1136,7 @@ public class IoTDBConfig { private int maxSizePerBatch = 16 * 1024 * 1024; private int maxPendingBatchesNum = 5; private double maxMemoryRatioForQueue = 0.6; - private long regionMigrationSpeedLimitBytesPerSecond = 32 * 1024 * 1024L; + private long regionMigrationSpeedLimitBytesPerSecond = 48 * 1024 * 1024L; // PipeConsensus Config private int pipeConsensusPipelineSize = 5; @@ -1111,17 +1144,61 @@ public class IoTDBConfig { /** Load related */ private double maxAllocateMemoryRatioForLoad = 0.8; + private int loadTsFileAnalyzeSchemaBatchReadTimeSeriesMetadataCount = 4096; private int loadTsFileAnalyzeSchemaBatchFlushTimeSeriesNumber = 4096; private long loadTsFileAnalyzeSchemaMemorySizeInBytes = 0L; // 0 means that the decision will be adaptive based on the number of sequences + private long loadTsFileTabletConversionBatchMemorySizeInBytes = 4096 * 1024; + + private long loadChunkMetadataMemorySizeInBytes = 33554432; // 32MB + private long loadMemoryAllocateRetryIntervalMs = 1000L; private int loadMemoryAllocateMaxRetries = 5; private long loadCleanupTaskExecutionDelayTimeSeconds = 1800L; // 30 min + private int loadTsFileRetryCountOnRegionChange = 10; + private double loadWriteThroughputBytesPerSecond = -1; // Bytes/s + private long loadTabletConversionThresholdBytes = -1; + + private boolean loadActiveListeningEnable = true; + + private String[] loadActiveListeningDirs = + new String[] { + IoTDBConstant.EXT_FOLDER_NAME + + File.separator + + IoTDBConstant.LOAD_TSFILE_FOLDER_NAME + + File.separator + + IoTDBConstant.LOAD_TSFILE_ACTIVE_LISTENING_PENDING_FOLDER_NAME + }; + + private String loadActiveListeningPipeDir = + IoTDBConstant.EXT_FOLDER_NAME + + File.separator + + IoTDBConstant.LOAD_TSFILE_FOLDER_NAME + + File.separator + + IoTDBConstant.PIPE_FOLDER_NAME; + + private String loadActiveListeningFailDir = + IoTDBConstant.EXT_FOLDER_NAME + + File.separator + + IoTDBConstant.LOAD_TSFILE_FOLDER_NAME + + File.separator + + IoTDBConstant.LOAD_TSFILE_ACTIVE_LISTENING_FAILED_FOLDER_NAME; + private long loadActiveListeningCheckIntervalSeconds = 5L; + + private int loadActiveListeningMaxThreadNum = Runtime.getRuntime().availableProcessors(); + + private boolean loadActiveListeningVerifyEnable = true; + + private String loadDiskSelectStrategy = LoadDiskSelectorType.MIN_IO_FIRST.getValue(); + + private String loadDiskSelectStrategyForIoTV2AndPipe = + LoadDiskSelectorType.INHERIT_LOAD.getValue(); + /** Pipe related */ /** initialized as empty, updated based on the latest `systemDir` during querying */ private String[] pipeReceiverFileDirs = new String[0]; @@ -1140,6 +1217,17 @@ public class IoTDBConfig { private CompressionType WALCompressionAlgorithm = CompressionType.LZ4; + private LastCacheLoadStrategy lastCacheLoadStrategy = LastCacheLoadStrategy.UPDATE; + + /** + * Whether to cache last values when constructing TsFileResource during LOAD. When set to true, + * blob series will be forcibly ignored even if lastCacheLoadStrategy = + * LastCacheLoadStrategy.UPDATE. 
+ */ + private boolean cacheLastValuesForLoad = true; + + private long cacheLastValuesMemoryBudgetInByte = 4 * 1024 * 1024; + IoTDBConfig() {} public int getMaxLogEntriesNumPerBatch() { @@ -1287,6 +1375,11 @@ private void formulateFolders() { schemaRegionConsensusDir = addDataHomeDir(schemaRegionConsensusDir); indexRootFolder = addDataHomeDir(indexRootFolder); extDir = addDataHomeDir(extDir); + for (int i = 0; i < loadActiveListeningDirs.length; i++) { + loadActiveListeningDirs[i] = addDataHomeDir(loadActiveListeningDirs[i]); + } + loadActiveListeningPipeDir = addDataHomeDir(loadActiveListeningPipeDir); + loadActiveListeningFailDir = addDataHomeDir(loadActiveListeningFailDir); udfDir = addDataHomeDir(udfDir); udfTemporaryLibDir = addDataHomeDir(udfTemporaryLibDir); triggerDir = addDataHomeDir(triggerDir); @@ -1372,7 +1465,7 @@ void reloadSystemMetrics() { // if IOTDB_DATA_HOME is not set, then we keep dataHomeDir prefix being the same with IOTDB_HOME // In this way, we can stay consistent with v0.13.0~2. - private String addDataHomeDir(String dir) { + public static String addDataHomeDir(final String dir) { String dataHomeDir = System.getProperty(IoTDBConstant.IOTDB_DATA_HOME, null); if (dataHomeDir == null) { dataHomeDir = System.getProperty(IoTDBConstant.IOTDB_HOME, null); @@ -1891,14 +1984,6 @@ public void setMaxDirectBufferOffHeapMemorySizeProportion( this.maxDirectBufferOffHeapMemorySizeProportion = maxDirectBufferOffHeapMemorySizeProportion; } - public int getWalBufferQueueCapacity() { - return walBufferQueueCapacity; - } - - void setWalBufferQueueCapacity(int walBufferQueueCapacity) { - this.walBufferQueueCapacity = walBufferQueueCapacity; - } - public long getWalFileSizeThresholdInByte() { return walFileSizeThresholdInByte; } @@ -1947,6 +2032,14 @@ void setDeleteWalFilesPeriodInMs(long deleteWalFilesPeriodInMs) { this.deleteWalFilesPeriodInMs = deleteWalFilesPeriodInMs; } + public boolean getWALCacheShrinkClearEnabled() { + return WALCacheShrinkClearEnabled; + } + + void setWALCacheShrinkClearEnabled(boolean WALCacheShrinkClearEnabled) { + this.WALCacheShrinkClearEnabled = WALCacheShrinkClearEnabled; + } + public boolean isChunkBufferPoolEnable() { return chunkBufferPoolEnable; } @@ -2048,6 +2141,14 @@ public void setAllocateMemoryForPipe(long allocateMemoryForPipe) { this.allocateMemoryForPipe = allocateMemoryForPipe; } + public long getAllocateMemoryPerWalCache() { + return allocateMemoryPerWalCache; + } + + public void setAllocateMemoryPerWalCache(final long allocateMemoryPerWalCache) { + this.allocateMemoryPerWalCache = allocateMemoryPerWalCache; + } + public long getAllocateMemoryForFree() { return Runtime.getRuntime().maxMemory() - allocateMemoryForStorageEngine @@ -2079,6 +2180,15 @@ public void setCompactionThreadCount(int compactionThreadCount) { this.compactionThreadCount = compactionThreadCount; } + public int getCompactionMaxAlignedSeriesNumInOneBatch() { + return compactionMaxAlignedSeriesNumInOneBatch; + } + + public void setCompactionMaxAlignedSeriesNumInOneBatch( + int compactionMaxAlignedSeriesNumInOneBatch) { + this.compactionMaxAlignedSeriesNumInOneBatch = compactionMaxAlignedSeriesNumInOneBatch; + } + public int getContinuousQueryThreadNum() { return continuousQueryThreadNum; } @@ -2199,12 +2309,12 @@ public void setTvListSortAlgorithm(TVListSortAlgorithm tvListSortAlgorithm) { this.tvListSortAlgorithm = tvListSortAlgorithm; } - public int getAvgSeriesPointNumberThreshold() { - return avgSeriesPointNumberThreshold; + public int getTvListSortThreshold() { + return tvListSortThreshold; }
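// [Editor's sketch] Expected effect of the (now public static) addDataHomeDir above, based only
// on the lines visible in this hunk: relative paths are resolved under IOTDB_DATA_HOME, falling
// back to IOTDB_HOME; absolute paths and unset properties pass through unchanged. The property
// keys and the absolute-path check are assumptions made for illustration.
class DataHomeDirSketch {
  static String resolveUnderDataHome(String dir) {
    String home = System.getProperty("IOTDB_DATA_HOME", System.getProperty("IOTDB_HOME"));
    if (home == null || new java.io.File(dir).isAbsolute()) {
      return dir; // nothing to prefix against, or already an absolute path
    }
    return home + java.io.File.separator + dir;
  }

  public static void main(String[] args) {
    System.setProperty("IOTDB_DATA_HOME", "/var/lib/iotdb");
    // prints /var/lib/iotdb/ext/load/pending on POSIX systems (hypothetical example path)
    System.out.println(resolveUnderDataHome("ext/load/pending"));
  }
}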
- public void setAvgSeriesPointNumberThreshold(int avgSeriesPointNumberThreshold) { - this.avgSeriesPointNumberThreshold = avgSeriesPointNumberThreshold; + public void setTVListSortThreshold(int tvListSortThreshold) { + this.tvListSortThreshold = tvListSortThreshold; } public boolean isRpcThriftCompressionEnable() { @@ -2363,7 +2473,19 @@ public int getDefaultStorageGroupLevel() { return defaultStorageGroupLevel; } - void setDefaultStorageGroupLevel(int defaultStorageGroupLevel) { + void setDefaultStorageGroupLevel(int defaultStorageGroupLevel, boolean startUp) { + if (defaultStorageGroupLevel < 1) { + if (startUp) { + logger.warn( + "Illegal defaultStorageGroupLevel: {}, should be >= 1, use default value 1", + defaultStorageGroupLevel); + defaultStorageGroupLevel = 1; + } else { + throw new IllegalArgumentException( + String.format( + "Illegal defaultStorageGroupLevel: %d, should be >= 1", defaultStorageGroupLevel)); + } + } this.defaultStorageGroupLevel = defaultStorageGroupLevel; } @@ -2790,6 +2912,14 @@ public void setEnableCrossSpaceCompaction(boolean enableCrossSpaceCompaction) { this.enableCrossSpaceCompaction = enableCrossSpaceCompaction; } + public boolean isEnableAutoRepairCompaction() { + return enableAutoRepairCompaction; + } + + public void setEnableAutoRepairCompaction(boolean enableAutoRepairCompaction) { + this.enableAutoRepairCompaction = enableAutoRepairCompaction; + } + public InnerSequenceCompactionSelector getInnerSequenceCompactionSelector() { return innerSequenceCompactionSelector; } @@ -2858,6 +2988,31 @@ public void setTargetCompactionFileSize(long targetCompactionFileSize) { this.targetCompactionFileSize = targetCompactionFileSize; } + public int getMaxLevelGapInInnerCompaction() { + return maxLevelGapInInnerCompaction; + } + + public void setMaxLevelGapInInnerCompaction(int maxLevelGapInInnerCompaction) { + this.maxLevelGapInInnerCompaction = maxLevelGapInInnerCompaction; + } + + public long getInnerCompactionTotalFileSizeThresholdInByte() { + return innerCompactionTotalFileSizeThresholdInByte; + } + + public void setInnerCompactionTotalFileSizeThresholdInByte( + long innerCompactionTotalFileSizeThresholdInByte) { + this.innerCompactionTotalFileSizeThresholdInByte = innerCompactionTotalFileSizeThresholdInByte; + } + + public int getInnerCompactionTotalFileNumThreshold() { + return innerCompactionTotalFileNumThreshold; + } + + public void setInnerCompactionTotalFileNumThreshold(int innerCompactionTotalFileNumThreshold) { + this.innerCompactionTotalFileNumThreshold = innerCompactionTotalFileNumThreshold; + } + public long getTargetChunkSize() { return targetChunkSize; } @@ -2906,18 +3061,10 @@ public void setCompactionScheduleIntervalInMs(long compactionScheduleIntervalInM this.compactionScheduleIntervalInMs = compactionScheduleIntervalInMs; } - public long getTTlCheckInterval() { - return ttlCheckInterval; - } - public int getTTlCheckerNum() { return ttlCheckerNum; } - public void setTtlCheckInterval(long ttlCheckInterval) { - this.ttlCheckInterval = ttlCheckInterval; - } - public long getMaxExpiredTime() { return maxExpiredTime; } @@ -2934,12 +3081,12 @@ public void setExpiredDataRatio(float expiredDataRatio) { this.expiredDataRatio = expiredDataRatio; } - public int getFileLimitPerInnerTask() { - return fileLimitPerInnerTask; + public int getInnerCompactionCandidateFileNum() { + return innerCompactionCandidateFileNum; } - public void setFileLimitPerInnerTask(int fileLimitPerInnerTask) { - this.fileLimitPerInnerTask = fileLimitPerInnerTask; +
public void setInnerCompactionCandidateFileNum(int innerCompactionCandidateFileNum) { + this.innerCompactionCandidateFileNum = innerCompactionCandidateFileNum; } public int getFileLimitPerCrossTask() { @@ -3200,6 +3347,14 @@ public void setExtPipeDir(String extPipeDir) { this.extPipeDir = extPipeDir; } + public int getPipeTaskThreadCount() { + return pipeTaskThreadCount; + } + + public void setPipeTaskThreadCount(int pipeTaskThreadCount) { + this.pipeTaskThreadCount = pipeTaskThreadCount; + } + public void setPartitionCacheSize(int partitionCacheSize) { this.partitionCacheSize = partitionCacheSize; } @@ -3336,6 +3491,14 @@ public void setDataNodeSchemaCacheEvictionPolicy(String dataNodeSchemaCacheEvict this.dataNodeSchemaCacheEvictionPolicy = dataNodeSchemaCacheEvictionPolicy; } + public int getSchemaThreadCount() { + return schemaThreadCount; + } + + public void setSchemaThreadCount(int schemaThreadCount) { + this.schemaThreadCount = schemaThreadCount; + } + public String getReadConsistencyLevel() { return readConsistencyLevel; } @@ -3364,6 +3527,14 @@ public double getCompactionProportion() { return compactionProportion; } + public double getWalBufferQueueProportion() { + return walBufferQueueProportion; + } + + public void setWalBufferQueueProportion(double walBufferQueueProportion) { + this.walBufferQueueProportion = walBufferQueueProportion; + } + public double getDevicePathCacheProportion() { return devicePathCacheProportion; } @@ -3778,14 +3949,6 @@ public void setCandidateCompactionTaskQueueSize(int candidateCompactionTaskQueue this.candidateCompactionTaskQueueSize = candidateCompactionTaskQueueSize; } - public int getGlobalCompactionFileInfoCacheSize() { - return globalCompactionFileInfoCacheSize; - } - - public void setGlobalCompactionFileInfoCacheSize(int globalCompactionFileInfoCacheSize) { - this.globalCompactionFileInfoCacheSize = globalCompactionFileInfoCacheSize; - } - public boolean isEnableAuditLog() { return enableAuditLog; } @@ -3834,6 +3997,16 @@ public void setMaxAllocateMemoryRatioForLoad(double maxAllocateMemoryRatioForLoa this.maxAllocateMemoryRatioForLoad = maxAllocateMemoryRatioForLoad; } + public int getLoadTsFileAnalyzeSchemaBatchReadTimeSeriesMetadataCount() { + return loadTsFileAnalyzeSchemaBatchReadTimeSeriesMetadataCount; + } + + public void setLoadTsFileAnalyzeSchemaBatchReadTimeSeriesMetadataCount( + int loadTsFileAnalyzeSchemaBatchReadTimeSeriesMetadataCount) { + this.loadTsFileAnalyzeSchemaBatchReadTimeSeriesMetadataCount = + loadTsFileAnalyzeSchemaBatchReadTimeSeriesMetadataCount; + } + public int getLoadTsFileAnalyzeSchemaBatchFlushTimeSeriesNumber() { return loadTsFileAnalyzeSchemaBatchFlushTimeSeriesNumber; } @@ -3853,6 +4026,24 @@ public void setLoadTsFileAnalyzeSchemaMemorySizeInBytes( this.loadTsFileAnalyzeSchemaMemorySizeInBytes = loadTsFileAnalyzeSchemaMemorySizeInBytes; } + public long getLoadTsFileTabletConversionBatchMemorySizeInBytes() { + return loadTsFileTabletConversionBatchMemorySizeInBytes; + } + + public void setLoadTsFileTabletConversionBatchMemorySizeInBytes( + long loadTsFileTabletConversionBatchMemorySizeInBytes) { + this.loadTsFileTabletConversionBatchMemorySizeInBytes = + loadTsFileTabletConversionBatchMemorySizeInBytes; + } + + public long getLoadChunkMetadataMemorySizeInBytes() { + return loadChunkMetadataMemorySizeInBytes; + } + + public void setLoadChunkMetadataMemorySizeInBytes(long loadChunkMetadataMemorySizeInBytes) { + this.loadChunkMetadataMemorySizeInBytes = loadChunkMetadataMemorySizeInBytes; + } + public long 
getLoadMemoryAllocateRetryIntervalMs() { return loadMemoryAllocateRetryIntervalMs; } @@ -3878,6 +4069,14 @@ public void setLoadCleanupTaskExecutionDelayTimeSeconds( this.loadCleanupTaskExecutionDelayTimeSeconds = loadCleanupTaskExecutionDelayTimeSeconds; } + public int getLoadTsFileRetryCountOnRegionChange() { + return loadTsFileRetryCountOnRegionChange; + } + + public void setLoadTsFileRetryCountOnRegionChange(int loadTsFileRetryCountOnRegionChange) { + this.loadTsFileRetryCountOnRegionChange = loadTsFileRetryCountOnRegionChange; + } + public double getLoadWriteThroughputBytesPerSecond() { return loadWriteThroughputBytesPerSecond; } @@ -3886,6 +4085,106 @@ public void setLoadWriteThroughputBytesPerSecond(double loadWriteThroughputBytes this.loadWriteThroughputBytesPerSecond = loadWriteThroughputBytesPerSecond; } + public long getLoadTabletConversionThresholdBytes() { + return loadTabletConversionThresholdBytes; + } + + public void setLoadTabletConversionThresholdBytes(long loadTabletConversionThresholdBytes) { + this.loadTabletConversionThresholdBytes = loadTabletConversionThresholdBytes; + } + + public int getLoadActiveListeningMaxThreadNum() { + return loadActiveListeningMaxThreadNum; + } + + public void setLoadActiveListeningMaxThreadNum(int loadActiveListeningMaxThreadNum) { + this.loadActiveListeningMaxThreadNum = loadActiveListeningMaxThreadNum; + } + + public boolean isLoadActiveListeningVerifyEnable() { + return loadActiveListeningVerifyEnable; + } + + public void setLoadActiveListeningVerifyEnable(boolean loadActiveListeningVerifyEnable) { + this.loadActiveListeningVerifyEnable = loadActiveListeningVerifyEnable; + } + + public String getLoadDiskSelectStrategy() { + return loadDiskSelectStrategy; + } + + public void setLoadDiskSelectStrategy(String loadDiskSelectStrategy) { + this.loadDiskSelectStrategy = loadDiskSelectStrategy; + } + + public String getLoadDiskSelectStrategyForIoTV2AndPipe() { + return LoadDiskSelectorType.INHERIT_LOAD + .getValue() + .equals(loadDiskSelectStrategyForIoTV2AndPipe) + ? getLoadDiskSelectStrategy() + : loadDiskSelectStrategyForIoTV2AndPipe; + } + + public void setLoadDiskSelectStrategyForIoTV2AndPipe( + String loadDiskSelectStrategyForIoTV2AndPipe) { + this.loadDiskSelectStrategyForIoTV2AndPipe = loadDiskSelectStrategyForIoTV2AndPipe; + } + + public long getLoadActiveListeningCheckIntervalSeconds() { + return loadActiveListeningCheckIntervalSeconds; + } + + public void setLoadActiveListeningCheckIntervalSeconds( + long loadActiveListeningCheckIntervalSeconds) { + this.loadActiveListeningCheckIntervalSeconds = loadActiveListeningCheckIntervalSeconds; + } + + public String getLoadActiveListeningFailDir() { + return loadActiveListeningFailDir == null || Objects.equals(loadActiveListeningFailDir, "") + ? extDir + + File.separator + + IoTDBConstant.LOAD_TSFILE_FOLDER_NAME + + File.separator + + IoTDBConstant.LOAD_TSFILE_ACTIVE_LISTENING_FAILED_FOLDER_NAME + : loadActiveListeningFailDir; + } + + public void setLoadActiveListeningFailDir(String loadActiveListeningFailDir) { + this.loadActiveListeningFailDir = addDataHomeDir(loadActiveListeningFailDir); + } + + public String getLoadActiveListeningPipeDir() { + return loadActiveListeningPipeDir; + } + + public String[] getLoadActiveListeningDirs() { + return (Objects.isNull(this.loadActiveListeningDirs) + || this.loadActiveListeningDirs.length == 0) + ? 
new String[] { + extDir + + File.separator + + IoTDBConstant.LOAD_TSFILE_FOLDER_NAME + + File.separator + + IoTDBConstant.LOAD_TSFILE_ACTIVE_LISTENING_PENDING_FOLDER_NAME + } + : this.loadActiveListeningDirs; + } + + public void setLoadActiveListeningDirs(String[] loadActiveListeningDirs) { + for (int i = 0; i < loadActiveListeningDirs.length; i++) { + loadActiveListeningDirs[i] = addDataHomeDir(loadActiveListeningDirs[i]); + } + this.loadActiveListeningDirs = loadActiveListeningDirs; + } + + public boolean getLoadActiveListeningEnable() { + return loadActiveListeningEnable; + } + + public void setLoadActiveListeningEnable(boolean loadActiveListeningEnable) { + this.loadActiveListeningEnable = loadActiveListeningEnable; + } + public void setPipeReceiverFileDirs(String[] pipeReceiverFileDirs) { this.pipeReceiverFileDirs = pipeReceiverFileDirs; } @@ -3939,6 +4238,14 @@ public long getSortBufferSize() { return sortBufferSize; } + public void setModsCacheSizeLimitPerFI(long modsCacheSizeLimitPerFI) { + this.modsCacheSizeLimitPerFI = modsCacheSizeLimitPerFI; + } + + public long getModsCacheSizeLimitPerFI() { + return modsCacheSizeLimitPerFI; + } + public void setSortTmpDir(String sortTmpDir) { this.sortTmpDir = sortTmpDir; } @@ -4015,4 +4322,30 @@ public CompressionType getWALCompressionAlgorithm() { public void setWALCompressionAlgorithm(CompressionType WALCompressionAlgorithm) { this.WALCompressionAlgorithm = WALCompressionAlgorithm; } + + public LastCacheLoadStrategy getLastCacheLoadStrategy() { + return lastCacheLoadStrategy; + } + + public void setLastCacheLoadStrategy(LastCacheLoadStrategy lastCacheLoadStrategy) { + this.lastCacheLoadStrategy = lastCacheLoadStrategy; + } + + public boolean isCacheLastValuesForLoad() { + return (lastCacheLoadStrategy == LastCacheLoadStrategy.UPDATE + || lastCacheLoadStrategy == LastCacheLoadStrategy.UPDATE_NO_BLOB) + && cacheLastValuesForLoad; + } + + public void setCacheLastValuesForLoad(boolean cacheLastValuesForLoad) { + this.cacheLastValuesForLoad = cacheLastValuesForLoad; + } + + public long getCacheLastValuesMemoryBudgetInByte() { + return cacheLastValuesMemoryBudgetInByte; + } + + public void setCacheLastValuesMemoryBudgetInByte(long cacheLastValuesMemoryBudgetInByte) { + this.cacheLastValuesMemoryBudgetInByte = cacheLastValuesMemoryBudgetInByte; + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java index 564c146bc9430..b8b29fad99cb6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java @@ -18,11 +18,14 @@ */ package org.apache.iotdb.db.conf; +import org.apache.iotdb.commons.binaryallocator.BinaryAllocator; import org.apache.iotdb.commons.conf.CommonConfig; import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.conf.ConfigurationFileUtils; import org.apache.iotdb.commons.conf.IoTDBConstant; +import org.apache.iotdb.commons.conf.TrimProperties; import org.apache.iotdb.commons.exception.BadNodeUrlException; +import org.apache.iotdb.commons.pipe.config.PipeDescriptor; import org.apache.iotdb.commons.schema.SchemaConstant; import org.apache.iotdb.commons.service.metric.MetricService; import org.apache.iotdb.commons.utils.NodeUrlUtils; @@ -31,6 +34,7 @@ import org.apache.iotdb.confignode.rpc.thrift.TRatisConfig; import org.apache.iotdb.db.consensus.DataRegionConsensusImpl; 
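// [Editor's sketch] TrimProperties, imported above, is swapped in for java.util.Properties
// throughout this descriptor. Its implementation is not part of this diff; a minimal equivalent,
// assuming its only job is to trim surrounding whitespace once at the read path (which would
// explain the Optional.ofNullable(...).map(String::trim) wrappers this patch adds elsewhere),
// could look like this:
class TrimPropertiesSketch extends java.util.Properties {
  @Override
  public String getProperty(String key) {
    String value = super.getProperty(key);
    return value == null ? null : value.trim(); // trim once, at the single read path
  }

  @Override
  public String getProperty(String key, String defaultValue) {
    String value = getProperty(key);
    return value == null ? defaultValue : value;
  }
}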
import org.apache.iotdb.db.exception.query.QueryProcessException; +import org.apache.iotdb.db.queryengine.plan.relational.metadata.fetcher.cache.LastCacheLoadStrategy; import org.apache.iotdb.db.service.metrics.IoTDBInternalLocalReporter; import org.apache.iotdb.db.storageengine.StorageEngine; import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.constant.CrossCompactionPerformer; @@ -44,6 +48,7 @@ import org.apache.iotdb.db.storageengine.dataregion.compaction.selector.constant.InnerUnsequenceCompactionSelector; import org.apache.iotdb.db.storageengine.dataregion.wal.WALManager; import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALMode; +import org.apache.iotdb.db.storageengine.load.disk.ILoadDiskSelector; import org.apache.iotdb.db.storageengine.rescon.disk.TierManager; import org.apache.iotdb.db.storageengine.rescon.memory.SystemInfo; import org.apache.iotdb.db.utils.DateTimeUtils; @@ -85,6 +90,8 @@ import java.util.Properties; import java.util.ServiceLoader; import java.util.Set; +import java.util.function.LongConsumer; +import java.util.regex.Pattern; public class IoTDBDescriptor { @@ -102,6 +109,10 @@ public class IoTDBDescriptor { private static final double MIN_DIR_USE_PROPORTION = 0.5; + private static final String[] DEFAULT_WAL_THRESHOLD_NAME = { + "iot_consensus_throttle_threshold_in_byte", "wal_throttle_threshold_in_byte" + }; + static { URL systemConfigUrl = getPropsUrl(CommonConfig.SYSTEM_CONFIG_NAME); URL configNodeUrl = getPropsUrl(CommonConfig.OLD_CONFIG_NODE_CONFIG_NAME); @@ -125,8 +136,10 @@ protected IoTDBDescriptor() { for (IPropertiesLoader loader : propertiesLoaderServiceLoader) { LOGGER.info("Will reload properties from {} ", loader.getClass().getName()); Properties properties = loader.loadProperties(); + TrimProperties trimProperties = new TrimProperties(); + trimProperties.putAll(properties); try { - loadProperties(properties); + loadProperties(trimProperties); } catch (Exception e) { LOGGER.error( "Failed to reload properties from {}, reject DataNode startup.", @@ -193,13 +206,13 @@ else if (!urlString.endsWith(".properties")) { /** load a property file and set TsfileDBConfig variables. 
*/ @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning private void loadProps() { - Properties commonProperties = new Properties(); + TrimProperties commonProperties = new TrimProperties(); // if new properties file exist, skip old properties files URL url = getPropsUrl(CommonConfig.SYSTEM_CONFIG_NAME); if (url != null) { try (InputStream inputStream = url.openStream()) { LOGGER.info("Start to read config file {}", url); - Properties properties = new Properties(); + Properties properties = new TrimProperties(); properties.load(new InputStreamReader(inputStream, StandardCharsets.UTF_8)); commonProperties.putAll(properties); loadProperties(commonProperties); @@ -228,7 +241,7 @@ private void loadProps() { } } - public void loadProperties(Properties properties) throws BadNodeUrlException, IOException { + public void loadProperties(TrimProperties properties) throws BadNodeUrlException, IOException { conf.setClusterName( properties.getProperty(IoTDBConstant.CLUSTER_NAME, conf.getClusterName()).trim()); @@ -307,6 +320,15 @@ public void loadProperties(Properties properties) throws BadNodeUrlException, IO .getProperty("reject_proportion", Double.toString(conf.getRejectProportion())) .trim()); + final double walBufferQueueProportion = + Double.parseDouble( + Optional.ofNullable( + properties.getProperty( + "wal_buffer_queue_proportion", + Double.toString(conf.getWalBufferQueueProportion()))) + .map(String::trim) + .orElse(Double.toString(conf.getWalBufferQueueProportion()))); + final double devicePathCacheProportion = Double.parseDouble( properties @@ -315,11 +337,12 @@ public void loadProperties(Properties properties) throws BadNodeUrlException, IO Double.toString(conf.getDevicePathCacheProportion())) .trim()); - if (rejectProportion + devicePathCacheProportion >= 1) { + if (rejectProportion + walBufferQueueProportion + devicePathCacheProportion >= 1) { LOGGER.warn( - "The sum of write_memory_proportion and device_path_cache_proportion is too large, use default values 0.8 and 0.05."); + "The sum of reject_proportion, wal_buffer_queue_proportion and device_path_cache_proportion is too large, use default values 0.8, 0.1 and 0.05."); } else { conf.setRejectProportion(rejectProportion); + conf.setWalBufferQueueProportion(walBufferQueueProportion); conf.setDevicePathCacheProportion(devicePathCacheProportion); } @@ -394,11 +417,10 @@ public void loadProperties(Properties properties) throws BadNodeUrlException, IO properties.getProperty( "tvlist_sort_algorithm", conf.getTvListSortAlgorithm().toString()))); - conf.setAvgSeriesPointNumberThreshold( + conf.setTVListSortThreshold( Integer.parseInt( properties.getProperty( - "avg_series_point_number_threshold", - Integer.toString(conf.getAvgSeriesPointNumberThreshold())))); + "tvlist_sort_threshold", Integer.toString(conf.getTvListSortThreshold())))); conf.setCheckPeriodWhenInsertBlocked( Integer.parseInt( @@ -431,6 +453,12 @@ public void loadProperties(Properties properties) throws BadNodeUrlException, IO "compaction_schedule_interval_in_ms", Long.toString(conf.getCompactionScheduleIntervalInMs())))); + conf.setEnableAutoRepairCompaction( + Boolean.parseBoolean( + properties.getProperty( + "enable_auto_repair_compaction", + Boolean.toString(conf.isEnableAutoRepairCompaction())))); + conf.setEnableCrossSpaceCompaction( Boolean.parseBoolean( properties.getProperty( @@ -601,12 +629,29 @@ public void loadProperties(Properties properties) throws BadNodeUrlException, IO } conf.setMergeIntervalSec( Long.parseLong( - properties.getProperty( 
- "merge_interval_sec", Long.toString(conf.getMergeIntervalSec())))); - conf.setCompactionThreadCount( - Integer.parseInt( - properties.getProperty( - "compaction_thread_count", Integer.toString(conf.getCompactionThreadCount())))); + Optional.ofNullable( + properties.getProperty( + "merge_interval_sec", Long.toString(conf.getMergeIntervalSec()))) + .map(String::trim) + .orElse(Long.toString(conf.getMergeIntervalSec())))); + int compactionThreadCount = + Integer.parseInt( + Optional.ofNullable( + properties.getProperty( + "compaction_thread_count", + Integer.toString(conf.getCompactionThreadCount()))) + .map(String::trim) + .orElse(Integer.toString(conf.getCompactionThreadCount()))); + conf.setCompactionThreadCount(compactionThreadCount <= 0 ? 1 : compactionThreadCount); + int maxConcurrentAlignedSeriesInCompaction = + Integer.parseInt( + properties.getProperty( + "compaction_max_aligned_series_num_in_one_batch", + Integer.toString(conf.getCompactionMaxAlignedSeriesNumInOneBatch()))); + conf.setCompactionMaxAlignedSeriesNumInOneBatch( + maxConcurrentAlignedSeriesInCompaction <= 0 + ? Integer.MAX_VALUE + : maxConcurrentAlignedSeriesInCompaction); conf.setChunkMetadataSizeProportion( Double.parseDouble( properties.getProperty( @@ -616,6 +661,22 @@ public void loadProperties(Properties properties) throws BadNodeUrlException, IO Long.parseLong( properties.getProperty( "target_compaction_file_size", Long.toString(conf.getTargetCompactionFileSize())))); + conf.setInnerCompactionTotalFileSizeThresholdInByte( + Long.parseLong( + properties.getProperty( + "inner_compaction_total_file_size_threshold", + Long.toString(conf.getInnerCompactionTotalFileSizeThresholdInByte())))); + conf.setInnerCompactionTotalFileNumThreshold( + Integer.parseInt( + properties.getProperty( + "inner_compaction_total_file_num_threshold", + Integer.toString(conf.getInnerCompactionTotalFileNumThreshold())))); + conf.setMaxLevelGapInInnerCompaction( + Integer.parseInt( + properties.getProperty( + "max_level_gap_in_inner_compaction", + Integer.toString(conf.getMaxLevelGapInInnerCompaction())))); + conf.setTargetChunkSize( Long.parseLong( properties.getProperty("target_chunk_size", Long.toString(conf.getTargetChunkSize())))); @@ -633,11 +694,11 @@ public void loadProperties(Properties properties) throws BadNodeUrlException, IO properties.getProperty( "chunk_size_lower_bound_in_compaction", Long.toString(conf.getChunkSizeLowerBoundInCompaction())))); - conf.setFileLimitPerInnerTask( + conf.setInnerCompactionCandidateFileNum( Integer.parseInt( properties.getProperty( - "max_inner_compaction_candidate_file_num", - Integer.toString(conf.getFileLimitPerInnerTask())))); + "inner_compaction_candidate_file_num", + Integer.toString(conf.getInnerCompactionCandidateFileNum())))); conf.setFileLimitPerCrossTask( Integer.parseInt( properties.getProperty( @@ -694,11 +755,6 @@ public void loadProperties(Properties properties) throws BadNodeUrlException, IO "inner_compaction_task_selection_mods_file_threshold", Long.toString(conf.getInnerCompactionTaskSelectionModsFileThreshold())))); - conf.setTtlCheckInterval( - Long.parseLong( - properties.getProperty( - "ttl_check_interval", Long.toString(conf.getTTlCheckInterval())))); - conf.setMaxExpiredTime( Long.parseLong( properties.getProperty("max_expired_time", Long.toString(conf.getMaxExpiredTime())))); @@ -746,12 +802,13 @@ public void loadProperties(Properties properties) throws BadNodeUrlException, IO "dn_rpc_max_concurrent_client_num", 
Integer.toString(conf.getRpcMaxConcurrentClientNum()).trim())); if (maxConcurrentClientNum <= 0) { - maxConcurrentClientNum = 65535; + maxConcurrentClientNum = 1000; } conf.setRpcMaxConcurrentClientNum(maxConcurrentClientNum); - loadAutoCreateSchemaProps(properties); + boolean startUp = true; + loadAutoCreateSchemaProps(properties, startUp); conf.setTsFileStorageFs( properties.getProperty("tsfile_storage_fs", conf.getTsFileStorageFs().toString())); @@ -899,6 +956,10 @@ public void loadProperties(Properties properties) throws BadNodeUrlException, IO } conf.setExtPipeDir(properties.getProperty("ext_pipe_dir", conf.getExtPipeDir()).trim()); + conf.setPipeTaskThreadCount( + Integer.parseInt( + properties.getProperty( + "pipe_task_thread_count", Integer.toString(conf.getPipeTaskThreadCount()).trim()))); // At the same time, set TSFileConfig List fsTypes = new ArrayList<>(); @@ -1020,11 +1081,11 @@ public void loadProperties(Properties properties) throws BadNodeUrlException, IO properties.getProperty("quota_enable", String.valueOf(conf.isQuotaEnable())))); // The buffer for sort operator to calculate - conf.setSortBufferSize( - Long.parseLong( - properties - .getProperty("sort_buffer_size_in_bytes", Long.toString(conf.getSortBufferSize())) - .trim())); + + loadFixedSizeLimitForQuery(properties, "sort_buffer_size_in_bytes", conf::setSortBufferSize); + + loadFixedSizeLimitForQuery( + properties, "mods_cache_size_limit_per_fi_in_bytes", conf::setModsCacheSizeLimitPerFI); // tmp filePath for sort operator conf.setSortTmpDir(properties.getProperty("sort_tmp_dir", conf.getSortTmpDir())); @@ -1035,17 +1096,40 @@ public void loadProperties(Properties properties) throws BadNodeUrlException, IO properties.getProperty( "datanode_schema_cache_eviction_policy", conf.getDataNodeSchemaCacheEvictionPolicy())); + conf.setSchemaThreadCount( + Integer.parseInt( + properties.getProperty( + "schema_thread_count", Integer.toString(conf.getSchemaThreadCount())))); + loadIoTConsensusProps(properties); loadPipeConsensusProps(properties); + + // update query_sample_throughput_bytes_per_sec + loadQuerySampleThroughput(properties); + // update trusted_uri_pattern + loadTrustedUriPattern(properties); + } + + private void loadFixedSizeLimitForQuery( + TrimProperties properties, String name, LongConsumer setFunction) { + long defaultValue = + Math.min( + 32 * 1024 * 1024L, + conf.getAllocateMemoryForOperators() / conf.getQueryThreadCount() / 2); + long size = Long.parseLong(properties.getProperty(name, Long.toString(defaultValue))); + if (size <= 0) { + size = defaultValue; + } + setFunction.accept(size); } - private void reloadConsensusProps(Properties properties) throws IOException { + private void reloadConsensusProps(TrimProperties properties) throws IOException { loadIoTConsensusProps(properties); loadPipeConsensusProps(properties); DataRegionConsensusImpl.reloadConsensusConfig(); } - private void loadIoTConsensusProps(Properties properties) throws IOException { + private void loadIoTConsensusProps(TrimProperties properties) throws IOException { conf.setMaxLogEntriesNumPerBatch( Integer.parseInt( properties @@ -1088,7 +1172,7 @@ private void loadIoTConsensusProps(Properties properties) throws IOException { .trim())); } - private void loadPipeConsensusProps(Properties properties) throws IOException { + private void loadPipeConsensusProps(TrimProperties properties) throws IOException { conf.setPipeConsensusPipelineSize( Integer.parseInt( properties.getProperty( @@ -1100,7 +1184,7 @@ private void 
loadPipeConsensusProps(Properties properties) throws IOException { } } - private void loadAuthorCache(Properties properties) { + private void loadAuthorCache(TrimProperties properties) { conf.setAuthorCacheSize( Integer.parseInt( properties.getProperty( @@ -1111,7 +1195,7 @@ private void loadAuthorCache(Properties properties) { "author_cache_expire_time", String.valueOf(conf.getAuthorCacheExpireTime())))); } - private void loadWALProps(Properties properties) throws IOException { + private void loadWALProps(TrimProperties properties) throws IOException { conf.setWalMode( WALMode.valueOf((properties.getProperty("wal_mode", conf.getWalMode().toString())))); @@ -1131,18 +1215,19 @@ private void loadWALProps(Properties properties) throws IOException { conf.setWalBufferSize(walBufferSize); } - int walBufferQueueCapacity = - Integer.parseInt( + boolean WALInsertNodeCacheShrinkClearEnabled = + Boolean.parseBoolean( properties.getProperty( - "wal_buffer_queue_capacity", Integer.toString(conf.getWalBufferQueueCapacity()))); - if (walBufferQueueCapacity > 0) { - conf.setWalBufferQueueCapacity(walBufferQueueCapacity); + "wal_cache_shrink_clear_enabled", + Boolean.toString(conf.getWALCacheShrinkClearEnabled()))); + if (conf.getWALCacheShrinkClearEnabled() != WALInsertNodeCacheShrinkClearEnabled) { + conf.setWALCacheShrinkClearEnabled(WALInsertNodeCacheShrinkClearEnabled); } loadWALHotModifiedProps(properties); } - private void loadCompactionHotModifiedProps(Properties properties) + private void loadCompactionHotModifiedProps(TrimProperties properties) throws InterruptedException, IOException { boolean compactionTaskConfigHotModified = loadCompactionTaskHotModifiedProps(properties); if (compactionTaskConfigHotModified) { @@ -1161,8 +1246,8 @@ private void loadCompactionHotModifiedProps(Properties properties) CompactionScheduleTaskManager.getInstance().checkAndMayApplyConfigurationChange(); // hot load compaction task manager configurations - loadCompactionIsEnabledHotModifiedProps(properties); - boolean restartCompactionTaskManager = loadCompactionThreadCountHotModifiedProps(properties); + boolean restartCompactionTaskManager = loadCompactionIsEnabledHotModifiedProps(properties); + restartCompactionTaskManager |= loadCompactionThreadCountHotModifiedProps(properties); restartCompactionTaskManager |= loadCompactionSubTaskCountHotModifiedProps(properties); if (restartCompactionTaskManager) { CompactionTaskManager.getInstance().restart(); @@ -1175,9 +1260,15 @@ private void loadCompactionHotModifiedProps(Properties properties) .setCompactionReadThroughputRate(conf.getCompactionReadThroughputMbPerSec()); CompactionTaskManager.getInstance() .setWriteMergeRate(conf.getCompactionWriteThroughputMbPerSec()); + + conf.setEnableAutoRepairCompaction( + Boolean.parseBoolean( + properties.getProperty( + "enable_auto_repair_compaction", + Boolean.toString(conf.isEnableAutoRepairCompaction())))); } - private boolean loadCompactionTaskHotModifiedProps(Properties properties) throws IOException { + private boolean loadCompactionTaskHotModifiedProps(TrimProperties properties) throws IOException { boolean configModified = false; // update merge_write_throughput_mb_per_sec int compactionWriteThroughput = conf.getCompactionWriteThroughputMbPerSec(); @@ -1209,15 +1300,16 @@ private boolean loadCompactionTaskHotModifiedProps(Properties properties) throws "compaction_read_throughput_mb_per_sec")))); configModified |= compactionReadThroughput != conf.getCompactionReadThroughputMbPerSec(); - // update 
max_inner_compaction_candidate_file_num - int maxInnerCompactionCandidateFileNum = conf.getFileLimitPerInnerTask(); - conf.setFileLimitPerInnerTask( + // update inner_compaction_candidate_file_num + int maxInnerCompactionCandidateFileNum = conf.getInnerCompactionCandidateFileNum(); + conf.setInnerCompactionCandidateFileNum( Integer.parseInt( properties.getProperty( - "max_inner_compaction_candidate_file_num", + "inner_compaction_candidate_file_num", ConfigurationFileUtils.getConfigurationDefaultValue( - "max_inner_compaction_candidate_file_num")))); - configModified |= maxInnerCompactionCandidateFileNum != conf.getFileLimitPerInnerTask(); + "inner_compaction_candidate_file_num")))); + configModified |= + maxInnerCompactionCandidateFileNum != conf.getInnerCompactionCandidateFileNum(); // update target_compaction_file_size long targetCompactionFilesize = conf.getTargetCompactionFileSize(); @@ -1289,10 +1381,80 @@ private boolean loadCompactionTaskHotModifiedProps(Properties properties) throws innerCompactionTaskSelectionModsFileThreshold != conf.getInnerCompactionTaskSelectionModsFileThreshold(); + // update inner_seq_selector + InnerSequenceCompactionSelector innerSequenceCompactionSelector = + conf.getInnerSequenceCompactionSelector(); + conf.setInnerSequenceCompactionSelector( + InnerSequenceCompactionSelector.getInnerSequenceCompactionSelector( + properties.getProperty( + "inner_seq_selector", + ConfigurationFileUtils.getConfigurationDefaultValue("inner_seq_selector")))); + configModified |= innerSequenceCompactionSelector != conf.getInnerSequenceCompactionSelector(); + + // update inner_unseq_selector + InnerUnsequenceCompactionSelector innerUnsequenceCompactionSelector = + conf.getInnerUnsequenceCompactionSelector(); + conf.setInnerUnsequenceCompactionSelector( + InnerUnsequenceCompactionSelector.getInnerUnsequenceCompactionSelector( + properties.getProperty( + "inner_unseq_selector", + ConfigurationFileUtils.getConfigurationDefaultValue("inner_unseq_selector")))); + configModified |= + innerUnsequenceCompactionSelector != conf.getInnerUnsequenceCompactionSelector(); + + // update inner_compaction_total_file_size_threshold + long innerCompactionFileSizeThresholdInByte = + conf.getInnerCompactionTotalFileSizeThresholdInByte(); + conf.setInnerCompactionTotalFileSizeThresholdInByte( + Long.parseLong( + properties.getProperty( + "inner_compaction_total_file_size_threshold", + ConfigurationFileUtils.getConfigurationDefaultValue( + "inner_compaction_total_file_size_threshold")))); + configModified |= + innerCompactionFileSizeThresholdInByte + != conf.getInnerCompactionTotalFileSizeThresholdInByte(); + + // update inner_compaction_total_file_num_threshold + int innerCompactionTotalFileNumThreshold = conf.getInnerCompactionTotalFileNumThreshold(); + conf.setInnerCompactionTotalFileNumThreshold( + Integer.parseInt( + properties.getProperty( + "inner_compaction_total_file_num_threshold", + ConfigurationFileUtils.getConfigurationDefaultValue( + "inner_compaction_total_file_num_threshold")))); + configModified |= + innerCompactionTotalFileNumThreshold != conf.getInnerCompactionTotalFileNumThreshold(); + + // update max_level_gap_in_inner_compaction + int maxLevelGapInInnerCompaction = conf.getMaxLevelGapInInnerCompaction(); + conf.setMaxLevelGapInInnerCompaction( + Integer.parseInt( + properties.getProperty( + "max_level_gap_in_inner_compaction", + ConfigurationFileUtils.getConfigurationDefaultValue( + "max_level_gap_in_inner_compaction")))); + configModified |= maxLevelGapInInnerCompaction != 
conf.getMaxLevelGapInInnerCompaction(); + + // update compaction_max_aligned_series_num_in_one_batch + int compactionMaxAlignedSeriesNumInOneBatch = conf.getCompactionMaxAlignedSeriesNumInOneBatch(); + int newCompactionMaxAlignedSeriesNumInOneBatch = + Integer.parseInt( + properties.getProperty( + "compaction_max_aligned_series_num_in_one_batch", + ConfigurationFileUtils.getConfigurationDefaultValue( + "compaction_max_aligned_series_num_in_one_batch"))); + conf.setCompactionMaxAlignedSeriesNumInOneBatch( + newCompactionMaxAlignedSeriesNumInOneBatch > 0 + ? newCompactionMaxAlignedSeriesNumInOneBatch + : Integer.MAX_VALUE); + configModified |= + compactionMaxAlignedSeriesNumInOneBatch + != conf.getCompactionMaxAlignedSeriesNumInOneBatch(); return configModified; } - private boolean loadCompactionThreadCountHotModifiedProps(Properties properties) + private boolean loadCompactionThreadCountHotModifiedProps(TrimProperties properties) throws IOException { int newConfigCompactionThreadCount = Integer.parseInt( @@ -1300,8 +1462,7 @@ private boolean loadCompactionThreadCountHotModifiedProps(Properties properties) "compaction_thread_count", ConfigurationFileUtils.getConfigurationDefaultValue("compaction_thread_count"))); if (newConfigCompactionThreadCount <= 0) { - LOGGER.error("compaction_thread_count must greater than 0"); - return false; + newConfigCompactionThreadCount = 1; } if (newConfigCompactionThreadCount == conf.getCompactionThreadCount()) { return false; @@ -1314,7 +1475,7 @@ private boolean loadCompactionThreadCountHotModifiedProps(Properties properties) return true; } - private boolean loadCompactionSubTaskCountHotModifiedProps(Properties properties) + private boolean loadCompactionSubTaskCountHotModifiedProps(TrimProperties properties) throws IOException { int newConfigSubtaskNum = Integer.parseInt( @@ -1323,8 +1484,7 @@ private boolean loadCompactionSubTaskCountHotModifiedProps(Properties properties ConfigurationFileUtils.getConfigurationDefaultValue( "sub_compaction_thread_count"))); if (newConfigSubtaskNum <= 0) { - LOGGER.error("sub_compaction_thread_count must greater than 0"); - return false; + newConfigSubtaskNum = 1; } if (newConfigSubtaskNum == conf.getSubCompactionTaskNum()) { return false; @@ -1333,7 +1493,8 @@ private boolean loadCompactionSubTaskCountHotModifiedProps(Properties properties return true; } - private void loadCompactionIsEnabledHotModifiedProps(Properties properties) throws IOException { + private boolean loadCompactionIsEnabledHotModifiedProps(TrimProperties properties) + throws IOException { boolean isCompactionEnabled = conf.isEnableSeqSpaceCompaction() || conf.isEnableUnseqSpaceCompaction() @@ -1361,17 +1522,13 @@ private void loadCompactionIsEnabledHotModifiedProps(Properties properties) thro || newConfigEnableSeqSpaceCompaction || newConfigEnableUnseqSpaceCompaction; - if (!isCompactionEnabled && compactionEnabledInNewConfig) { - LOGGER.error("Compaction cannot start in current status."); - return; - } - conf.setEnableCrossSpaceCompaction(newConfigEnableCrossSpaceCompaction); conf.setEnableSeqSpaceCompaction(newConfigEnableSeqSpaceCompaction); conf.setEnableUnseqSpaceCompaction(newConfigEnableUnseqSpaceCompaction); + return !isCompactionEnabled && compactionEnabledInNewConfig; } - private void loadWALHotModifiedProps(Properties properties) throws IOException { + private void loadWALHotModifiedProps(TrimProperties properties) throws IOException { long walAsyncModeFsyncDelayInMs = Long.parseLong( properties.getProperty( @@ -1442,12 +1599,7 @@ private void 
loadWALHotModifiedProps(Properties properties) throws IOException { conf.setDeleteWalFilesPeriodInMs(deleteWalFilesPeriod); } - long throttleDownThresholdInByte = - Long.parseLong( - properties.getProperty( - "iot_consensus_throttle_threshold_in_byte", - ConfigurationFileUtils.getConfigurationDefaultValue( - "iot_consensus_throttle_threshold_in_byte"))); + long throttleDownThresholdInByte = Long.parseLong(getWalThrottleThreshold(properties)); if (throttleDownThresholdInByte > 0) { conf.setThrottleThreshold(throttleDownThresholdInByte); } @@ -1463,6 +1615,20 @@ private void loadWALHotModifiedProps(TrimProperties properties) throws IOException { } } + private String getWalThrottleThreshold(TrimProperties prop) throws IOException { + String oldThrottleThreshold = prop.getProperty(DEFAULT_WAL_THRESHOLD_NAME[0], null); + if (oldThrottleThreshold != null) { + LOGGER.warn( + "The throttle threshold parameter {} is deprecated, please use {}", + DEFAULT_WAL_THRESHOLD_NAME[0], + DEFAULT_WAL_THRESHOLD_NAME[1]); + return oldThrottleThreshold; + } + return prop.getProperty( + DEFAULT_WAL_THRESHOLD_NAME[1], + ConfigurationFileUtils.getConfigurationDefaultValue(DEFAULT_WAL_THRESHOLD_NAME[1])); + } + public long getThrottleThresholdWithDirs() { ArrayList<String> dataDiskDirs = new ArrayList<>(Arrays.asList(conf.getDataDirs())); ArrayList<String> walDiskDirs = @@ -1490,7 +1656,8 @@ public long getThrottleThresholdWithDirs() { return Math.max(Math.min(newThrottleThreshold, MAX_THROTTLE_THRESHOLD), MIN_THROTTLE_THRESHOLD); } - private void loadAutoCreateSchemaProps(Properties properties) throws IOException { + private void loadAutoCreateSchemaProps(TrimProperties properties, boolean startUp) + throws IOException { conf.setAutoCreateSchemaEnabled( Boolean.parseBoolean( properties.getProperty( @@ -1522,7 +1689,8 @@ private void loadAutoCreateSchemaProps(Properties properties) throws IOException properties.getProperty( "default_storage_group_level", ConfigurationFileUtils.getConfigurationDefaultValue( - "default_storage_group_level")))); + "default_storage_group_level"))), + startUp); conf.setDefaultBooleanEncoding( properties.getProperty( "default_boolean_encoding", @@ -1549,7 +1717,7 @@ private void loadAutoCreateSchemaProps(Properties properties) throws IOException ConfigurationFileUtils.getConfigurationDefaultValue("default_text_encoding"))); } - private void loadTsFileProps(Properties properties) throws IOException { + private void loadTsFileProps(TrimProperties properties) throws IOException { TSFileDescriptor.getInstance() .getConfig() .setGroupSizeInByte( @@ -1623,11 +1791,39 @@ private void loadTsFileProps(Properties properties) throws IOException { "max_tsblock_line_number", ConfigurationFileUtils.getConfigurationDefaultValue( "max_tsblock_line_number")))); + + String booleanCompressor = properties.getProperty("boolean_compressor"); + if (booleanCompressor != null) { + TSFileDescriptor.getInstance().getConfig().setBooleanCompression(booleanCompressor); + } + String int32Compressor = properties.getProperty("int32_compressor"); + if (int32Compressor != null) { + TSFileDescriptor.getInstance().getConfig().setInt32Compression(int32Compressor); + } + String int64Compressor = properties.getProperty("int64_compressor"); + if (int64Compressor != null) { + TSFileDescriptor.getInstance().getConfig().setInt64Compression(int64Compressor); + } + String floatCompressor = properties.getProperty("float_compressor"); + if (floatCompressor != null) { + TSFileDescriptor.getInstance().getConfig().setFloatCompression(floatCompressor); + } +
String doubleCompressor = properties.getProperty("double_compressor"); + if (doubleCompressor != null) { + TSFileDescriptor.getInstance().getConfig().setDoubleCompression(doubleCompressor); + } + String textCompressor = properties.getProperty("text_compressor"); + if (textCompressor != null) { + TSFileDescriptor.getInstance().getConfig().setTextCompression(textCompressor); + } } // Mqtt related - private void loadMqttProps(Properties properties) { - conf.setMqttDir(properties.getProperty("mqtt_root_dir", conf.getMqttDir())); + private void loadMqttProps(TrimProperties properties) { + conf.setMqttDir( + Optional.ofNullable(properties.getProperty("mqtt_root_dir", conf.getMqttDir())) + .map(String::trim) + .orElse(conf.getMqttDir())); if (properties.getProperty(IoTDBConstant.MQTT_HOST_NAME) != null) { conf.setMqttHost(properties.getProperty(IoTDBConstant.MQTT_HOST_NAME)); @@ -1663,7 +1859,7 @@ private void loadMqttProps(Properties properties) { } // timed flush memtable - private void loadTimedService(Properties properties) throws IOException { + private void loadTimedService(TrimProperties properties) throws IOException { conf.setEnableTimedFlushSeqMemtable( Boolean.parseBoolean( properties.getProperty( @@ -1736,7 +1932,7 @@ private String[][] parseDataDirs(String dataDirs) { return tierDataDirs; } - public synchronized void loadHotModifiedProps(Properties properties) + public synchronized void loadHotModifiedProps(TrimProperties properties) throws QueryProcessException { try { // update data dirs @@ -1762,7 +1958,8 @@ public synchronized void loadHotModifiedProps(Properties properties) loadTimedService(properties); StorageEngine.getInstance().rebootTimedService(); // update params of creating schema automatically - loadAutoCreateSchemaProps(properties); + boolean startUp = false; + loadAutoCreateSchemaProps(properties, startUp); // update tsfile-format config loadTsFileProps(properties); @@ -1818,29 +2015,10 @@ public synchronized void loadHotModifiedProps(Properties properties) loadCompactionHotModifiedProps(properties); // update load config - conf.setLoadCleanupTaskExecutionDelayTimeSeconds( - Long.parseLong( - properties.getProperty( - "load_clean_up_task_execution_delay_time_seconds", - ConfigurationFileUtils.getConfigurationDefaultValue( - "load_clean_up_task_execution_delay_time_seconds")))); - - conf.setLoadWriteThroughputBytesPerSecond( - Double.parseDouble( - properties.getProperty( - "load_write_throughput_bytes_per_second", - ConfigurationFileUtils.getConfigurationDefaultValue( - "load_write_throughput_bytes_per_second")))); + loadLoadTsFileHotModifiedProp(properties); // update pipe config - commonDescriptor - .getConfig() - .setPipeAllSinksRateLimitBytesPerSecond( - Double.parseDouble( - properties.getProperty( - "pipe_all_sinks_rate_limit_bytes_per_second", - ConfigurationFileUtils.getConfigurationDefaultValue( - "pipe_all_sinks_rate_limit_bytes_per_second")))); + loadPipeHotModifiedProp(properties); // update merge_threshold_of_explain_analyze conf.setMergeThresholdOfExplainAnalyze( @@ -1862,6 +2040,53 @@ public synchronized void loadHotModifiedProps(Properties properties) // update retry config commonDescriptor.loadRetryProperties(properties); + + // update binary allocator + commonDescriptor + .getConfig() + .setEnableBinaryAllocator( + Boolean.parseBoolean( + Optional.ofNullable( + properties.getProperty( + "enable_binary_allocator", + ConfigurationFileUtils.getConfigurationDefaultValue( + "enable_binary_allocator"))) + .map(String::trim) + .orElse( + 
ConfigurationFileUtils.getConfigurationDefaultValue( + "enable_binary_allocator")))); + if (commonDescriptor.getConfig().isEnableBinaryAllocator()) { + BinaryAllocator.getInstance().start(); + } else { + BinaryAllocator.getInstance().close(true); + } + + commonDescriptor + .getConfig() + .setTimestampPrecisionCheckEnabled( + Boolean.parseBoolean( + properties.getProperty( + "timestamp_precision_check_enabled", + ConfigurationFileUtils.getConfigurationDefaultValue( + "timestamp_precision_check_enabled")))); + + // update query_sample_throughput_bytes_per_sec + loadQuerySampleThroughput(properties); + // update trusted_uri_pattern + loadTrustedUriPattern(properties); + + // tvlist_sort_threshold + conf.setTVListSortThreshold( + Integer.parseInt( + properties.getProperty( + "tvlist_sort_threshold", + ConfigurationFileUtils.getConfigurationDefaultValue("tvlist_sort_threshold")))); + + // sort_buffer_size_in_bytes + loadFixedSizeLimitForQuery(properties, "sort_buffer_size_in_bytes", conf::setSortBufferSize); + + loadFixedSizeLimitForQuery( + properties, "mods_cache_size_limit_per_fi_in_bytes", conf::setModsCacheSizeLimitPerFI); } catch (Exception e) { if (e instanceof InterruptedException) { Thread.currentThread().interrupt(); @@ -1870,6 +2095,51 @@ public synchronized void loadHotModifiedProps(Properties properties) } } + private void loadQuerySampleThroughput(TrimProperties properties) throws IOException { + String querySamplingRateLimitNumber = + Optional.ofNullable( + properties.getProperty( + "query_sample_throughput_bytes_per_sec", + ConfigurationFileUtils.getConfigurationDefaultValue( + "query_sample_throughput_bytes_per_sec"))) + .map(String::trim) + .orElse( + ConfigurationFileUtils.getConfigurationDefaultValue( + "query_sample_throughput_bytes_per_sec")); + if (querySamplingRateLimitNumber != null) { + try { + int rateLimit = Integer.parseInt(querySamplingRateLimitNumber); + commonDescriptor.getConfig().setQuerySamplingRateLimit(rateLimit); + } catch (Exception e) { + LOGGER.warn( + "Failed to parse query_sample_throughput_bytes_per_sec {} to integer", + querySamplingRateLimitNumber); + } + } + } + + private void loadTrustedUriPattern(TrimProperties properties) throws IOException { + String trustedUriPattern = + Optional.ofNullable( + properties.getProperty( + "trusted_uri_pattern", + ConfigurationFileUtils.getConfigurationDefaultValue("trusted_uri_pattern"))) + .map(String::trim) + .orElse(ConfigurationFileUtils.getConfigurationDefaultValue("trusted_uri_pattern")); + Pattern pattern; + if (trustedUriPattern != null) { + try { + pattern = Pattern.compile(trustedUriPattern); + } catch (Exception e) { + LOGGER.warn("Failed to parse trusted_uri_pattern {}", trustedUriPattern); + pattern = commonDescriptor.getConfig().getTrustedUriPattern(); + } + } else { + pattern = commonDescriptor.getConfig().getTrustedUriPattern(); + } + commonDescriptor.getConfig().setTrustedUriPattern(pattern); + } + public synchronized void loadHotModifiedProps() throws QueryProcessException { URL url = getPropsUrl(CommonConfig.SYSTEM_CONFIG_NAME); if (url == null) { @@ -1877,11 +2147,11 @@ public synchronized void loadHotModifiedProps() throws QueryProcessException { return; } - Properties commonProperties = new Properties(); + TrimProperties commonProperties = new TrimProperties(); try (InputStream inputStream = url.openStream()) { LOGGER.info("Start to reload config file {}", url); commonProperties.load(new InputStreamReader(inputStream, StandardCharsets.UTF_8)); - 
ConfigurationFileUtils.getConfigurationDefaultValue(); + ConfigurationFileUtils.loadConfigurationDefaultValueFromTemplate(); loadHotModifiedProps(commonProperties); } catch (Exception e) { LOGGER.warn("Fail to reload config file {}", url, e); @@ -1893,7 +2163,7 @@ public synchronized void loadHotModifiedProps() throws QueryProcessException { reloadMetricProperties(commonProperties); } - public void reloadMetricProperties(Properties properties) { + public void reloadMetricProperties(TrimProperties properties) { ReloadLevel reloadLevel = MetricConfigDescriptor.getInstance().loadHotProps(properties, false); LOGGER.info("Reload metric service in level {}", reloadLevel); if (reloadLevel == ReloadLevel.RESTART_INTERNAL_REPORTER) { @@ -1910,7 +2180,7 @@ public void reloadMetricProperties(Properties properties) { } } - private void initMemoryAllocate(Properties properties) { + private void initMemoryAllocate(TrimProperties properties) { String memoryAllocateProportion = properties.getProperty("datanode_memory_proportion", null); if (memoryAllocateProportion == null) { memoryAllocateProportion = @@ -1953,6 +2223,11 @@ private void initMemoryAllocate(Properties properties) { } } } + conf.setAllocateMemoryPerWalCache( + Long.parseLong( + properties.getProperty( + "allocate_memory_per_wal_cache", + Long.toString(conf.getAllocateMemoryPerWalCache())))); LOGGER.info("initial allocateMemoryForRead = {}", conf.getAllocateMemoryForRead()); LOGGER.info("initial allocateMemoryForWrite = {}", conf.getAllocateMemoryForStorageEngine()); @@ -2022,7 +2297,7 @@ private void initMemoryAllocate(Properties properties) { } @SuppressWarnings("java:S3518") - private void initStorageEngineAllocate(Properties properties) { + private void initStorageEngineAllocate(TrimProperties properties) { long storageMemoryTotal = conf.getAllocateMemoryForStorageEngine(); String valueOfStorageEngineMemoryProportion = properties.getProperty("storage_engine_memory_proportion"); @@ -2077,7 +2352,7 @@ private void initStorageEngineAllocate(Properties properties) { } @SuppressWarnings("squid:S3518") - private void initSchemaMemoryAllocate(Properties properties) { + private void initSchemaMemoryAllocate(TrimProperties properties) { long schemaMemoryTotal = conf.getAllocateMemoryForSchema(); String schemaMemoryPortionInput = properties.getProperty("schema_memory_proportion"); @@ -2135,36 +2410,183 @@ private void initSchemaMemoryAllocate(Properties properties) { LOGGER.info("allocateMemoryForPartitionCache = {}", conf.getAllocateMemoryForPartitionCache()); } - private void loadLoadTsFileProps(Properties properties) { + private void loadLoadTsFileProps(TrimProperties properties) throws IOException { conf.setMaxAllocateMemoryRatioForLoad( Double.parseDouble( properties.getProperty( "max_allocate_memory_ratio_for_load", String.valueOf(conf.getMaxAllocateMemoryRatioForLoad())))); + conf.setLoadTsFileAnalyzeSchemaBatchReadTimeSeriesMetadataCount( + Integer.parseInt( + properties.getProperty( + "load_tsfile_analyze_schema_batch_read_time_series_metadata_count", + String.valueOf( + conf.getLoadTsFileAnalyzeSchemaBatchReadTimeSeriesMetadataCount())))); conf.setLoadTsFileAnalyzeSchemaBatchFlushTimeSeriesNumber( Integer.parseInt( properties.getProperty( "load_tsfile_analyze_schema_batch_flush_time_series_number", String.valueOf(conf.getLoadTsFileAnalyzeSchemaBatchFlushTimeSeriesNumber())))); conf.setLoadTsFileAnalyzeSchemaMemorySizeInBytes( + Long.parseLong( + Optional.ofNullable( + properties.getProperty( + 
"load_tsfile_analyze_schema_memory_size_in_bytes", + String.valueOf(conf.getLoadTsFileAnalyzeSchemaMemorySizeInBytes()))) + .map(String::trim) + .orElse(String.valueOf(conf.getLoadTsFileAnalyzeSchemaMemorySizeInBytes())))); + conf.setLoadTsFileTabletConversionBatchMemorySizeInBytes( Long.parseLong( properties.getProperty( - "load_tsfile_analyze_schema_memory_size_in_bytes", - String.valueOf(conf.getLoadTsFileAnalyzeSchemaMemorySizeInBytes())))); + "load_tsfile_tablet_conversion_batch_memory_size_in_bytes", + String.valueOf(conf.getLoadTsFileTabletConversionBatchMemorySizeInBytes())))); + conf.setLoadChunkMetadataMemorySizeInBytes( + Long.parseLong( + Optional.ofNullable( + properties.getProperty( + "load_chunk_metadata_memory_size_in_bytes", + String.valueOf(conf.getLoadChunkMetadataMemorySizeInBytes()))) + .map(String::trim) + .orElse(String.valueOf(conf.getLoadChunkMetadataMemorySizeInBytes())))); conf.setLoadCleanupTaskExecutionDelayTimeSeconds( Long.parseLong( properties.getProperty( "load_clean_up_task_execution_delay_time_seconds", String.valueOf(conf.getLoadCleanupTaskExecutionDelayTimeSeconds())))); + conf.setLoadTsFileRetryCountOnRegionChange( + Integer.parseInt( + properties.getProperty( + "load_tsfile_retry_count_on_region_change", + String.valueOf(conf.getLoadTsFileRetryCountOnRegionChange())))); conf.setLoadWriteThroughputBytesPerSecond( Double.parseDouble( properties.getProperty( "load_write_throughput_bytes_per_second", String.valueOf(conf.getLoadWriteThroughputBytesPerSecond())))); + conf.setLoadTabletConversionThresholdBytes( + Long.parseLong( + properties.getProperty( + "load_tablet_conversion_threshold_bytes", + String.valueOf(conf.getLoadTabletConversionThresholdBytes())))); + + conf.setLoadActiveListeningEnable( + Boolean.parseBoolean( + properties.getProperty( + "load_active_listening_enable", + Boolean.toString(conf.getLoadActiveListeningEnable())))); + conf.setLoadActiveListeningDirs( + Arrays.stream( + properties + .getProperty( + "load_active_listening_dirs", + String.join(",", conf.getLoadActiveListeningDirs())) + .trim() + .split(",")) + .filter(dir -> !dir.isEmpty()) + .toArray(String[]::new)); + conf.setLoadActiveListeningFailDir( + properties.getProperty( + "load_active_listening_fail_dir", conf.getLoadActiveListeningFailDir())); + + final long loadActiveListeningCheckIntervalSeconds = + Long.parseLong( + properties.getProperty( + "load_active_listening_check_interval_seconds", + Long.toString(conf.getLoadActiveListeningCheckIntervalSeconds()))); + conf.setLoadActiveListeningCheckIntervalSeconds( + loadActiveListeningCheckIntervalSeconds <= 0 + ? 
conf.getLoadActiveListeningCheckIntervalSeconds() + : loadActiveListeningCheckIntervalSeconds); + + conf.setLoadActiveListeningMaxThreadNum( + Integer.parseInt( + properties.getProperty( + "load_active_listening_max_thread_num", + Integer.toString(conf.getLoadActiveListeningMaxThreadNum())))); + + if (conf.getLoadActiveListeningMaxThreadNum() <= 0) { + conf.setLoadActiveListeningMaxThreadNum(Runtime.getRuntime().availableProcessors()); + } + + conf.setLoadActiveListeningVerifyEnable( + Boolean.parseBoolean( + properties.getProperty( + "load_active_listening_verify_enable", + Boolean.toString(conf.isLoadActiveListeningVerifyEnable())))); + + conf.setLoadDiskSelectStrategy( + properties.getProperty( + "load_disk_select_strategy", + ILoadDiskSelector.LoadDiskSelectorType.MIN_IO_FIRST.getValue())); + + conf.setLoadDiskSelectStrategyForIoTV2AndPipe( + properties.getProperty( + "load_disk_select_strategy_for_pipe_and_iotv2", + ILoadDiskSelector.LoadDiskSelectorType.INHERIT_LOAD.getValue())); + + conf.setLastCacheLoadStrategy( + LastCacheLoadStrategy.valueOf( + properties.getProperty( + "last_cache_operation_on_load", LastCacheLoadStrategy.UPDATE.name()))); + + conf.setCacheLastValuesForLoad( + Boolean.parseBoolean( + properties.getProperty( + "cache_last_values_for_load", String.valueOf(conf.isCacheLastValuesForLoad())))); + + conf.setCacheLastValuesMemoryBudgetInByte( + Long.parseLong( + properties.getProperty( + "cache_last_values_memory_budget_in_byte", + String.valueOf(conf.getCacheLastValuesMemoryBudgetInByte())))); + } + + private void loadLoadTsFileHotModifiedProp(TrimProperties properties) throws IOException { + conf.setLoadCleanupTaskExecutionDelayTimeSeconds( + Long.parseLong( + properties.getProperty( + "load_clean_up_task_execution_delay_time_seconds", + ConfigurationFileUtils.getConfigurationDefaultValue( + "load_clean_up_task_execution_delay_time_seconds")))); + + conf.setLoadWriteThroughputBytesPerSecond( + Double.parseDouble( + properties.getProperty( + "load_write_throughput_bytes_per_second", + ConfigurationFileUtils.getConfigurationDefaultValue( + "load_write_throughput_bytes_per_second")))); + + conf.setLoadActiveListeningEnable( + Boolean.parseBoolean( + properties.getProperty( + "load_active_listening_enable", + ConfigurationFileUtils.getConfigurationDefaultValue( + "load_active_listening_enable")))); + conf.setLoadActiveListeningDirs( + Arrays.stream( + properties + .getProperty( + "load_active_listening_dirs", + String.join( + ",", + ConfigurationFileUtils.getConfigurationDefaultValue( + "load_active_listening_dirs"))) + .trim() + .split(",")) + .filter(dir -> !dir.isEmpty()) + .toArray(String[]::new)); + conf.setLoadActiveListeningFailDir( + properties.getProperty( + "load_active_listening_fail_dir", + ConfigurationFileUtils.getConfigurationDefaultValue("load_active_listening_fail_dir"))); + } + + private void loadPipeHotModifiedProp(TrimProperties properties) throws IOException { + PipeDescriptor.loadPipeProps(commonDescriptor.getConfig(), properties, true); } @SuppressWarnings("squid:S3518") // "proportionSum" can't be zero - private void loadUDFProps(Properties properties) { + private void loadUDFProps(TrimProperties properties) { String initialByteArrayLengthForMemoryControl = properties.getProperty("udf_initial_byte_array_length_for_memory_control"); if (initialByteArrayLengthForMemoryControl != null) { @@ -2207,7 +2629,7 @@ private void loadUDFProps(Properties properties) { } } - private void initThriftSSL(Properties properties) { + private void 
initThriftSSL(TrimProperties properties) { conf.setEnableSSL( Boolean.parseBoolean( properties.getProperty("enable_thrift_ssl", Boolean.toString(conf.isEnableSSL())))); @@ -2215,8 +2637,8 @@ private void initThriftSSL(Properties properties) { conf.setKeyStorePwd(properties.getProperty("key_store_pwd", conf.getKeyStorePath()).trim()); } - private void loadTriggerProps(Properties properties) { - conf.setTriggerDir(properties.getProperty("trigger_lib_dir", conf.getTriggerDir())); + private void loadTriggerProps(TrimProperties properties) { + conf.setTriggerDir(properties.getProperty("trigger_lib_dir", conf.getTriggerDir()).trim()); conf.setRetryNumToFindStatefulTrigger( Integer.parseInt( properties.getProperty( @@ -2262,8 +2684,11 @@ private void loadTriggerProps(Properties properties) { Integer.toString(conf.getTriggerForwardMQTTPoolSize())))); } - private void loadPipeProps(Properties properties) { - conf.setPipeLibDir(properties.getProperty("pipe_lib_dir", conf.getPipeLibDir())); + private void loadPipeProps(TrimProperties properties) { + conf.setPipeLibDir( + Optional.ofNullable(properties.getProperty("pipe_lib_dir", conf.getPipeLibDir())) + .map(String::trim) + .orElse(conf.getPipeLibDir())); conf.setPipeReceiverFileDirs( Arrays.stream( @@ -2289,7 +2714,7 @@ private void loadPipeProps(Properties properties) { .toArray(String[]::new)); } - private void loadCQProps(Properties properties) { + private void loadCQProps(TrimProperties properties) { conf.setContinuousQueryThreadNum( Integer.parseInt( properties.getProperty( @@ -2306,7 +2731,7 @@ private void loadCQProps(Properties properties) { false)); } - public void loadClusterProps(Properties properties) throws IOException { + public void loadClusterProps(TrimProperties properties) throws IOException { String configNodeUrls = properties.getProperty(IoTDBConstant.DN_SEED_CONFIG_NODE); if (configNodeUrls == null) { configNodeUrls = properties.getProperty(IoTDBConstant.DN_TARGET_CONFIG_NODE_LIST); @@ -2361,7 +2786,7 @@ public void loadClusterProps(Properties properties) throws IOException { .trim())); } - public void loadShuffleProps(Properties properties) { + public void loadShuffleProps(TrimProperties properties) { conf.setMppDataExchangePort( Integer.parseInt( properties.getProperty( @@ -2409,6 +2834,9 @@ public TSEncoding getDefaultEncodingByType(TSDataType dataType) { return conf.getDefaultFloatEncoding(); case DOUBLE: return conf.getDefaultDoubleEncoding(); + case STRING: + case BLOB: + case TEXT: default: return conf.getDefaultTextEncoding(); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBStartCheck.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBStartCheck.java index 989a3328aa775..1795ffc4a8fb1 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBStartCheck.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBStartCheck.java @@ -95,6 +95,7 @@ public class IoTDBStartCheck { private static final String IOTDB_VERSION_STRING = "iotdb_version"; private static final String COMMIT_ID_STRING = "commit_id"; private static final String DATA_NODE_ID = "data_node_id"; + private static final String CLUSTER_ID = "cluster_id"; private static final String SCHEMA_REGION_CONSENSUS_PROTOCOL = "schema_region_consensus_protocol"; private static final String DATA_REGION_CONSENSUS_PROTOCOL = "data_region_consensus_protocol"; // endregion @@ -267,6 +268,9 @@ private void checkImmutableSystemProperties() throws IOException { if 
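With the new STRING and BLOB cases in getDefaultEncodingByType above, every string-like type now falls through to the text default. A small usage sketch, assuming the stock default_text_encoding (PLAIN) has not been overridden:

// All three resolve to conf.getDefaultTextEncoding() after this change.
TSEncoding textEncoding = getDefaultEncodingByType(TSDataType.TEXT);
TSEncoding blobEncoding = getDefaultEncodingByType(TSDataType.BLOB); // new case
TSEncoding stringEncoding = getDefaultEncodingByType(TSDataType.STRING); // new case
// textEncoding == blobEncoding == stringEncoding, e.g. PLAIN by default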
(properties.containsKey(DATA_NODE_ID)) { config.setDataNodeId(Integer.parseInt(properties.getProperty(DATA_NODE_ID))); } + if (properties.containsKey(CLUSTER_ID)) { + config.setClusterId(properties.getProperty(CLUSTER_ID)); + } if (properties.containsKey(SCHEMA_REGION_CONSENSUS_PROTOCOL)) { config.setSchemaRegionConsensusProtocolClass( properties.getProperty(SCHEMA_REGION_CONSENSUS_PROTOCOL)); @@ -289,6 +293,10 @@ public void serializeDataNodeId(int dataNodeId) throws IOException { systemPropertiesHandler.put(DATA_NODE_ID, String.valueOf(dataNodeId)); } + public void serializeClusterID(String clusterId) throws IOException { + systemPropertiesHandler.put(CLUSTER_ID, clusterId); + } + public boolean checkConsensusProtocolExists(TConsensusGroupType type) { if (type == TConsensusGroupType.DataRegion) { return properties.containsKey(DATA_REGION_CONSENSUS_PROTOCOL); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/rest/IoTDBRestServiceDescriptor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/rest/IoTDBRestServiceDescriptor.java index 48725ef1839d9..2e9f49917f030 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/rest/IoTDBRestServiceDescriptor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/rest/IoTDBRestServiceDescriptor.java @@ -20,6 +20,7 @@ import org.apache.iotdb.commons.conf.CommonConfig; import org.apache.iotdb.commons.conf.IoTDBConstant; +import org.apache.iotdb.commons.conf.TrimProperties; import org.apache.iotdb.db.conf.IoTDBConfig; import org.slf4j.Logger; @@ -33,7 +34,6 @@ import java.net.MalformedURLException; import java.net.URL; import java.nio.charset.StandardCharsets; -import java.util.Properties; public class IoTDBRestServiceDescriptor { private static final Logger logger = LoggerFactory.getLogger(IoTDBRestServiceDescriptor.class); @@ -43,9 +43,9 @@ public class IoTDBRestServiceDescriptor { protected IoTDBRestServiceDescriptor() { URL systemConfig = getPropsUrl(CommonConfig.SYSTEM_CONFIG_NAME); if (systemConfig != null) { - Properties properties = loadProps(CommonConfig.SYSTEM_CONFIG_NAME); - if (properties != null) { - loadProps(properties); + TrimProperties trimProperties = loadProps(CommonConfig.SYSTEM_CONFIG_NAME); + if (trimProperties != null) { + loadProps(trimProperties); } } } @@ -56,7 +56,7 @@ public static IoTDBRestServiceDescriptor getInstance() { /** load an property file. 
*/ @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning - private Properties loadProps(String configName) { + private TrimProperties loadProps(String configName) { URL url = getPropsUrl(configName); if (url == null) { logger.warn("Couldn't load the REST Service configuration from any of the known sources."); @@ -64,9 +64,9 @@ private Properties loadProps(String configName) { } try (InputStream inputStream = url.openStream()) { logger.info("Start to read config file {}", url); - Properties properties = new Properties(); - properties.load(new InputStreamReader(inputStream, StandardCharsets.UTF_8)); - return properties; + TrimProperties trimProperties = new TrimProperties(); + trimProperties.load(new InputStreamReader(inputStream, StandardCharsets.UTF_8)); + return trimProperties; } catch (FileNotFoundException e) { logger.warn("REST service fail to find config file {}", url, e); } catch (IOException e) { @@ -77,7 +77,7 @@ private Properties loadProps(String configName) { return null; } - private void loadProps(Properties properties) { + private void loadProps(TrimProperties properties) { conf.setEnableRestService( Boolean.parseBoolean( properties.getProperty( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/DataRegionConsensusImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/DataRegionConsensusImpl.java index 515a9f54ddca4..453bd9a1bdb9c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/DataRegionConsensusImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/DataRegionConsensusImpl.java @@ -23,7 +23,7 @@ import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.commons.consensus.ConsensusGroupId; import org.apache.iotdb.commons.consensus.DataRegionId; -import org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; import org.apache.iotdb.consensus.ConsensusFactory; import org.apache.iotdb.consensus.IConsensus; import org.apache.iotdb.consensus.config.ConsensusConfig; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataExecutionVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataExecutionVisitor.java index efd9859588ce3..cb634ad479bb6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataExecutionVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataExecutionVisitor.java @@ -27,7 +27,7 @@ import org.apache.iotdb.db.exception.WriteProcessException; import org.apache.iotdb.db.exception.WriteProcessRejectException; import org.apache.iotdb.db.exception.query.OutOfTTLException; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.listener.PipeInsertionDataNodeListener; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.listener.PipeInsertionDataNodeListener; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanVisitor; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedDeleteDataNode; @@ -221,6 +221,7 @@ public TSStatus visitPipeEnrichedInsertNode(PipeEnrichedInsertNode node, DataReg @Override public TSStatus visitDeleteData(DeleteDataNode node, DataRegion dataRegion) { + dataRegion.writeLock("deleteData"); try { for 
(PartialPath path : node.getPathList()) { PartialPath databaseToDelete = new PartialPath(dataRegion.getDatabaseName() + ".**"); @@ -246,6 +247,8 @@ public TSStatus visitDeleteData(DeleteDataNode node, DataRegion dataRegion) { } catch (IOException | IllegalPathException e) { LOGGER.error("Error in executing plan node: {}", node, e); return new TSStatus(TSStatusCode.WRITE_PROCESS_ERROR.getStatusCode()); + } finally { + dataRegion.writeUnlock(); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataRegionStateMachine.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataRegionStateMachine.java index 94f8f1a9e1ea9..c430be4b17414 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataRegionStateMachine.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataRegionStateMachine.java @@ -21,6 +21,7 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.conf.CommonDescriptor; +import org.apache.iotdb.commons.consensus.ConsensusGroupId; import org.apache.iotdb.commons.consensus.DataRegionId; import org.apache.iotdb.consensus.common.DataSet; import org.apache.iotdb.consensus.common.request.IConsensusRequest; @@ -28,14 +29,10 @@ import org.apache.iotdb.consensus.iot.log.GetConsensusReqReaderPlan; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.consensus.statemachine.BaseStateMachine; +import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; import org.apache.iotdb.db.queryengine.execution.fragment.FragmentInstanceManager; import org.apache.iotdb.db.queryengine.plan.planner.plan.FragmentInstance; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode; -import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertMultiTabletsNode; -import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; -import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode; -import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowsNode; -import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertTabletNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.SearchNode; import org.apache.iotdb.db.storageengine.StorageEngine; import org.apache.iotdb.db.storageengine.buffer.BloomFilterCache; @@ -46,7 +43,6 @@ import org.apache.iotdb.db.storageengine.dataregion.snapshot.SnapshotTaker; import org.apache.iotdb.rpc.TSStatusCode; -import org.apache.tsfile.write.UnSupportedDataTypeException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -149,92 +145,38 @@ public void loadSnapshot(File latestSnapshotRootDir) { } } - protected PlanNode grabInsertNode(IndexedConsensusRequest indexedRequest) { - List<InsertNode> insertNodes = new ArrayList<>(indexedRequest.getRequests().size()); + protected PlanNode grabPlanNode(IndexedConsensusRequest indexedRequest) { + List<SearchNode> searchNodes = new ArrayList<>(); + PlanNode onlyOne = null; for (IConsensusRequest req : indexedRequest.getRequests()) { // PlanNode in IndexedConsensusRequest should always be InsertNode PlanNode planNode = getPlanNode(req); if (planNode instanceof SearchNode) { ((SearchNode) planNode).setSearchIndex(indexedRequest.getSearchIndex()); - } - if (planNode instanceof InsertNode) { - InsertNode innerNode = (InsertNode) planNode; - insertNodes.add(innerNode); - } else if (indexedRequest.getRequests().size() ==
1) { - // If the planNode is not InsertNode, it is expected that the IndexedConsensusRequest only - // contains one request - return planNode; + searchNodes.add((SearchNode) planNode); } else { - throw new IllegalArgumentException( - "PlanNodes in IndexedConsensusRequest are not InsertNode and " - + "the size of requests are larger than 1"); + logger.warn("Unexpected PlanNode type {}, which is not SearchNode", planNode.getClass()); + if (onlyOne == null) { + onlyOne = planNode; + } else { + throw new IllegalArgumentException( + String.format( + "There are two types of PlanNode in one request: %s and %s", + onlyOne.getClass(), planNode.getClass())); + } } } - return mergeInsertNodes(insertNodes); - } - - /** - * Merge insert nodes sharing same search index ( e.g. tablet-100, tablet-100, tablet-100 will be - * merged to one multi-tablet).
- * Notice: the continuity of insert nodes sharing same search index should be protected by the - * upper layer. - * - * @exception RuntimeException when insertNodes is empty - */ - protected InsertNode mergeInsertNodes(List insertNodes) { - int size = insertNodes.size(); - if (size == 0) { - throw new RuntimeException(); - } - if (size == 1) { - return insertNodes.get(0); - } - - InsertNode result; - List index = new ArrayList<>(); - int i = 0; - switch (insertNodes.get(0).getType()) { - case INSERT_TABLET: - // merge to InsertMultiTabletsNode - List insertTabletNodes = new ArrayList<>(size); - for (InsertNode insertNode : insertNodes) { - insertTabletNodes.add((InsertTabletNode) insertNode); - index.add(i); - i++; - } - result = - new InsertMultiTabletsNode( - insertNodes.get(0).getPlanNodeId(), index, insertTabletNodes); - break; - case INSERT_ROW: - // merge to InsertRowsNode - List insertRowNodes = new ArrayList<>(size); - for (InsertNode insertNode : insertNodes) { - insertRowNodes.add((InsertRowNode) insertNode); - index.add(i); - i++; - } - result = new InsertRowsNode(insertNodes.get(0).getPlanNodeId(), index, insertRowNodes); - break; - case INSERT_ROWS: - // merge to InsertRowsNode - List list = new ArrayList<>(); - for (InsertNode insertNode : insertNodes) { - for (InsertRowNode insertRowNode : ((InsertRowsNode) insertNode).getInsertRowNodeList()) { - list.add(insertRowNode); - index.add(i); - i++; - } - } - result = new InsertRowsNode(insertNodes.get(0).getPlanNodeId(), index, list); - break; - default: - throw new UnSupportedDataTypeException( - "Unsupported node type " + insertNodes.get(0).getType()); + if (onlyOne != null) { + if (!searchNodes.isEmpty()) { + throw new IllegalArgumentException( + String.format( + "There are two types of PlanNode in one request: %s and SearchNode", + onlyOne.getClass())); + } + return onlyOne; } - result.setSearchIndex(insertNodes.get(0).getSearchIndex()); - result.setDevicePath(insertNodes.get(0).getDevicePath()); - return result; + // searchNodes should never be empty here + return searchNodes.get(0).merge(searchNodes); } @Override @@ -303,13 +245,24 @@ public DataSet read(IConsensusRequest request) { try { fragmentInstance = getFragmentInstance(request); } catch (IllegalArgumentException e) { - logger.error(e.getMessage()); + logger.error("Get fragment instance failed", e); return null; } return QUERY_INSTANCE_MANAGER.execDataQueryFragmentInstance(fragmentInstance, region); } } + public boolean hasPipeReleaseRegionRelatedResource(ConsensusGroupId groupId) { + return PipeDataNodeAgent.task().hasPipeReleaseRegionRelatedResource(groupId.getId()); + } + + @Override + public boolean hasReleaseAllRegionRelatedResource(ConsensusGroupId groupId) { + boolean releaseAllResource = true; + releaseAllResource &= hasPipeReleaseRegionRelatedResource(groupId); + return releaseAllResource; + } + @Override public File getSnapshotRoot() { String snapshotDir = ""; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/IoTConsensusDataRegionStateMachine.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/IoTConsensusDataRegionStateMachine.java index 775f1bf6f49b6..240c1b1caa0fe 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/IoTConsensusDataRegionStateMachine.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/IoTConsensusDataRegionStateMachine.java @@ -79,7 +79,7 @@ public 
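The type-switch in the removed mergeInsertNodes is replaced by a polymorphic merge: each SearchNode subclass now owns the rule for collapsing a batch that shares one search index, which is what searchNodes.get(0).merge(searchNodes) relies on. A rough, self-contained sketch of that contract; PlanNode is stubbed here and the real upstream signatures differ in detail:

import java.util.List;

// Stub for illustration only; the real PlanNode carries ids, children, etc.
abstract class PlanNode {}

abstract class SearchNode extends PlanNode {
  protected long searchIndex;

  public SearchNode setSearchIndex(long searchIndex) {
    this.searchIndex = searchIndex;
    return this;
  }

  // Collapse a batch of nodes sharing one search index into a single node,
  // e.g. many tablet inserts into one multi-tablet insert, mirroring what the
  // removed mergeInsertNodes switch used to do centrally.
  public abstract SearchNode merge(List<SearchNode> searchNodes);
}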
IConsensusRequest deserializeRequest(IConsensusRequest request) { IConsensusRequest result; if (request instanceof IndexedConsensusRequest) { IndexedConsensusRequest indexedRequest = (IndexedConsensusRequest) request; - result = grabInsertNode(indexedRequest); + result = grabPlanNode(indexedRequest); } else if (request instanceof BatchIndexedConsensusRequest) { BatchIndexedConsensusRequest batchRequest = (BatchIndexedConsensusRequest) request; DeserializedBatchIndexedConsensusRequest deserializedRequest = @@ -88,7 +88,7 @@ public IConsensusRequest deserializeRequest(IConsensusRequest request) { batchRequest.getEndSyncIndex(), batchRequest.getRequests().size()); for (IndexedConsensusRequest indexedRequest : batchRequest.getRequests()) { - final PlanNode planNode = grabInsertNode(indexedRequest); + final PlanNode planNode = grabPlanNode(indexedRequest); if (planNode instanceof ComparableConsensusRequest) { final IoTProgressIndex ioTProgressIndex = new IoTProgressIndex(batchRequest.getSourcePeerId(), indexedRequest.getSyncIndex()); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaExecutionVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaExecutionVisitor.java index 6efdeed997e11..9698c2a463a55 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaExecutionVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaExecutionVisitor.java @@ -29,7 +29,7 @@ import org.apache.iotdb.db.exception.metadata.MeasurementAlreadyExistException; import org.apache.iotdb.db.exception.metadata.template.TemplateIsInUseException; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; -import org.apache.iotdb.db.pipe.extractor.schemaregion.SchemaRegionListeningQueue; +import org.apache.iotdb.db.pipe.source.schemaregion.SchemaRegionListeningQueue; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanVisitor; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.WritePlanNode; @@ -79,16 +79,18 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Objects; /** Schema write {@link PlanNode} visitor */ public class SchemaExecutionVisitor extends PlanVisitor { private static final Logger logger = LoggerFactory.getLogger(SchemaExecutionVisitor.class); @Override - public TSStatus visitCreateTimeSeries(CreateTimeSeriesNode node, ISchemaRegion schemaRegion) { + public TSStatus visitCreateTimeSeries( + final CreateTimeSeriesNode node, final ISchemaRegion schemaRegion) { try { schemaRegion.createTimeSeries(node, -1); - } catch (MetadataException e) { + } catch (final MetadataException e) { logger.error("{}: MetaData error: ", IoTDBConstant.GLOBAL_DB_NAME, e); return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); } @@ -97,10 +99,25 @@ public TSStatus visitCreateTimeSeries(CreateTimeSeriesNode node, ISchemaRegion s @Override public TSStatus visitCreateAlignedTimeSeries( - CreateAlignedTimeSeriesNode node, ISchemaRegion schemaRegion) { + final CreateAlignedTimeSeriesNode node, final ISchemaRegion schemaRegion) { try { - schemaRegion.createAlignedTimeSeries(node); - } catch (MetadataException e) { + if (node.isGeneratedByPipe()) { + final ICreateAlignedTimeSeriesPlan plan = + SchemaRegionWritePlanFactory.getCreateAlignedTimeSeriesPlan( + node.getDevicePath(), + 
node.getMeasurements(), + node.getDataTypes(), + node.getEncodings(), + node.getCompressors(), + node.getAliasList(), + node.getTagsList(), + node.getAttributesList()); + ((CreateAlignedTimeSeriesPlanImpl) plan).setWithMerge(true); + schemaRegion.createAlignedTimeSeries(plan); + } else { + schemaRegion.createAlignedTimeSeries(node); + } + } catch (final MetadataException e) { logger.error("{}: MetaData error: ", IoTDBConstant.GLOBAL_DB_NAME, e); return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); } @@ -109,22 +126,24 @@ public TSStatus visitCreateAlignedTimeSeries( @Override public TSStatus visitCreateMultiTimeSeries( - CreateMultiTimeSeriesNode node, ISchemaRegion schemaRegion) { - Map<PartialPath, MeasurementGroup> measurementGroupMap = node.getMeasurementGroupMap(); - List<TSStatus> failingStatus = new ArrayList<>(); + final CreateMultiTimeSeriesNode node, final ISchemaRegion schemaRegion) { + final Map<PartialPath, MeasurementGroup> measurementGroupMap = node.getMeasurementGroupMap(); + final List<TSStatus> failingStatus = new ArrayList<>(); PartialPath devicePath; MeasurementGroup measurementGroup; int size; - for (Map.Entry<PartialPath, MeasurementGroup> entry : measurementGroupMap.entrySet()) { + for (final Map.Entry<PartialPath, MeasurementGroup> entry : measurementGroupMap.entrySet()) { devicePath = entry.getKey(); measurementGroup = entry.getValue(); size = measurementGroup.getMeasurements().size(); // todo implement batch creation of one device in SchemaRegion for (int i = 0; i < size; i++) { try { - schemaRegion.createTimeSeries( - transformToCreateTimeSeriesPlan(devicePath, measurementGroup, i), -1); - } catch (MetadataException e) { + final ICreateTimeSeriesPlan createTimeSeriesPlan = + transformToCreateTimeSeriesPlan(devicePath, measurementGroup, i); + ((CreateTimeSeriesPlanImpl) createTimeSeriesPlan).setWithMerge(node.isGeneratedByPipe()); + schemaRegion.createTimeSeries(createTimeSeriesPlan, -1); + } catch (final MetadataException e) { logger.error("{}: MetaData error: ", IoTDBConstant.GLOBAL_DB_NAME, e); failingStatus.add(RpcUtils.getStatus(e.getErrorCode(), e.getMessage())); } @@ -138,7 +157,7 @@ public TSStatus visitCreateMultiTimeSeries( } private ICreateTimeSeriesPlan transformToCreateTimeSeriesPlan( - PartialPath devicePath, MeasurementGroup measurementGroup, int index) { + final PartialPath devicePath, final MeasurementGroup measurementGroup, final int index) { return SchemaRegionWritePlanFactory.getCreateTimeSeriesPlan( devicePath.concatNode(measurementGroup.getMeasurements().get(index)), measurementGroup.getDataTypes().get(index), @@ -170,7 +189,7 @@ public TSStatus visitInternalCreateTimeSeries( schemaRegion, alreadyExistingTimeSeries, failingStatus, - false); + node.isGeneratedByPipe()); } else { executeInternalCreateTimeSeries( devicePath, @@ -178,7 +197,7 @@ public TSStatus visitInternalCreateTimeSeries( schemaRegion, alreadyExistingTimeSeries, failingStatus, - false); + node.isGeneratedByPipe()); } if (!failingStatus.isEmpty()) { @@ -277,6 +296,10 @@ private void executeInternalCreateAlignedTimeSeries( final List<TSDataType> dataTypeList = measurementGroup.getDataTypes(); final List<TSEncoding> encodingList = measurementGroup.getEncodings(); final List<CompressionType> compressionTypeList = measurementGroup.getCompressors(); + final List<String> aliasList = measurementGroup.getAliasList(); + final List<Map<String, String>> tagsList = measurementGroup.getTagsList(); + final List<Map<String, String>> attributesList = measurementGroup.getAttributesList(); + final ICreateAlignedTimeSeriesPlan createAlignedTimeSeriesPlan = SchemaRegionWritePlanFactory.getCreateAlignedTimeSeriesPlan( devicePath, @@ -284,9 +307,10 @@ private void executeInternalCreateAlignedTimeSeries( dataTypeList,
encodingList, compressionTypeList, - null, - null, - null); + aliasList, + tagsList, + attributesList); + // With merge is only true for pipe to upsert the receiver alias/tags/attributes in historical // transfer. // For normal internal creation, the alias/tags/attributes are not set @@ -313,6 +337,38 @@ private void executeInternalCreateAlignedTimeSeries( encodingList.remove(index); compressionTypeList.remove(index); + if (Objects.nonNull(aliasList)) { + aliasList.remove(index); + } + if (Objects.nonNull(tagsList)) { + tagsList.remove(index); + } + if (Objects.nonNull(attributesList)) { + attributesList.remove(index); + } + + // If with merge is set, the lists are deep copied and need to be altered here. + // We still remove the element from the original list to help cascading pipe transfer + // schema. + // If this exception is thrown, the measurements, data types, etc. must be unchanged. + // Thus, the index for the copied lists is identical to that in the original lists. + if (withMerge) { + createAlignedTimeSeriesPlan.getMeasurements().remove(index); + createAlignedTimeSeriesPlan.getDataTypes().remove(index); + createAlignedTimeSeriesPlan.getEncodings().remove(index); + createAlignedTimeSeriesPlan.getCompressors().remove(index); + + if (Objects.nonNull(aliasList)) { + createAlignedTimeSeriesPlan.getAliasList().remove(index); + } + if (Objects.nonNull(tagsList)) { + createAlignedTimeSeriesPlan.getTagsList().remove(index); + } + if (Objects.nonNull(attributesList)) { + createAlignedTimeSeriesPlan.getAttributesList().remove(index); + } + } + if (measurementList.isEmpty()) { shouldRetry = false; } @@ -326,7 +382,8 @@ private void executeInternalCreateAlignedTimeSeries( } @Override - public TSStatus visitAlterTimeSeries(AlterTimeSeriesNode node, ISchemaRegion schemaRegion) { + public TSStatus visitAlterTimeSeries( + final AlterTimeSeriesNode node, final ISchemaRegion schemaRegion) { try { switch (node.getAlterType()) { case RENAME: @@ -362,12 +419,14 @@ public TSStatus visitAlterTimeSeries(AlterTimeSeriesNode node, ISchemaRegion sch } @Override - public TSStatus visitActivateTemplate(ActivateTemplateNode node, ISchemaRegion schemaRegion) { + public TSStatus visitActivateTemplate( + final ActivateTemplateNode node, final ISchemaRegion schemaRegion) { try { - Template template = ClusterTemplateManager.getInstance().getTemplate(node.getTemplateId()); + final Template template = + ClusterTemplateManager.getInstance().getTemplate(node.getTemplateId()); schemaRegion.activateSchemaTemplate(node, template); return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); - } catch (MetadataException e) { + } catch (final MetadataException e) { logger.error(e.getMessage(), e); return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); } @@ -375,40 +434,54 @@ public TSStatus visitActivateTemplate(ActivateTemplateNode node, ISchemaRegion s @Override public TSStatus visitBatchActivateTemplate( - BatchActivateTemplateNode node, ISchemaRegion schemaRegion) { - for (Map.Entry<PartialPath, Pair<Integer, Integer>> entry : + final BatchActivateTemplateNode node, final ISchemaRegion schemaRegion) { + final List<TSStatus> statusList = new ArrayList<>(); + final List<PartialPath> alreadyActivatedDeviceList = new ArrayList<>(); + for (final Map.Entry<PartialPath, Pair<Integer, Integer>> entry : node.getTemplateActivationMap().entrySet()) { - Template template = ClusterTemplateManager.getInstance().getTemplate(entry.getValue().left); + final Template template = + ClusterTemplateManager.getInstance().getTemplate(entry.getValue().left); try { schemaRegion.activateSchemaTemplate(
SchemaRegionWritePlanFactory.getActivateTemplateInClusterPlan( entry.getKey(), entry.getValue().right, entry.getValue().left), template); - } catch (MetadataException e) { - logger.error(e.getMessage(), e); - return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); + } catch (final MetadataException e) { + if (e.getErrorCode() == TSStatusCode.TEMPLATE_IS_IN_USE.getStatusCode()) { + alreadyActivatedDeviceList.add(entry.getKey()); + } else { + logger.error(e.getMessage(), e); + statusList.add(RpcUtils.getStatus(e.getErrorCode(), e.getMessage())); + } } } - return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); + if (!alreadyActivatedDeviceList.isEmpty()) { + final TemplateIsInUseException e = + new TemplateIsInUseException(alreadyActivatedDeviceList.toString()); + logger.error(e.getMessage(), e); + statusList.add(RpcUtils.getStatus(e.getErrorCode(), e.getMessage())); + } + return statusList.isEmpty() ? RpcUtils.SUCCESS_STATUS : RpcUtils.getStatus(statusList); } @Override public TSStatus visitInternalBatchActivateTemplate( - InternalBatchActivateTemplateNode node, ISchemaRegion schemaRegion) { - for (Map.Entry<PartialPath, Pair<Integer, Integer>> entry : + final InternalBatchActivateTemplateNode node, final ISchemaRegion schemaRegion) { + for (final Map.Entry<PartialPath, Pair<Integer, Integer>> entry : node.getTemplateActivationMap().entrySet()) { - Template template = ClusterTemplateManager.getInstance().getTemplate(entry.getValue().left); + final Template template = + ClusterTemplateManager.getInstance().getTemplate(entry.getValue().left); try { schemaRegion.activateSchemaTemplate( SchemaRegionWritePlanFactory.getActivateTemplateInClusterPlan( entry.getKey(), entry.getValue().right, entry.getValue().left), template); - } catch (TemplateIsInUseException e) { + } catch (final TemplateIsInUseException e) { logger.info( String.format( "Device Template has already been activated on path %s, there's no need to activate again.", entry.getKey())); - } catch (MetadataException e) { + } catch (final MetadataException e) { logger.error(e.getMessage(), e); return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); } @@ -435,22 +508,23 @@ public TSStatus visitConstructSchemaBlackList( @Override public TSStatus visitRollbackSchemaBlackList( - RollbackSchemaBlackListNode node, ISchemaRegion schemaRegion) { + final RollbackSchemaBlackListNode node, final ISchemaRegion schemaRegion) { try { schemaRegion.rollbackSchemaBlackList(node.getPatternTree()); return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); - } catch (MetadataException e) { + } catch (final MetadataException e) { logger.error(e.getMessage(), e); return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); } } @Override - public TSStatus visitDeleteTimeseries(DeleteTimeSeriesNode node, ISchemaRegion schemaRegion) { + public TSStatus visitDeleteTimeseries( + final DeleteTimeSeriesNode node, final ISchemaRegion schemaRegion) { try { schemaRegion.deleteTimeseriesInBlackList(node.getPatternTree()); return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); - } catch (MetadataException e) { + } catch (final MetadataException e) { logger.error(e.getMessage(), e); return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); } @@ -458,11 +532,12 @@ public TSStatus visitDeleteTimeseries(DeleteTimeSeriesNode s @Override public TSStatus visitPreDeactivateTemplate( - PreDeactivateTemplateNode node, ISchemaRegion schemaRegion) { + final PreDeactivateTemplateNode node, final ISchemaRegion schemaRegion) { try { - long preDeactivateNum = schemaRegion.constructSchemaBlackListWithTemplate(node); - return
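visitBatchActivateTemplate now degrades per device instead of aborting on the first failure: devices that already have the template are batched into one TemplateIsInUseException, other errors accumulate in statusList, and RpcUtils.getStatus(statusList) wraps them as sub-statuses. A caller-side fragment showing how such an aggregated status can be unpacked; the surrounding visitor/logger names are assumed context, not from this PR:

// Illustrative fragment: inspecting the aggregated status produced above.
TSStatus status = visitor.visitBatchActivateTemplate(node, schemaRegion);
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()
    && status.getSubStatus() != null) {
  for (TSStatus sub : status.getSubStatus()) {
    // One entry per failure, e.g. a single TEMPLATE_IS_IN_USE covering every
    // already-activated device in one batched message.
    logger.warn("Template activation failed: code={}, message={}", sub.getCode(), sub.getMessage());
  }
}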
RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS, String.valueOf(preDeactivateNum)); - } catch (MetadataException e) { + return RpcUtils.getStatus( + TSStatusCode.SUCCESS_STATUS, + String.valueOf(schemaRegion.constructSchemaBlackListWithTemplate(node))); + } catch (final MetadataException e) { logger.error(e.getMessage(), e); return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); } @@ -470,18 +545,19 @@ public TSStatus visitPreDeactivateTemplate( @Override public TSStatus visitRollbackPreDeactivateTemplate( - RollbackPreDeactivateTemplateNode node, ISchemaRegion schemaRegion) { + final RollbackPreDeactivateTemplateNode node, final ISchemaRegion schemaRegion) { try { schemaRegion.rollbackSchemaBlackListWithTemplate(node); return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); - } catch (MetadataException e) { + } catch (final MetadataException e) { logger.error(e.getMessage(), e); return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); } } @Override - public TSStatus visitDeactivateTemplate(DeactivateTemplateNode node, ISchemaRegion schemaRegion) { + public TSStatus visitDeactivateTemplate( + final DeactivateTemplateNode node, final ISchemaRegion schemaRegion) { try { schemaRegion.deactivateTemplateInBlackList(node); return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); @@ -492,15 +568,17 @@ public TSStatus visitDeactivateTemplate(DeactivateTemplateNode node, ISchemaRegi } @Override - public TSStatus visitCreateLogicalView(CreateLogicalViewNode node, ISchemaRegion schemaRegion) { - Map<PartialPath, ViewExpression> viewPathToSourceMap = node.getViewPathToSourceExpressionMap(); - List<TSStatus> failingStatus = new ArrayList<>(); - for (Map.Entry<PartialPath, ViewExpression> entry : viewPathToSourceMap.entrySet()) { + public TSStatus visitCreateLogicalView( + final CreateLogicalViewNode node, final ISchemaRegion schemaRegion) { + final Map<PartialPath, ViewExpression> viewPathToSourceMap = + node.getViewPathToSourceExpressionMap(); + final List<TSStatus> failingStatus = new ArrayList<>(); + for (final Map.Entry<PartialPath, ViewExpression> entry : viewPathToSourceMap.entrySet()) { try { schemaRegion.createLogicalView( SchemaRegionWritePlanFactory.getCreateLogicalViewPlan( entry.getKey(), entry.getValue())); - } catch (MetadataException e) { + } catch (final MetadataException e) { logger.error("{}: MetaData error: ", IoTDBConstant.GLOBAL_DB_NAME, e); failingStatus.add(RpcUtils.getStatus(e.getErrorCode(), e.getMessage())); } @@ -512,14 +590,15 @@ public TSStatus visitCreateLogicalView(CreateLogicalViewNode node, ISchemaRegion } @Override - public TSStatus visitAlterLogicalView(AlterLogicalViewNode node, ISchemaRegion schemaRegion) { - Map<PartialPath, ViewExpression> viewPathToSourceMap = node.getViewPathToSourceMap(); - List<TSStatus> failingStatus = new ArrayList<>(); - for (Map.Entry<PartialPath, ViewExpression> entry : viewPathToSourceMap.entrySet()) { + public TSStatus visitAlterLogicalView( + final AlterLogicalViewNode node, final ISchemaRegion schemaRegion) { + final Map<PartialPath, ViewExpression> viewPathToSourceMap = node.getViewPathToSourceMap(); + final List<TSStatus> failingStatus = new ArrayList<>(); + for (final Map.Entry<PartialPath, ViewExpression> entry : viewPathToSourceMap.entrySet()) { try { schemaRegion.alterLogicalView( SchemaRegionWritePlanFactory.getAlterLogicalViewPlan(entry.getKey(), entry.getValue())); - } catch (MetadataException e) { + } catch (final MetadataException e) { logger.warn("{}: MetaData error: ", IoTDBConstant.GLOBAL_DB_NAME, e); failingStatus.add(RpcUtils.getStatus(e.getErrorCode(), e.getMessage())); } @@ -532,10 +611,11 @@ public TSStatus visitAlterLogicalView(AlterLogicalViewNode s @Override public TSStatus visitConstructLogicalViewBlackList( - ConstructLogicalViewBlackListNode node,
ISchemaRegion schemaRegion) { + final ConstructLogicalViewBlackListNode node, final ISchemaRegion schemaRegion) { try { - long preDeletedNum = schemaRegion.constructLogicalViewBlackList(node.getPatternTree()); - return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS, String.valueOf(preDeletedNum)); + return RpcUtils.getStatus( + TSStatusCode.SUCCESS_STATUS, + String.valueOf(schemaRegion.constructLogicalViewBlackList(node.getPatternTree()))); } catch (MetadataException e) { logger.error(e.getMessage(), e); return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); @@ -544,11 +624,11 @@ public TSStatus visitConstructLogicalViewBlackList( @Override public TSStatus visitRollbackLogicalViewBlackList( - RollbackLogicalViewBlackListNode node, ISchemaRegion schemaRegion) { + final RollbackLogicalViewBlackListNode node, final ISchemaRegion schemaRegion) { try { schemaRegion.rollbackLogicalViewBlackList(node.getPatternTree()); return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); - } catch (MetadataException e) { + } catch (final MetadataException e) { logger.error(e.getMessage(), e); return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaRegionStateMachine.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaRegionStateMachine.java index ca2c5f2821253..f9efb4ef4e1ce 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaRegionStateMachine.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaRegionStateMachine.java @@ -28,7 +28,7 @@ import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.consensus.statemachine.BaseStateMachine; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; -import org.apache.iotdb.db.pipe.extractor.schemaregion.SchemaRegionListeningQueue; +import org.apache.iotdb.db.pipe.source.schemaregion.SchemaRegionListeningQueue; import org.apache.iotdb.db.queryengine.execution.fragment.FragmentInstanceManager; import org.apache.iotdb.db.queryengine.plan.planner.plan.FragmentInstance; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode; @@ -69,24 +69,33 @@ public void stop() { @Override public void notifyLeaderChanged(ConsensusGroupId groupId, int newLeaderId) { - if (schemaRegion.getSchemaRegionId().equals(groupId) - && newLeaderId != IoTDBDescriptor.getInstance().getConfig().getDataNodeId()) { + if (newLeaderId != IoTDBDescriptor.getInstance().getConfig().getDataNodeId()) { logger.info( "Current node [nodeId: {}] is no longer the schema region leader [regionId: {}], " + "the new leader is [nodeId:{}]", IoTDBDescriptor.getInstance().getConfig().getDataNodeId(), schemaRegion.getSchemaRegionId(), newLeaderId); + } + } - // Shutdown leader related service for schema pipe - PipeDataNodeAgent.runtime().notifySchemaLeaderUnavailable(schemaRegion.getSchemaRegionId()); + @Override + public void notifyNotLeader() { + int dataNodeId = IoTDBDescriptor.getInstance().getConfig().getDataNodeId(); + logger.info( + "Current node [nodeId: {}] is no longer the schema region leader [regionId: {}], " + + "start cleaning up related services.", + dataNodeId, + schemaRegion.getSchemaRegionId()); - logger.info( - "Current node [nodeId: {}] is no longer the schema region leader [regionId: {}], " - + "all services on old leader are unavailable now.", - 
IoTDBDescriptor.getInstance().getConfig().getDataNodeId(), - schemaRegion.getSchemaRegionId()); - } + // Shutdown leader related service for schema pipe + PipeDataNodeAgent.runtime().notifySchemaLeaderUnavailable(schemaRegion.getSchemaRegionId()); + + logger.info( + "Current node [nodeId: {}] is no longer the schema region leader [regionId: {}], " + + "all services on old leader are unavailable now.", + dataNodeId, + schemaRegion.getSchemaRegionId()); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/template/NoTemplateOnMNodeException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadAnalyzeException.java similarity index 69% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/template/NoTemplateOnMNodeException.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadAnalyzeException.java index 0f2a274235a4c..f062072597903 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/template/NoTemplateOnMNodeException.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadAnalyzeException.java @@ -15,20 +15,20 @@ * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. - * */ -package org.apache.iotdb.db.exception.metadata.template; +package org.apache.iotdb.db.exception; -import org.apache.iotdb.commons.exception.MetadataException; +import org.apache.iotdb.commons.exception.IoTDBException; import org.apache.iotdb.rpc.TSStatusCode; -public class NoTemplateOnMNodeException extends MetadataException { +public class LoadAnalyzeException extends IoTDBException { + + public LoadAnalyzeException(String message) { + super(message, TSStatusCode.VERIFY_METADATA_ERROR.getStatusCode()); + } - public NoTemplateOnMNodeException(String path) { - super( - String.format("NO template on %s", path), - TSStatusCode.TEMPLATE_NOT_SET.getStatusCode(), - true); + public LoadAnalyzeException(String message, int errorCode) { + super(message, errorCode); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadAnalyzeTypeMismatchException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadAnalyzeTypeMismatchException.java new file mode 100644 index 0000000000000..744eab2841acf --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadAnalyzeTypeMismatchException.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.exception; + +import org.apache.iotdb.rpc.TSStatusCode; + +public class LoadAnalyzeTypeMismatchException extends LoadAnalyzeException { + + public LoadAnalyzeTypeMismatchException(String message) { + super(message, TSStatusCode.VERIFY_METADATA_ERROR.getStatusCode()); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/ainode/GetModelInfoException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/ainode/GetModelInfoException.java new file mode 100644 index 0000000000000..03402d30c6435 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/ainode/GetModelInfoException.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.exception.ainode; + +import org.apache.iotdb.rpc.TSStatusCode; + +public class GetModelInfoException extends ModelException { + public GetModelInfoException(String message) { + super(message, TSStatusCode.GET_MODEL_INFO_ERROR); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/ainode/ModelException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/ainode/ModelException.java new file mode 100644 index 0000000000000..4a007e7048ce1 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/ainode/ModelException.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.exception.ainode; + +import org.apache.iotdb.rpc.TSStatusCode; + +public class ModelException extends RuntimeException { + TSStatusCode statusCode; + + public ModelException(String message, TSStatusCode code) { + super(message); + this.statusCode = code; + } + + public TSStatusCode getStatusCode() { + return statusCode; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/ainode/ModelNotFoundException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/ainode/ModelNotFoundException.java new file mode 100644 index 0000000000000..38a5105cded11 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/ainode/ModelNotFoundException.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.exception.ainode; + +import org.apache.iotdb.rpc.TSStatusCode; + +public class ModelNotFoundException extends ModelException { + public ModelNotFoundException(String message) { + super(message, TSStatusCode.MODEL_NOT_FOUND_ERROR); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadEmptyFileException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/LoadEmptyFileException.java similarity index 87% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadEmptyFileException.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/LoadEmptyFileException.java index 1c9e9bc4590fd..ea336c29f3071 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadEmptyFileException.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/LoadEmptyFileException.java @@ -17,11 +17,11 @@ * under the License. 
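The new AINode exceptions are unchecked and carry their TSStatusCode, so a call site can surface them as an RPC status without a separate mapping table. A hypothetical call site; fetchModelInfo is an assumed method, not part of this diff:

// Hypothetical usage of the new exception hierarchy.
try {
  return fetchModelInfo(modelId); // assumed AINode client call
} catch (ModelNotFoundException e) {
  // getStatusCode() yields MODEL_NOT_FOUND_ERROR here.
  return RpcUtils.getStatus(e.getStatusCode(), e.getMessage());
} catch (ModelException e) {
  // Any other model failure, e.g. GET_MODEL_INFO_ERROR from GetModelInfoException.
  return RpcUtils.getStatus(e.getStatusCode(), e.getMessage());
}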
*/ -package org.apache.iotdb.db.exception; +package org.apache.iotdb.db.exception.load; public class LoadEmptyFileException extends LoadFileException { - public LoadEmptyFileException() { - super("Cannot load an empty file"); + public LoadEmptyFileException(final String fileName) { + super(fileName); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadFileException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/LoadFileException.java similarity index 96% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadFileException.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/LoadFileException.java index 523044aab0aba..0d1c4c5388201 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadFileException.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/LoadFileException.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.db.exception; +package org.apache.iotdb.db.exception.load; import org.apache.iotdb.commons.exception.IoTDBException; import org.apache.iotdb.rpc.TSStatusCode; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadReadOnlyException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/LoadReadOnlyException.java similarity index 96% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadReadOnlyException.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/LoadReadOnlyException.java index d2da6e70df4ae..d10c859092907 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadReadOnlyException.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/LoadReadOnlyException.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.db.exception; +package org.apache.iotdb.db.exception.load; import org.apache.iotdb.commons.exception.IoTDBException; import org.apache.iotdb.rpc.TSStatusCode; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadRuntimeOutOfMemoryException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/LoadRuntimeOutOfMemoryException.java similarity index 95% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadRuntimeOutOfMemoryException.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/LoadRuntimeOutOfMemoryException.java index 050274e99408e..fa31054c053db 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/LoadRuntimeOutOfMemoryException.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/LoadRuntimeOutOfMemoryException.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.apache.iotdb.db.exception; +package org.apache.iotdb.db.exception.load; public class LoadRuntimeOutOfMemoryException extends RuntimeException { public LoadRuntimeOutOfMemoryException(String message) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/PartitionViolationException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/PartitionViolationException.java similarity index 96% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/PartitionViolationException.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/PartitionViolationException.java index 8b4c8dfb8ab02..ffb47633e51ef 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/PartitionViolationException.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/PartitionViolationException.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.db.exception; +package org.apache.iotdb.db.exception.load; public class PartitionViolationException extends LoadFileException { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/RegionReplicaSetChangedException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/RegionReplicaSetChangedException.java new file mode 100644 index 0000000000000..e362c0b66349a --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/load/RegionReplicaSetChangedException.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
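The load exceptions move into a dedicated org.apache.iotdb.db.exception.load package, and the RegionReplicaSetChangedException defined next gives the loader a typed signal for region migration, pairing with the load_tsfile_retry_count_on_region_change property introduced earlier in this diff. A hedged sketch of the retry loop such a signal enables; dispatchPieces and refreshReplicaSet are illustrative names, not from this PR:

// Assumed-shape retry loop around a load dispatch step.
int retriesLeft = conf.getLoadTsFileRetryCountOnRegionChange();
TRegionReplicaSet replicaSet = initialReplicaSet; // assumed starting routing
while (true) {
  try {
    dispatchPieces(tsFile, replicaSet); // hypothetical dispatch step
    break;
  } catch (RegionReplicaSetChangedException e) {
    if (retriesLeft-- <= 0) {
      throw e; // region kept migrating; give up and surface the typed error
    }
    replicaSet = refreshReplicaSet(tsFile); // hypothetical routing refresh
  }
}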
+ */ + +package org.apache.iotdb.db.exception.load; + +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; + +public class RegionReplicaSetChangedException extends LoadFileException { + + public RegionReplicaSetChangedException(TRegionReplicaSet original, TRegionReplicaSet current) { + super( + String.format( + "Region replica set changed from %s to %s while loading a TsFile, possibly due to region migration", + original, current)); + } + + public RegionReplicaSetChangedException() { + super("Region replica set changed while loading a TsFile, possibly due to region migration"); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/AliasAlreadyExistException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/AliasAlreadyExistException.java index 2df35dead3d7f..9b8dcd7ca698f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/AliasAlreadyExistException.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/AliasAlreadyExistException.java @@ -30,7 +30,7 @@ public class AliasAlreadyExistException extends MetadataException { public AliasAlreadyExistException(String path, String alias) { super( String.format("Alias [%s] for Path [%s] already exist", alias, path), - TSStatusCode.ALIAS_ALREADY_EXIST.getStatusCode()); - this.isUserException = true; + TSStatusCode.ALIAS_ALREADY_EXIST.getStatusCode(), + true); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/AlignedTimeseriesException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/AlignedTimeseriesException.java index 1594d6b203b44..f11c986afdc5d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/AlignedTimeseriesException.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/AlignedTimeseriesException.java @@ -25,8 +25,9 @@ public class AlignedTimeseriesException extends MetadataException { public AlignedTimeseriesException(String message, String path) { - super(String.format("%s (Path: %s)", message, path)); - errorCode = TSStatusCode.ALIGNED_TIMESERIES_ERROR.getStatusCode(); - this.isUserException = true; + super( + String.format("%s (Path: %s)", message, path), + TSStatusCode.ALIGNED_TIMESERIES_ERROR.getStatusCode(), + true); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/DatabaseConflictException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/DatabaseConflictException.java new file mode 100644 index 0000000000000..ee08ef4e7591c --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/DatabaseConflictException.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.exception.metadata; + +import org.apache.iotdb.commons.exception.MetadataException; +import org.apache.iotdb.rpc.TSStatusCode; + +public class DatabaseConflictException extends MetadataException { + + private final boolean isChild; + + private final String databasePath; + + public DatabaseConflictException(final String path, final boolean isChild) { + super(getMessage(path, isChild), TSStatusCode.DATABASE_CONFLICT.getStatusCode()); + this.isChild = isChild; + databasePath = path; + } + + public boolean isChild() { + return isChild; + } + + public String getDatabasePath() { + return databasePath; + } + + private static String getMessage(final String path, final boolean isChild) { + if (isChild) { + return String.format("some children of %s have already been created as databases", path); + } else { + return String.format("%s has already been created as a database", path); + } + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/DatabaseNotSetException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/DatabaseNotSetException.java index ef395f5de48b9..7e700c117e4c8 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/DatabaseNotSetException.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/DatabaseNotSetException.java @@ -27,14 +27,16 @@ public class DatabaseNotSetException extends MetadataException { private static final long serialVersionUID = 3739300272099030533L; public DatabaseNotSetException(String path) { - super(String.format("Database is not set for current seriesPath: [%s]", path)); - this.errorCode = TSStatusCode.DATABASE_NOT_EXIST.getStatusCode(); + super( + String.format("Database is not set for current seriesPath: [%s]", path), + TSStatusCode.DATABASE_NOT_EXIST.getStatusCode()); } public DatabaseNotSetException(String path, boolean isUserException) { - super(String.format("Database is not set for current seriesPath: [%s]", path)); - this.isUserException = isUserException; - this.errorCode = TSStatusCode.DATABASE_NOT_EXIST.getStatusCode(); + super( + String.format("Database is not set for current seriesPath: [%s]", path), + TSStatusCode.DATABASE_NOT_EXIST.getStatusCode(), + isUserException); } public DatabaseNotSetException(String path, String reason) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/IllegalParameterOfPathException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/IllegalParameterOfPathException.java index febe7b000b9a5..9ae6e79f40689 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/IllegalParameterOfPathException.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/IllegalParameterOfPathException.java @@ -25,8 +25,9 @@ public class IllegalParameterOfPathException extends MetadataException { public IllegalParameterOfPathException(String msg, String path) { - super(String.format("%s. Failed to create timeseries for path %s", msg, path)); - errorCode = TSStatusCode.ILLEGAL_PARAMETER.getStatusCode(); - this.isUserException = true; + super( + String.format("%s. 
Failed to create timeseries for path %s", msg, path), + TSStatusCode.ILLEGAL_PARAMETER.getStatusCode(), + true); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/MeasurementAlreadyExistException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/MeasurementAlreadyExistException.java index 3b63c01eeff96..a3d5adabac8bc 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/MeasurementAlreadyExistException.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/MeasurementAlreadyExistException.java @@ -31,8 +31,8 @@ public class MeasurementAlreadyExistException extends MetadataException { public MeasurementAlreadyExistException(String path, MeasurementPath measurementPath) { super( String.format("Path [%s] already exist", path), - TSStatusCode.TIMESERIES_ALREADY_EXIST.getStatusCode()); - this.isUserException = true; + TSStatusCode.TIMESERIES_ALREADY_EXIST.getStatusCode(), + true); this.measurementPath = measurementPath; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/PathAlreadyExistException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/PathAlreadyExistException.java index d91ea75dba089..5da3c957cba98 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/PathAlreadyExistException.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/PathAlreadyExistException.java @@ -30,7 +30,7 @@ public class PathAlreadyExistException extends MetadataException { public PathAlreadyExistException(String path) { super( String.format("Path [%s] already exist", path), - TSStatusCode.PATH_ALREADY_EXIST.getStatusCode()); - this.isUserException = true; + TSStatusCode.PATH_ALREADY_EXIST.getStatusCode(), + true); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/template/TemplateIncompatibleException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/template/TemplateIncompatibleException.java index bb1d0c1a99710..cc39a3b50b22b 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/template/TemplateIncompatibleException.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/template/TemplateIncompatibleException.java @@ -32,8 +32,8 @@ public TemplateIncompatibleException( String.format( "Cannot create timeseries [%s] since device template [%s] already set on path [%s].", path, templateName, templateSetPath), - TSStatusCode.TEMPLATE_INCOMPATIBLE.getStatusCode()); - this.isUserException = true; + TSStatusCode.TEMPLATE_INCOMPATIBLE.getStatusCode(), + true); } public TemplateIncompatibleException(String templateName, PartialPath templateSetPath) { @@ -42,12 +42,11 @@ public TemplateIncompatibleException(String templateName, PartialPath templateSe "Cannot set device template [%s] to path [%s] " + "since there's timeseries under path [%s].", templateName, templateSetPath, templateSetPath), - TSStatusCode.TEMPLATE_INCOMPATIBLE.getStatusCode()); - this.isUserException = true; + TSStatusCode.TEMPLATE_INCOMPATIBLE.getStatusCode(), + true); } public TemplateIncompatibleException(String reason) { - super(reason, TSStatusCode.TEMPLATE_INCOMPATIBLE.getStatusCode()); - this.isUserException = true; + super(reason, TSStatusCode.TEMPLATE_INCOMPATIBLE.getStatusCode(), true); } } diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/query/QueryTimeoutRuntimeException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/query/QueryTimeoutRuntimeException.java index ecabba0542f0e..dec3081ec7a5d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/query/QueryTimeoutRuntimeException.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/query/QueryTimeoutRuntimeException.java @@ -29,4 +29,8 @@ public QueryTimeoutRuntimeException(long startTime, long currentTime, long timeo String.format( QUERY_TIMEOUT_EXCEPTION_MESSAGE, startTime, startTime + timeout, currentTime)); } + + public QueryTimeoutRuntimeException(String message) { + super(message); + } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureAbortedException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/runtime/ModelInferenceProcessException.java similarity index 76% rename from iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureAbortedException.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/runtime/ModelInferenceProcessException.java index 31d10e263478e..586c624a8d33e 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureAbortedException.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/runtime/ModelInferenceProcessException.java @@ -17,14 +17,11 @@ * under the License. */ -package org.apache.iotdb.confignode.procedure.exception; +package org.apache.iotdb.db.exception.runtime; -public class ProcedureAbortedException extends ProcedureException { - public ProcedureAbortedException() { - super(); - } +public class ModelInferenceProcessException extends RuntimeException { - public ProcedureAbortedException(String msg) { - super(msg); + public ModelInferenceProcessException(String message) { + super(message); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/sql/StatementAnalyzeException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/sql/StatementAnalyzeException.java index 3ca1e397c375e..219d73b1b7ca3 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/sql/StatementAnalyzeException.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/sql/StatementAnalyzeException.java @@ -28,4 +28,8 @@ public StatementAnalyzeException(String message) { public StatementAnalyzeException(Exception cause) { super(cause); } + + public StatementAnalyzeException(String message, Throwable cause) { + super(message, cause); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/PipeDataNodePluginAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/PipeDataNodePluginAgent.java index c93bb5a588209..31f404de9d29c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/PipeDataNodePluginAgent.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/PipeDataNodePluginAgent.java @@ -19,11 +19,11 @@ package org.apache.iotdb.db.pipe.agent.plugin; -import org.apache.iotdb.commons.pipe.plugin.meta.DataNodePipePluginMetaKeeper; -import org.apache.iotdb.commons.pipe.plugin.meta.PipePluginMeta; -import org.apache.iotdb.commons.pipe.plugin.service.PipePluginClassLoader; -import 
org.apache.iotdb.commons.pipe.plugin.service.PipePluginClassLoaderManager; -import org.apache.iotdb.commons.pipe.plugin.service.PipePluginExecutableManager; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.DataNodePipePluginMetaKeeper; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.PipePluginMeta; +import org.apache.iotdb.commons.pipe.agent.plugin.service.PipePluginClassLoader; +import org.apache.iotdb.commons.pipe.agent.plugin.service.PipePluginClassLoaderManager; +import org.apache.iotdb.commons.pipe.agent.plugin.service.PipePluginExecutableManager; import org.apache.iotdb.db.pipe.agent.plugin.dataregion.PipeDataRegionPluginAgent; import org.apache.iotdb.db.pipe.agent.plugin.schemaregion.PipeSchemaRegionPluginAgent; import org.apache.iotdb.pipe.api.PipePlugin; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionPluginAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionPluginAgent.java index 92d130a12fe62..a8583559394d4 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionPluginAgent.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionPluginAgent.java @@ -19,12 +19,12 @@ package org.apache.iotdb.db.pipe.agent.plugin.dataregion; -import org.apache.iotdb.commons.pipe.agent.plugin.PipeConnectorConstructor; -import org.apache.iotdb.commons.pipe.agent.plugin.PipeExtractorConstructor; import org.apache.iotdb.commons.pipe.agent.plugin.PipePluginAgent; -import org.apache.iotdb.commons.pipe.agent.plugin.PipeProcessorConstructor; -import org.apache.iotdb.commons.pipe.plugin.meta.DataNodePipePluginMetaKeeper; -import org.apache.iotdb.commons.pipe.plugin.meta.PipePluginMetaKeeper; +import org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeProcessorConstructor; +import org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeSinkConstructor; +import org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeSourceConstructor; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.DataNodePipePluginMetaKeeper; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.PipePluginMetaKeeper; public class PipeDataRegionPluginAgent extends PipePluginAgent { @@ -33,10 +33,9 @@ public PipeDataRegionPluginAgent(DataNodePipePluginMetaKeeper pipePluginMetaKeep } @Override - protected PipeExtractorConstructor createPipeExtractorConstructor( + protected PipeSourceConstructor createPipeExtractorConstructor( PipePluginMetaKeeper pipePluginMetaKeeper) { - return new PipeDataRegionExtractorConstructor( - (DataNodePipePluginMetaKeeper) pipePluginMetaKeeper); + return new PipeDataRegionSourceConstructor((DataNodePipePluginMetaKeeper) pipePluginMetaKeeper); } @Override @@ -47,9 +46,8 @@ protected PipeProcessorConstructor createPipeProcessorConstructor( } @Override - protected PipeConnectorConstructor createPipeConnectorConstructor( + protected PipeSinkConstructor createPipeConnectorConstructor( PipePluginMetaKeeper pipePluginMetaKeeper) { - return new PipeDataRegionConnectorConstructor( - (DataNodePipePluginMetaKeeper) pipePluginMetaKeeper); + return new PipeDataRegionSinkConstructor((DataNodePipePluginMetaKeeper) pipePluginMetaKeeper); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionProcessorConstructor.java 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionProcessorConstructor.java index 6f172ae5fb832..44c6ef17800b5 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionProcessorConstructor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionProcessorConstructor.java @@ -19,11 +19,11 @@ package org.apache.iotdb.db.pipe.agent.plugin.dataregion; -import org.apache.iotdb.commons.pipe.agent.plugin.PipeProcessorConstructor; -import org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin; -import org.apache.iotdb.commons.pipe.plugin.builtin.processor.donothing.DoNothingProcessor; -import org.apache.iotdb.commons.pipe.plugin.builtin.processor.throwing.ThrowingExceptionProcessor; -import org.apache.iotdb.commons.pipe.plugin.meta.DataNodePipePluginMetaKeeper; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.processor.donothing.DoNothingProcessor; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.processor.throwing.ThrowingExceptionProcessor; +import org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeProcessorConstructor; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.DataNodePipePluginMetaKeeper; import org.apache.iotdb.db.pipe.processor.aggregate.AggregateProcessor; import org.apache.iotdb.db.pipe.processor.aggregate.operator.processor.StandardStatisticsOperatorProcessor; import org.apache.iotdb.db.pipe.processor.aggregate.window.processor.TumblingWindowingProcessor; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionConnectorConstructor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionSinkConstructor.java similarity index 58% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionConnectorConstructor.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionSinkConstructor.java index c9b073a01db8d..536cf71cdb8db 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionConnectorConstructor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionSinkConstructor.java @@ -19,22 +19,23 @@ package org.apache.iotdb.db.pipe.agent.plugin.dataregion; -import org.apache.iotdb.commons.pipe.agent.plugin.PipeConnectorConstructor; -import org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin; -import org.apache.iotdb.commons.pipe.plugin.builtin.connector.donothing.DoNothingConnector; -import org.apache.iotdb.commons.pipe.plugin.meta.DataNodePipePluginMetaKeeper; -import org.apache.iotdb.db.pipe.connector.protocol.airgap.IoTDBDataRegionAirGapConnector; -import org.apache.iotdb.db.pipe.connector.protocol.legacy.IoTDBLegacyPipeConnector; -import org.apache.iotdb.db.pipe.connector.protocol.opcua.OpcUaConnector; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.PipeConsensusAsyncConnector; -import org.apache.iotdb.db.pipe.connector.protocol.thrift.async.IoTDBDataRegionAsyncConnector; -import org.apache.iotdb.db.pipe.connector.protocol.thrift.sync.IoTDBDataRegionSyncConnector; -import org.apache.iotdb.db.pipe.connector.protocol.websocket.WebSocketConnector; -import 
org.apache.iotdb.db.pipe.connector.protocol.writeback.WriteBackConnector; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.sink.donothing.DoNothingSink; +import org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeSinkConstructor; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.DataNodePipePluginMetaKeeper; +import org.apache.iotdb.db.pipe.sink.protocol.airgap.IoTDBDataRegionAirGapSink; +import org.apache.iotdb.db.pipe.sink.protocol.legacy.IoTDBLegacyPipeSink; +import org.apache.iotdb.db.pipe.sink.protocol.opcda.OpcDaSink; +import org.apache.iotdb.db.pipe.sink.protocol.opcua.OpcUaSink; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.PipeConsensusAsyncSink; +import org.apache.iotdb.db.pipe.sink.protocol.thrift.async.IoTDBDataRegionAsyncSink; +import org.apache.iotdb.db.pipe.sink.protocol.thrift.sync.IoTDBDataRegionSyncSink; +import org.apache.iotdb.db.pipe.sink.protocol.websocket.WebSocketSink; +import org.apache.iotdb.db.pipe.sink.protocol.writeback.WriteBackSink; -class PipeDataRegionConnectorConstructor extends PipeConnectorConstructor { +class PipeDataRegionSinkConstructor extends PipeSinkConstructor { - PipeDataRegionConnectorConstructor(DataNodePipePluginMetaKeeper pipePluginMetaKeeper) { + PipeDataRegionSinkConstructor(DataNodePipePluginMetaKeeper pipePluginMetaKeeper) { super(pipePluginMetaKeeper); } @@ -42,63 +43,59 @@ class PipeDataRegionConnectorConstructor extends PipeConnectorConstructor { protected void initConstructors() { pluginConstructors.put( BuiltinPipePlugin.IOTDB_THRIFT_CONNECTOR.getPipePluginName(), - IoTDBDataRegionAsyncConnector::new); + IoTDBDataRegionAsyncSink::new); pluginConstructors.put( BuiltinPipePlugin.IOTDB_THRIFT_SSL_CONNECTOR.getPipePluginName(), - IoTDBDataRegionSyncConnector::new); + IoTDBDataRegionSyncSink::new); pluginConstructors.put( BuiltinPipePlugin.IOTDB_THRIFT_SYNC_CONNECTOR.getPipePluginName(), - IoTDBDataRegionSyncConnector::new); + IoTDBDataRegionSyncSink::new); pluginConstructors.put( BuiltinPipePlugin.IOTDB_THRIFT_ASYNC_CONNECTOR.getPipePluginName(), - IoTDBDataRegionAsyncConnector::new); + IoTDBDataRegionAsyncSink::new); pluginConstructors.put( BuiltinPipePlugin.PIPE_CONSENSUS_ASYNC_CONNECTOR.getPipePluginName(), - PipeConsensusAsyncConnector::new); + PipeConsensusAsyncSink::new); pluginConstructors.put( BuiltinPipePlugin.IOTDB_LEGACY_PIPE_CONNECTOR.getPipePluginName(), - IoTDBLegacyPipeConnector::new); + IoTDBLegacyPipeSink::new); pluginConstructors.put( BuiltinPipePlugin.IOTDB_AIR_GAP_CONNECTOR.getPipePluginName(), - IoTDBDataRegionAirGapConnector::new); + IoTDBDataRegionAirGapSink::new); pluginConstructors.put( - BuiltinPipePlugin.WEBSOCKET_CONNECTOR.getPipePluginName(), WebSocketConnector::new); + BuiltinPipePlugin.WEBSOCKET_CONNECTOR.getPipePluginName(), WebSocketSink::new); + pluginConstructors.put(BuiltinPipePlugin.OPC_UA_CONNECTOR.getPipePluginName(), OpcUaSink::new); + pluginConstructors.put(BuiltinPipePlugin.OPC_DA_CONNECTOR.getPipePluginName(), OpcDaSink::new); pluginConstructors.put( - BuiltinPipePlugin.OPC_UA_CONNECTOR.getPipePluginName(), OpcUaConnector::new); + BuiltinPipePlugin.DO_NOTHING_CONNECTOR.getPipePluginName(), DoNothingSink::new); pluginConstructors.put( - BuiltinPipePlugin.DO_NOTHING_CONNECTOR.getPipePluginName(), DoNothingConnector::new); - pluginConstructors.put( - BuiltinPipePlugin.WRITE_BACK_CONNECTOR.getPipePluginName(), WriteBackConnector::new); + 
BuiltinPipePlugin.WRITE_BACK_CONNECTOR.getPipePluginName(), WriteBackSink::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_THRIFT_SINK.getPipePluginName(), - IoTDBDataRegionAsyncConnector::new); + BuiltinPipePlugin.IOTDB_THRIFT_SINK.getPipePluginName(), IoTDBDataRegionAsyncSink::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_THRIFT_SSL_SINK.getPipePluginName(), - IoTDBDataRegionSyncConnector::new); + BuiltinPipePlugin.IOTDB_THRIFT_SSL_SINK.getPipePluginName(), IoTDBDataRegionSyncSink::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_THRIFT_SYNC_SINK.getPipePluginName(), - IoTDBDataRegionSyncConnector::new); + BuiltinPipePlugin.IOTDB_THRIFT_SYNC_SINK.getPipePluginName(), IoTDBDataRegionSyncSink::new); pluginConstructors.put( BuiltinPipePlugin.IOTDB_THRIFT_ASYNC_SINK.getPipePluginName(), - IoTDBDataRegionAsyncConnector::new); + IoTDBDataRegionAsyncSink::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_LEGACY_PIPE_SINK.getPipePluginName(), - IoTDBLegacyPipeConnector::new); + BuiltinPipePlugin.IOTDB_LEGACY_PIPE_SINK.getPipePluginName(), IoTDBLegacyPipeSink::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_AIR_GAP_SINK.getPipePluginName(), - IoTDBDataRegionAirGapConnector::new); + BuiltinPipePlugin.IOTDB_AIR_GAP_SINK.getPipePluginName(), IoTDBDataRegionAirGapSink::new); pluginConstructors.put( - BuiltinPipePlugin.WEBSOCKET_SINK.getPipePluginName(), WebSocketConnector::new); - pluginConstructors.put(BuiltinPipePlugin.OPC_UA_SINK.getPipePluginName(), OpcUaConnector::new); + BuiltinPipePlugin.WEBSOCKET_SINK.getPipePluginName(), WebSocketSink::new); + pluginConstructors.put(BuiltinPipePlugin.OPC_UA_SINK.getPipePluginName(), OpcUaSink::new); + pluginConstructors.put(BuiltinPipePlugin.OPC_DA_SINK.getPipePluginName(), OpcDaSink::new); pluginConstructors.put( - BuiltinPipePlugin.DO_NOTHING_SINK.getPipePluginName(), DoNothingConnector::new); + BuiltinPipePlugin.DO_NOTHING_SINK.getPipePluginName(), DoNothingSink::new); pluginConstructors.put( - BuiltinPipePlugin.WRITE_BACK_SINK.getPipePluginName(), WriteBackConnector::new); + BuiltinPipePlugin.WRITE_BACK_SINK.getPipePluginName(), WriteBackSink::new); pluginConstructors.put( - BuiltinPipePlugin.SUBSCRIPTION_SINK.getPipePluginName(), DoNothingConnector::new); + BuiltinPipePlugin.SUBSCRIPTION_SINK.getPipePluginName(), DoNothingSink::new); pluginConstructors.put( BuiltinPipePlugin.PIPE_CONSENSUS_ASYNC_SINK.getPipePluginName(), - PipeConsensusAsyncConnector::new); + PipeConsensusAsyncSink::new); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionExtractorConstructor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionSourceConstructor.java similarity index 66% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionExtractorConstructor.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionSourceConstructor.java index 4cebec06d5f4e..42caccb3e5f70 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionExtractorConstructor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/dataregion/PipeDataRegionSourceConstructor.java @@ -19,28 +19,28 @@ package org.apache.iotdb.db.pipe.agent.plugin.dataregion; -import org.apache.iotdb.commons.pipe.agent.plugin.PipeExtractorConstructor; -import 
org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin; -import org.apache.iotdb.commons.pipe.plugin.builtin.extractor.donothing.DoNothingExtractor; -import org.apache.iotdb.commons.pipe.plugin.meta.DataNodePipePluginMetaKeeper; -import org.apache.iotdb.db.pipe.extractor.dataregion.IoTDBDataRegionExtractor; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.source.donothing.DoNothingSource; +import org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeSourceConstructor; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.DataNodePipePluginMetaKeeper; +import org.apache.iotdb.db.pipe.source.dataregion.IoTDBDataRegionSource; -class PipeDataRegionExtractorConstructor extends PipeExtractorConstructor { +class PipeDataRegionSourceConstructor extends PipeSourceConstructor { - PipeDataRegionExtractorConstructor(DataNodePipePluginMetaKeeper pipePluginMetaKeeper) { + PipeDataRegionSourceConstructor(DataNodePipePluginMetaKeeper pipePluginMetaKeeper) { super(pipePluginMetaKeeper); } @Override protected void initConstructors() { pluginConstructors.put( - BuiltinPipePlugin.DO_NOTHING_EXTRACTOR.getPipePluginName(), DoNothingExtractor::new); + BuiltinPipePlugin.DO_NOTHING_EXTRACTOR.getPipePluginName(), DoNothingSource::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_EXTRACTOR.getPipePluginName(), IoTDBDataRegionExtractor::new); + BuiltinPipePlugin.IOTDB_EXTRACTOR.getPipePluginName(), IoTDBDataRegionSource::new); pluginConstructors.put( - BuiltinPipePlugin.DO_NOTHING_SOURCE.getPipePluginName(), DoNothingExtractor::new); + BuiltinPipePlugin.DO_NOTHING_SOURCE.getPipePluginName(), DoNothingSource::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_SOURCE.getPipePluginName(), IoTDBDataRegionExtractor::new); + BuiltinPipePlugin.IOTDB_SOURCE.getPipePluginName(), IoTDBDataRegionSource::new); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionPluginAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionPluginAgent.java index 348b7ce0585b2..549030073b1ee 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionPluginAgent.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionPluginAgent.java @@ -19,11 +19,11 @@ package org.apache.iotdb.db.pipe.agent.plugin.schemaregion; -import org.apache.iotdb.commons.pipe.agent.plugin.PipeConnectorConstructor; -import org.apache.iotdb.commons.pipe.agent.plugin.PipeExtractorConstructor; import org.apache.iotdb.commons.pipe.agent.plugin.PipePluginAgent; -import org.apache.iotdb.commons.pipe.agent.plugin.PipeProcessorConstructor; -import org.apache.iotdb.commons.pipe.plugin.meta.PipePluginMetaKeeper; +import org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeProcessorConstructor; +import org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeSinkConstructor; +import org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeSourceConstructor; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.PipePluginMetaKeeper; public class PipeSchemaRegionPluginAgent extends PipePluginAgent { @@ -32,9 +32,9 @@ public PipeSchemaRegionPluginAgent(PipePluginMetaKeeper pipePluginMetaKeeper) { } @Override - protected PipeExtractorConstructor createPipeExtractorConstructor( + protected PipeSourceConstructor 
createPipeExtractorConstructor( PipePluginMetaKeeper pipePluginMetaKeeper) { - return new PipeSchemaRegionExtractorConstructor(); + return new PipeSchemaRegionSourceConstructor(); } @Override @@ -44,8 +44,8 @@ protected PipeProcessorConstructor createPipeProcessorConstructor( } @Override - protected PipeConnectorConstructor createPipeConnectorConstructor( + protected PipeSinkConstructor createPipeConnectorConstructor( PipePluginMetaKeeper pipePluginMetaKeeper) { - return new PipeSchemaRegionConnectorConstructor(); + return new PipeSchemaRegionSinkConstructor(); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionProcessorConstructor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionProcessorConstructor.java index 099283dbf572b..51e5e64e598ee 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionProcessorConstructor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionProcessorConstructor.java @@ -19,9 +19,9 @@ package org.apache.iotdb.db.pipe.agent.plugin.schemaregion; -import org.apache.iotdb.commons.pipe.agent.plugin.PipeProcessorConstructor; -import org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin; -import org.apache.iotdb.commons.pipe.plugin.builtin.processor.donothing.DoNothingProcessor; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.processor.donothing.DoNothingProcessor; +import org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeProcessorConstructor; import org.apache.iotdb.pipe.api.PipeProcessor; class PipeSchemaRegionProcessorConstructor extends PipeProcessorConstructor { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionConnectorConstructor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionSinkConstructor.java similarity index 66% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionConnectorConstructor.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionSinkConstructor.java index 20132a9756aec..160ecf54c0158 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionConnectorConstructor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionSinkConstructor.java @@ -19,57 +19,51 @@ package org.apache.iotdb.db.pipe.agent.plugin.schemaregion; -import org.apache.iotdb.commons.pipe.agent.plugin.PipeConnectorConstructor; -import org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin; -import org.apache.iotdb.commons.pipe.plugin.builtin.connector.donothing.DoNothingConnector; -import org.apache.iotdb.db.pipe.connector.protocol.airgap.IoTDBSchemaRegionAirGapConnector; -import org.apache.iotdb.db.pipe.connector.protocol.thrift.sync.IoTDBSchemaRegionConnector; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.sink.donothing.DoNothingSink; +import org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeSinkConstructor; +import 
org.apache.iotdb.db.pipe.sink.protocol.airgap.IoTDBSchemaRegionAirGapSink; +import org.apache.iotdb.db.pipe.sink.protocol.thrift.sync.IoTDBSchemaRegionSink; import org.apache.iotdb.pipe.api.PipeConnector; -class PipeSchemaRegionConnectorConstructor extends PipeConnectorConstructor { +class PipeSchemaRegionSinkConstructor extends PipeSinkConstructor { @Override protected void initConstructors() { pluginConstructors.put( - BuiltinPipePlugin.IOTDB_THRIFT_CONNECTOR.getPipePluginName(), - IoTDBSchemaRegionConnector::new); + BuiltinPipePlugin.IOTDB_THRIFT_CONNECTOR.getPipePluginName(), IoTDBSchemaRegionSink::new); pluginConstructors.put( BuiltinPipePlugin.IOTDB_THRIFT_SSL_CONNECTOR.getPipePluginName(), - IoTDBSchemaRegionConnector::new); + IoTDBSchemaRegionSink::new); pluginConstructors.put( BuiltinPipePlugin.IOTDB_THRIFT_SYNC_CONNECTOR.getPipePluginName(), - IoTDBSchemaRegionConnector::new); + IoTDBSchemaRegionSink::new); pluginConstructors.put( BuiltinPipePlugin.IOTDB_THRIFT_ASYNC_CONNECTOR.getPipePluginName(), - IoTDBSchemaRegionConnector::new); + IoTDBSchemaRegionSink::new); pluginConstructors.put( BuiltinPipePlugin.IOTDB_AIR_GAP_CONNECTOR.getPipePluginName(), - IoTDBSchemaRegionAirGapConnector::new); + IoTDBSchemaRegionAirGapSink::new); pluginConstructors.put( - BuiltinPipePlugin.DO_NOTHING_CONNECTOR.getPipePluginName(), DoNothingConnector::new); + BuiltinPipePlugin.DO_NOTHING_CONNECTOR.getPipePluginName(), DoNothingSink::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_THRIFT_SINK.getPipePluginName(), IoTDBSchemaRegionConnector::new); + BuiltinPipePlugin.IOTDB_THRIFT_SINK.getPipePluginName(), IoTDBSchemaRegionSink::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_THRIFT_SSL_SINK.getPipePluginName(), - IoTDBSchemaRegionConnector::new); + BuiltinPipePlugin.IOTDB_THRIFT_SSL_SINK.getPipePluginName(), IoTDBSchemaRegionSink::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_THRIFT_SYNC_SINK.getPipePluginName(), - IoTDBSchemaRegionConnector::new); + BuiltinPipePlugin.IOTDB_THRIFT_SYNC_SINK.getPipePluginName(), IoTDBSchemaRegionSink::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_THRIFT_ASYNC_SINK.getPipePluginName(), - IoTDBSchemaRegionConnector::new); + BuiltinPipePlugin.IOTDB_THRIFT_ASYNC_SINK.getPipePluginName(), IoTDBSchemaRegionSink::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_AIR_GAP_SINK.getPipePluginName(), - IoTDBSchemaRegionAirGapConnector::new); + BuiltinPipePlugin.IOTDB_AIR_GAP_SINK.getPipePluginName(), IoTDBSchemaRegionAirGapSink::new); pluginConstructors.put( - BuiltinPipePlugin.DO_NOTHING_SINK.getPipePluginName(), DoNothingConnector::new); + BuiltinPipePlugin.DO_NOTHING_SINK.getPipePluginName(), DoNothingSink::new); } @Override public PipeConnector reflectPluginByKey(String pluginKey) { // TODO: support constructing plugin by reflection - return (PipeConnector) - pluginConstructors.getOrDefault(pluginKey, DoNothingConnector::new).get(); + return (PipeConnector) pluginConstructors.getOrDefault(pluginKey, DoNothingSink::new).get(); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionExtractorConstructor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionSourceConstructor.java similarity index 70% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionExtractorConstructor.java rename to 
iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionSourceConstructor.java index 876ab9f85174f..303e4987c2075 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionExtractorConstructor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/schemaregion/PipeSchemaRegionSourceConstructor.java @@ -19,31 +19,30 @@ package org.apache.iotdb.db.pipe.agent.plugin.schemaregion; -import org.apache.iotdb.commons.pipe.agent.plugin.PipeExtractorConstructor; -import org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin; -import org.apache.iotdb.commons.pipe.plugin.builtin.extractor.donothing.DoNothingExtractor; -import org.apache.iotdb.db.pipe.extractor.schemaregion.IoTDBSchemaRegionExtractor; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.source.donothing.DoNothingSource; +import org.apache.iotdb.commons.pipe.agent.plugin.constructor.PipeSourceConstructor; +import org.apache.iotdb.db.pipe.source.schemaregion.IoTDBSchemaRegionSource; import org.apache.iotdb.pipe.api.PipeExtractor; -class PipeSchemaRegionExtractorConstructor extends PipeExtractorConstructor { +class PipeSchemaRegionSourceConstructor extends PipeSourceConstructor { @Override protected void initConstructors() { pluginConstructors.put( - BuiltinPipePlugin.DO_NOTHING_EXTRACTOR.getPipePluginName(), DoNothingExtractor::new); + BuiltinPipePlugin.DO_NOTHING_EXTRACTOR.getPipePluginName(), DoNothingSource::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_EXTRACTOR.getPipePluginName(), IoTDBSchemaRegionExtractor::new); + BuiltinPipePlugin.IOTDB_EXTRACTOR.getPipePluginName(), IoTDBSchemaRegionSource::new); pluginConstructors.put( - BuiltinPipePlugin.DO_NOTHING_SOURCE.getPipePluginName(), DoNothingExtractor::new); + BuiltinPipePlugin.DO_NOTHING_SOURCE.getPipePluginName(), DoNothingSource::new); pluginConstructors.put( - BuiltinPipePlugin.IOTDB_SOURCE.getPipePluginName(), IoTDBSchemaRegionExtractor::new); + BuiltinPipePlugin.IOTDB_SOURCE.getPipePluginName(), IoTDBSchemaRegionSource::new); } @Override public PipeExtractor reflectPluginByKey(String pluginKey) { // TODO: support constructing plugin by reflection - return (PipeExtractor) - pluginConstructors.getOrDefault(pluginKey, DoNothingExtractor::new).get(); + return (PipeExtractor) pluginConstructors.getOrDefault(pluginKey, DoNothingSource::new).get(); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeAgentLauncher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeAgentLauncher.java index 4e92f94bf5157..1b56a00b325e3 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeAgentLauncher.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeAgentLauncher.java @@ -21,10 +21,10 @@ import org.apache.iotdb.commons.client.exception.ClientManagerException; import org.apache.iotdb.commons.exception.StartupException; -import org.apache.iotdb.commons.pipe.plugin.meta.PipePluginMeta; -import org.apache.iotdb.commons.pipe.plugin.service.PipePluginClassLoaderManager; -import org.apache.iotdb.commons.pipe.plugin.service.PipePluginExecutableManager; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.PipePluginMeta; +import 
org.apache.iotdb.commons.pipe.agent.plugin.service.PipePluginClassLoaderManager; +import org.apache.iotdb.commons.pipe.agent.plugin.service.PipePluginExecutableManager; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; import org.apache.iotdb.confignode.rpc.thrift.TGetAllPipeInfoResp; import org.apache.iotdb.confignode.rpc.thrift.TGetJarInListReq; import org.apache.iotdb.confignode.rpc.thrift.TGetJarInListResp; @@ -168,13 +168,14 @@ public static synchronized void launchPipeTaskAgent() { getAllPipeInfoResp.getAllPipeInfo().stream() .map( byteBuffer -> { - final PipeMeta pipeMeta = PipeMeta.deserialize(byteBuffer); + final PipeMeta pipeMeta = PipeMeta.deserialize4TaskAgent(byteBuffer); LOGGER.info( "Pulled pipe meta from config node: {}, recovering ...", pipeMeta); return pipeMeta; }) .collect(Collectors.toList())); - } catch (Exception e) { + } catch (Exception | Error e) { + // Ignore unexpected exceptions to ensure that DataNode can start normally LOGGER.info( "Failed to get pipe task meta from config node. Ignore the exception, " + "because config node may not be ready yet, and " diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeDataNodeRuntimeAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeDataNodeRuntimeAgent.java index dcf9f8b0435bb..70af01b288ebe 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeDataNodeRuntimeAgent.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeDataNodeRuntimeAgent.java @@ -25,17 +25,18 @@ import org.apache.iotdb.commons.exception.StartupException; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeCriticalException; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeException; +import org.apache.iotdb.commons.pipe.agent.runtime.PipePeriodicalJobExecutor; +import org.apache.iotdb.commons.pipe.agent.runtime.PipePeriodicalPhantomReferenceCleaner; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; import org.apache.iotdb.commons.service.IService; import org.apache.iotdb.commons.service.ServiceType; import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; -import org.apache.iotdb.db.pipe.extractor.schemaregion.SchemaRegionListeningQueue; -import org.apache.iotdb.db.pipe.progress.SimpleProgressIndexAssigner; import org.apache.iotdb.db.pipe.resource.PipeDataNodeHardlinkOrCopiedFileDirStartupCleaner; +import org.apache.iotdb.db.pipe.source.schemaregion.SchemaRegionListeningQueue; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; import org.apache.iotdb.db.service.ResourcesInformationHolder; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; @@ -62,6 +63,9 @@ public class PipeDataNodeRuntimeAgent implements IService { private final PipePeriodicalJobExecutor pipePeriodicalJobExecutor = new PipePeriodicalJobExecutor(); + private final PipePeriodicalPhantomReferenceCleaner pipePeriodicalPhantomReferenceCleaner = + new PipePeriodicalPhantomReferenceCleaner(); + //////////////////////////// System Service Interface //////////////////////////// public synchronized void preparePipeResources( @@ -81,12 +85,12 @@ public synchronized void start() throws 
StartupException { PipeConfig.getInstance().printAllConfigs(); PipeAgentLauncher.launchPipeTaskAgent(); - registerPeriodicalJob( - "PipeTaskAgent#restartAllStuckPipes", - PipeDataNodeAgent.task()::restartAllStuckPipes, - PipeConfig.getInstance().getPipeStuckRestartIntervalSeconds()); pipePeriodicalJobExecutor.start(); + if (PipeConfig.getInstance().getPipeEventReferenceTrackingEnabled()) { + pipePeriodicalPhantomReferenceCleaner.start(); + } + isShutdown.set(false); } @@ -231,4 +235,9 @@ public void stopPeriodicalJobExecutor() { public void clearPeriodicalJobExecutor() { pipePeriodicalJobExecutor.clear(); } + + public void registerPhantomReferenceCleanJob( + String id, Runnable periodicalJob, long intervalInSeconds) { + pipePeriodicalPhantomReferenceCleaner.register(id, periodicalJob, intervalInSeconds); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeSchemaRegionListenerManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeSchemaRegionListenerManager.java index bf7c6ed103a6c..d28929b36a216 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeSchemaRegionListenerManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeSchemaRegionListenerManager.java @@ -20,9 +20,9 @@ package org.apache.iotdb.db.pipe.agent.runtime; import org.apache.iotdb.commons.consensus.SchemaRegionId; -import org.apache.iotdb.commons.pipe.task.PipeTask; -import org.apache.iotdb.db.pipe.extractor.schemaregion.SchemaRegionListeningQueue; -import org.apache.iotdb.db.pipe.metric.PipeSchemaRegionListenerMetrics; +import org.apache.iotdb.commons.pipe.agent.task.PipeTask; +import org.apache.iotdb.db.pipe.metric.schema.PipeSchemaRegionListenerMetrics; +import org.apache.iotdb.db.pipe.source.schemaregion.SchemaRegionListeningQueue; import java.util.Map; import java.util.Set; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/progress/SimpleProgressIndexAssigner.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/SimpleProgressIndexAssigner.java similarity index 72% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/progress/SimpleProgressIndexAssigner.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/SimpleProgressIndexAssigner.java index 766945a934b66..06222f925a2e0 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/progress/SimpleProgressIndexAssigner.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/SimpleProgressIndexAssigner.java @@ -17,10 +17,9 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.progress; +package org.apache.iotdb.db.pipe.agent.runtime; import org.apache.iotdb.commons.consensus.index.impl.SimpleProgressIndex; -import org.apache.iotdb.commons.exception.StartupException; import org.apache.iotdb.commons.file.SystemFileFactory; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; @@ -31,6 +30,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; +import java.io.FileOutputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.concurrent.atomic.AtomicLong; @@ -55,17 +55,21 @@ public class SimpleProgressIndexAssigner { private int rebootTimes = 0; private final AtomicLong insertionRequestId = new AtomicLong(1); - public void start() throws StartupException { + public void start() { isSimpleConsensusEnable = IOTDB_CONFIG.getDataRegionConsensusProtocolClass().equals(SIMPLE_CONSENSUS); - LOGGER.info("Start SimpleProgressIndexAssigner ..."); + LOGGER.info("Starting SimpleProgressIndexAssigner ..."); try { makeDirIfNecessary(); parseRebootTimes(); recordRebootTimes(); + LOGGER.info( + "SimpleProgressIndexAssigner started successfully. isSimpleConsensusEnable: {}, rebootTimes: {}", + isSimpleConsensusEnable, + rebootTimes); } catch (Exception e) { - throw new StartupException(e); + LOGGER.error("Cannot start SimpleProgressIndexAssigner because of {}", e.getMessage(), e); } } @@ -86,15 +90,27 @@ private void parseRebootTimes() { try { String content = FileUtils.readFileToString(file, StandardCharsets.UTF_8); rebootTimes = Integer.parseInt(content); - } catch (IOException e) { - LOGGER.error("Cannot parse reboot times from file {}", file.getAbsolutePath(), e); - rebootTimes = 0; + } catch (final Exception e) { + rebootTimes = (int) (System.currentTimeMillis() / 1000); + LOGGER.error( + "Cannot parse reboot times from file {}, using the current time in seconds ({}) as the reboot times", + file.getAbsolutePath(), + rebootTimes, + e); } } - private void recordRebootTimes() throws IOException { - File file = SystemFileFactory.INSTANCE.getFile(PIPE_SYSTEM_DIR + REBOOT_TIMES_FILE_NAME); - FileUtils.writeStringToFile(file, String.valueOf(rebootTimes + 1), StandardCharsets.UTF_8); + private void recordRebootTimes() { + final File file = SystemFileFactory.INSTANCE.getFile(PIPE_SYSTEM_DIR + REBOOT_TIMES_FILE_NAME); + try (final FileOutputStream fos = new FileOutputStream(file, false)) { + fos.write(String.valueOf(rebootTimes + 1).getBytes(StandardCharsets.UTF_8)); + fos.flush(); + fos.getFD().sync(); + } catch (final Exception e) { + LOGGER.error( + "Cannot record reboot times {} to file {}; the reboot times will not be updated", + rebootTimes, + file.getAbsolutePath(), + e); + } } public void assignIfNeeded(InsertNode insertNode) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/PipeDataNodeTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTask.java similarity index 94% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/PipeDataNodeTask.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTask.java index 502bdec2c7cb1..d33ec44a86e53 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/PipeDataNodeTask.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTask.java @@ -17,10 +17,10 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.task; +package org.apache.iotdb.db.pipe.agent.task; -import org.apache.iotdb.commons.pipe.task.PipeTask; -import org.apache.iotdb.commons.pipe.task.stage.PipeTaskStage; +import org.apache.iotdb.commons.pipe.agent.task.PipeTask; +import org.apache.iotdb.commons.pipe.agent.task.stage.PipeTaskStage; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTaskAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTaskAgent.java index ab0e8cfafefdd..0e8ddef54ea2f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTaskAgent.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTaskAgent.java @@ -19,51 +19,58 @@ package org.apache.iotdb.db.pipe.agent.task; +import org.apache.iotdb.common.rpc.thrift.TPipeHeartbeatResp; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.concurrent.IoTThreadFactory; +import org.apache.iotdb.commons.concurrent.ThreadName; +import org.apache.iotdb.commons.concurrent.threadpool.WrappedThreadPoolExecutor; +import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.consensus.DataRegionId; import org.apache.iotdb.commons.consensus.SchemaRegionId; import org.apache.iotdb.commons.consensus.index.ProgressIndex; import org.apache.iotdb.commons.consensus.index.impl.MetaProgressIndex; import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.agent.task.PipeTask; import org.apache.iotdb.commons.pipe.agent.task.PipeTaskAgent; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeType; import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant; -import org.apache.iotdb.commons.pipe.task.PipeTask; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStatus; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeType; -import org.apache.iotdb.commons.service.metric.MetricService; -import org.apache.iotdb.commons.service.metric.enums.Tag; +import org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant; +import org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant; +import org.apache.iotdb.commons.pipe.config.constant.SystemConstant; import org.apache.iotdb.consensus.exception.ConsensusException; import org.apache.iotdb.consensus.pipe.consensuspipe.ConsensusPipeName; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.consensus.SchemaRegionConsensusImpl; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; -import org.apache.iotdb.db.pipe.extractor.dataregion.DataRegionListeningFilter; -import org.apache.iotdb.db.pipe.extractor.dataregion.IoTDBDataRegionExtractor; -import 
org.apache.iotdb.db.pipe.extractor.dataregion.realtime.listener.PipeInsertionDataNodeListener; -import org.apache.iotdb.db.pipe.extractor.schemaregion.SchemaRegionListeningFilter; -import org.apache.iotdb.db.pipe.metric.PipeDataNodeRemainingEventAndTimeMetrics; -import org.apache.iotdb.db.pipe.metric.PipeDataRegionExtractorMetrics; +import org.apache.iotdb.db.pipe.agent.task.builder.PipeDataNodeBuilder; +import org.apache.iotdb.db.pipe.agent.task.builder.PipeDataNodeTaskBuilder; +import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics; +import org.apache.iotdb.db.pipe.metric.overview.PipeTsFileToTabletsMetrics; import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; -import org.apache.iotdb.db.pipe.task.PipeDataNodeTask; -import org.apache.iotdb.db.pipe.task.builder.PipeDataNodeBuilder; -import org.apache.iotdb.db.pipe.task.builder.PipeDataNodeTaskBuilder; +import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager; +import org.apache.iotdb.db.pipe.source.dataregion.DataRegionListeningFilter; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.listener.PipeInsertionDataNodeListener; +import org.apache.iotdb.db.pipe.source.schemaregion.SchemaRegionListeningFilter; +import org.apache.iotdb.db.protocol.client.ConfigNodeClient; +import org.apache.iotdb.db.protocol.client.ConfigNodeClientManager; +import org.apache.iotdb.db.protocol.client.ConfigNodeInfo; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeOperateSchemaQueueNode; import org.apache.iotdb.db.schemaengine.SchemaEngine; import org.apache.iotdb.db.storageengine.StorageEngine; -import org.apache.iotdb.db.storageengine.dataregion.wal.WALManager; -import org.apache.iotdb.metrics.utils.MetricLevel; -import org.apache.iotdb.metrics.utils.SystemMetric; +import org.apache.iotdb.db.subscription.agent.SubscriptionAgent; import org.apache.iotdb.mpp.rpc.thrift.TDataNodeHeartbeatResp; import org.apache.iotdb.mpp.rpc.thrift.TPipeHeartbeatReq; -import org.apache.iotdb.mpp.rpc.thrift.TPipeHeartbeatResp; import org.apache.iotdb.mpp.rpc.thrift.TPushPipeMetaRespExceptionMessage; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; import org.apache.iotdb.pipe.api.exception.PipeException; +import org.apache.iotdb.rpc.TSStatusCode; import com.google.common.collect.ImmutableMap; import org.apache.thrift.TException; @@ -75,6 +82,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -83,15 +91,53 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.StreamSupport; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_END_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_HISTORY_ENABLE_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_HISTORY_ENABLE_KEY; +import static 
org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_HISTORY_END_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_HISTORY_START_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_PATH_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_PATTERN_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_ENABLE_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_ENABLE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_START_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_END_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_HISTORY_ENABLE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_HISTORY_END_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_HISTORY_START_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_PATH_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_PATTERN_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_REALTIME_ENABLE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_START_TIME_KEY; + public class PipeDataNodeTaskAgent extends PipeTaskAgent { private static final Logger LOGGER = LoggerFactory.getLogger(PipeDataNodeTaskAgent.class); protected static final IoTDBConfig CONFIG = IoTDBDescriptor.getInstance().getConfig(); + private final ExecutorService pipeExecutor = + new WrappedThreadPoolExecutor( + 0, + IoTDBDescriptor.getInstance().getConfig().getPipeTaskThreadCount(), + 0L, + TimeUnit.SECONDS, + new ArrayBlockingQueue<>( + IoTDBDescriptor.getInstance().getConfig().getPipeTaskThreadCount()), + new IoTThreadFactory(ThreadName.PIPE_PARALLEL_EXECUTION_POOL.getName()), + ThreadName.PIPE_PARALLEL_EXECUTION_POOL.getName(), + new ThreadPoolExecutor.CallerRunsPolicy()); + ////////////////////////// Pipe Task Management Entry ////////////////////////// @Override @@ -114,18 +160,17 @@ protected void createPipeTask( final PipeTaskMeta pipeTaskMeta) throws IllegalPathException { if (pipeTaskMeta.getLeaderNodeId() == CONFIG.getDataNodeId()) { - final PipeParameters extractorParameters = pipeStaticMeta.getExtractorParameters(); + final PipeParameters sourceParameters = pipeStaticMeta.getExtractorParameters(); + final DataRegionId dataRegionId = new DataRegionId(consensusGroupId); final boolean needConstructDataRegionTask = - StorageEngine.getInstance() - .getAllDataRegionIds() - .contains(new DataRegionId(consensusGroupId)) - && DataRegionListeningFilter.shouldDataRegionBeListened(extractorParameters); + StorageEngine.getInstance().getAllDataRegionIds().contains(dataRegionId) + && DataRegionListeningFilter.shouldDataRegionBeListened( + sourceParameters, dataRegionId); final boolean needConstructSchemaRegionTask = SchemaEngine.getInstance() .getAllSchemaRegionIds() .contains(new SchemaRegionId(consensusGroupId)) - && !SchemaRegionListeningFilter.parseListeningPlanTypeSet(extractorParameters) - .isEmpty(); + && !SchemaRegionListeningFilter.parseListeningPlanTypeSet(sourceParameters).isEmpty(); // Advance the extractor 
parameters parsing logic to avoid creating irrelevant pipeTasks if (needConstructDataRegionTask || needConstructSchemaRegionTask) { @@ -248,13 +293,12 @@ private void closeSchemaRegionListeningQueueIfNecessary( @Override protected void thawRate(final String pipeName, final long creationTime) { - PipeDataNodeRemainingEventAndTimeMetrics.getInstance().thawRate(pipeName + "_" + creationTime); + PipeDataNodeSinglePipeMetrics.getInstance().thawRate(pipeName + "_" + creationTime); } @Override protected void freezeRate(final String pipeName, final long creationTime) { - PipeDataNodeRemainingEventAndTimeMetrics.getInstance() - .freezeRate(pipeName + "_" + creationTime); + PipeDataNodeSinglePipeMetrics.getInstance().freezeRate(pipeName + "_" + creationTime); } @Override @@ -263,8 +307,9 @@ protected boolean dropPipe(final String pipeName, final long creationTime) { return false; } - PipeDataNodeRemainingEventAndTimeMetrics.getInstance() - .deregister(pipeName + "_" + creationTime); + final String taskId = pipeName + "_" + creationTime; + PipeTsFileToTabletsMetrics.getInstance().deregister(taskId); + PipeDataNodeSinglePipeMetrics.getInstance().deregister(taskId); return true; } @@ -274,14 +319,40 @@ protected boolean dropPipe(final String pipeName) { // Get the pipe meta first because it is removed after super#dropPipe(pipeName) final PipeMeta pipeMeta = pipeMetaKeeper.getPipeMeta(pipeName); + // Record whether there are pipe tasks before dropping the pipe + final boolean hasPipeTasks; + if (Objects.nonNull(pipeMeta)) { + final Map pipeTaskMap = + pipeTaskManager.getPipeTasks(pipeMeta.getStaticMeta()); + hasPipeTasks = Objects.nonNull(pipeTaskMap) && !pipeTaskMap.isEmpty(); + } else { + hasPipeTasks = false; + } + if (!super.dropPipe(pipeName)) { return false; } if (Objects.nonNull(pipeMeta)) { final long creationTime = pipeMeta.getStaticMeta().getCreationTime(); - PipeDataNodeRemainingEventAndTimeMetrics.getInstance() - .deregister(pipeName + "_" + creationTime); + final String taskId = pipeName + "_" + creationTime; + PipeTsFileToTabletsMetrics.getInstance().deregister(taskId); + PipeDataNodeSinglePipeMetrics.getInstance().deregister(taskId); + // When the pipe contains no pipe tasks, there is no corresponding prefetching queue for the + // subscribed pipe, so the subscription needs to be manually marked as completed.
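The comment above describes the last step of a capture-then-delegate flow, implemented by the conditional that follows: hasPipeTasks is computed while the task map still exists, super#dropPipe then tears the map down, and the captured flag decides whether the subscription must be marked completed by hand. A minimal standalone sketch of the idiom, with plain JDK types and hypothetical names:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class CaptureThenDelegateSketch {

  // Stand-in for the pipe task manager's region-to-task map.
  private Map<Integer, Object> tasksByRegion = new HashMap<>();

  // Stand-in for super.dropPipe(): destroys the state this method must inspect first.
  private boolean doDrop(final String pipeName) {
    tasksByRegion = Collections.emptyMap();
    return true;
  }

  public boolean dropPipe(final String pipeName) {
    // 1. Capture state that doDrop() is about to destroy.
    final boolean hasTasks = !tasksByRegion.isEmpty();

    // 2. Delegate; abort if the drop itself fails.
    if (!doDrop(pipeName)) {
      return false;
    }

    // 3. Use the captured state for follow-up work, e.g. marking a subscription completed.
    if (!hasTasks) {
      System.out.println("No tasks existed for " + pipeName + "; mark its topic completed manually.");
    }
    return true;
  }

  public static void main(final String[] args) {
    System.out.println(new CaptureThenDelegateSketch().dropPipe("demo_pipe"));
  }
}

The key ordering constraint is that the inspection must happen before the delegation, since the superclass call is destructive.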
+ if (!hasPipeTasks && PipeStaticMeta.isSubscriptionPipe(pipeName)) { + final String topicName = + pipeMeta + .getStaticMeta() + .getConnectorParameters() + .getString(PipeSinkConstant.SINK_TOPIC_KEY); + final String consumerGroupId = + pipeMeta + .getStaticMeta() + .getConnectorParameters() + .getString(PipeSinkConstant.SINK_CONSUMER_GROUP_KEY); + SubscriptionAgent.broker().updateCompletedTopicNames(consumerGroupId, topicName); + } } return true; @@ -294,9 +365,8 @@ public void stopAllPipesWithCriticalException() { ///////////////////////// Heartbeat ///////////////////////// public void collectPipeMetaList(final TDataNodeHeartbeatResp resp) throws TException { - // Try the lock instead of directly acquire it to prevent the block of the cluster heartbeat - // 10s is the half of the HEARTBEAT_TIMEOUT_TIME defined in class BaseNodeCache in ConfigNode - if (!tryReadLockWithTimeOut(10)) { + if (!tryReadLockWithTimeOut( + CommonDescriptor.getInstance().getConfig().getDnConnectionTimeoutInMS() * 2L / 3)) { return; } try { @@ -311,6 +381,13 @@ private void collectPipeMetaListInternal(final TDataNodeHeartbeatResp resp) thro if (PipeDataNodeAgent.runtime().isShutdown()) { return; } + final Optional logger = + PipeDataNodeResourceManager.log() + .schedule( + PipeDataNodeTaskAgent.class, + PipeConfig.getInstance().getPipeMetaReportMaxLogNumPerRound(), + PipeConfig.getInstance().getPipeMetaReportMaxLogIntervalRounds(), + pipeMetaKeeper.getPipeMetaCount()); final Set dataRegionIds = StorageEngine.getInstance().getAllDataRegionIds().stream() @@ -322,13 +399,6 @@ private void collectPipeMetaListInternal(final TDataNodeHeartbeatResp resp) thro final List pipeRemainingEventCountList = new ArrayList<>(); final List pipeRemainingTimeList = new ArrayList<>(); try { - final Optional logger = - PipeDataNodeResourceManager.log() - .schedule( - PipeDataNodeTaskAgent.class, - PipeConfig.getInstance().getPipeMetaReportMaxLogNumPerRound(), - PipeConfig.getInstance().getPipeMetaReportMaxLogIntervalRounds(), - pipeMetaKeeper.getPipeMetaCount()); for (final PipeMeta pipeMeta : pipeMetaKeeper.getPipeMetaList()) { pipeMetaBinaryList.add(pipeMeta.serialize()); @@ -340,27 +410,25 @@ private void collectPipeMetaListInternal(final TDataNodeHeartbeatResp resp) thro || pipeTaskMap.entrySet().stream() .filter(entry -> dataRegionIds.contains(entry.getKey())) .allMatch(entry -> ((PipeDataNodeTask) entry.getValue()).isCompleted()); - final String extractorModeValue = + final String sourceModeValue = pipeMeta .getStaticMeta() .getExtractorParameters() .getStringOrDefault( Arrays.asList( - PipeExtractorConstant.EXTRACTOR_MODE_KEY, - PipeExtractorConstant.SOURCE_MODE_KEY), - PipeExtractorConstant.EXTRACTOR_MODE_DEFAULT_VALUE); + PipeSourceConstant.EXTRACTOR_MODE_KEY, PipeSourceConstant.SOURCE_MODE_KEY), + PipeSourceConstant.EXTRACTOR_MODE_DEFAULT_VALUE); final boolean includeDataAndNeedDrop = DataRegionListeningFilter.parseInsertionDeletionListeningOptionPair( pipeMeta.getStaticMeta().getExtractorParameters()) .getLeft() - && (extractorModeValue.equalsIgnoreCase( - PipeExtractorConstant.EXTRACTOR_MODE_QUERY_VALUE) - || extractorModeValue.equalsIgnoreCase( - PipeExtractorConstant.EXTRACTOR_MODE_SNAPSHOT_VALUE)); + && (sourceModeValue.equalsIgnoreCase(PipeSourceConstant.EXTRACTOR_MODE_QUERY_VALUE) + || sourceModeValue.equalsIgnoreCase( + PipeSourceConstant.EXTRACTOR_MODE_SNAPSHOT_VALUE)); final boolean isCompleted = isAllDataRegionCompleted && includeDataAndNeedDrop; final Pair remainingEventAndTime = - 
PipeDataNodeRemainingEventAndTimeMetrics.getInstance() + PipeDataNodeSinglePipeMetrics.getInstance() .getRemainingEventAndTime(staticMeta.getPipeName(), staticMeta.getCreationTime()); pipeCompletedList.add(isCompleted); pipeRemainingEventCountList.add(remainingEventAndTime.getLeft()); @@ -375,7 +443,7 @@ private void collectPipeMetaListInternal(final TDataNodeHeartbeatResp resp) thro remainingEventAndTime.getLeft(), remainingEventAndTime.getRight())); } - LOGGER.info("Reported {} pipe metas.", pipeMetaBinaryList.size()); + logger.ifPresent(l -> l.info("Reported {} pipe metas.", pipeMetaBinaryList.size())); } catch (final IOException | IllegalPathException e) { throw new TException(e); } @@ -390,10 +458,18 @@ protected void collectPipeMetaListInternal( final TPipeHeartbeatReq req, final TPipeHeartbeatResp resp) throws TException { // Do nothing if data node is removing or removed, or request does not need pipe meta list - if (PipeDataNodeAgent.runtime().isShutdown()) { + // If the heartbeatId == Long.MIN_VALUE, then it's a shutdown report and shall not be skipped + if (PipeDataNodeAgent.runtime().isShutdown() && req.heartbeatId != Long.MIN_VALUE) { return; } - LOGGER.info("Received pipe heartbeat request {} from config node.", req.heartbeatId); + final Optional logger = + PipeDataNodeResourceManager.log() + .schedule( + PipeDataNodeTaskAgent.class, + PipeConfig.getInstance().getPipeMetaReportMaxLogNumPerRound(), + PipeConfig.getInstance().getPipeMetaReportMaxLogIntervalRounds(), + pipeMetaKeeper.getPipeMetaCount()); + LOGGER.debug("Received pipe heartbeat request {} from config node.", req.heartbeatId); final Set dataRegionIds = StorageEngine.getInstance().getAllDataRegionIds().stream() @@ -405,13 +481,6 @@ protected void collectPipeMetaListInternal( final List pipeRemainingEventCountList = new ArrayList<>(); final List pipeRemainingTimeList = new ArrayList<>(); try { - final Optional logger = - PipeDataNodeResourceManager.log() - .schedule( - PipeDataNodeTaskAgent.class, - PipeConfig.getInstance().getPipeMetaReportMaxLogNumPerRound(), - PipeConfig.getInstance().getPipeMetaReportMaxLogIntervalRounds(), - pipeMetaKeeper.getPipeMetaCount()); for (final PipeMeta pipeMeta : pipeMetaKeeper.getPipeMetaList()) { pipeMetaBinaryList.add(pipeMeta.serialize()); @@ -429,21 +498,20 @@ protected void collectPipeMetaListInternal( .getExtractorParameters() .getStringOrDefault( Arrays.asList( - PipeExtractorConstant.EXTRACTOR_MODE_KEY, - PipeExtractorConstant.SOURCE_MODE_KEY), - PipeExtractorConstant.EXTRACTOR_MODE_DEFAULT_VALUE); + PipeSourceConstant.EXTRACTOR_MODE_KEY, PipeSourceConstant.SOURCE_MODE_KEY), + PipeSourceConstant.EXTRACTOR_MODE_DEFAULT_VALUE); final boolean includeDataAndNeedDrop = DataRegionListeningFilter.parseInsertionDeletionListeningOptionPair( pipeMeta.getStaticMeta().getExtractorParameters()) .getLeft() && (extractorModeValue.equalsIgnoreCase( - PipeExtractorConstant.EXTRACTOR_MODE_QUERY_VALUE) + PipeSourceConstant.EXTRACTOR_MODE_QUERY_VALUE) || extractorModeValue.equalsIgnoreCase( - PipeExtractorConstant.EXTRACTOR_MODE_SNAPSHOT_VALUE)); + PipeSourceConstant.EXTRACTOR_MODE_SNAPSHOT_VALUE)); final boolean isCompleted = isAllDataRegionCompleted && includeDataAndNeedDrop; final Pair remainingEventAndTime = - PipeDataNodeRemainingEventAndTimeMetrics.getInstance() + PipeDataNodeSinglePipeMetrics.getInstance() .getRemainingEventAndTime(staticMeta.getPipeName(), staticMeta.getCreationTime());
pipeCompletedList.add(isCompleted); pipeRemainingEventCountList.add(remainingEventAndTime.getLeft()); @@ -458,7 +526,7 @@ protected void collectPipeMetaListInternal( remainingEventAndTime.getLeft(), remainingEventAndTime.getRight())); } - LOGGER.info("Reported {} pipe metas.", pipeMetaBinaryList.size()); + logger.ifPresent(l -> l.info("Reported {} pipe metas.", pipeMetaBinaryList.size())); } catch (final IOException | IllegalPathException e) { throw new TException(e); } @@ -469,157 +537,6 @@ protected void collectPipeMetaListInternal( PipeInsertionDataNodeListener.getInstance().listenToHeartbeat(true); } - ///////////////////////// Restart Logic ///////////////////////// - - public void restartAllStuckPipes() { - if (!tryWriteLockWithTimeOut(5)) { - return; - } - try { - restartAllStuckPipesInternal(); - } finally { - releaseWriteLock(); - } - } - - private void restartAllStuckPipesInternal() { - final Map taskId2ExtractorMap = - PipeDataRegionExtractorMetrics.getInstance().getExtractorMap(); - - final Set stuckPipes = new HashSet<>(); - for (final PipeMeta pipeMeta : pipeMetaKeeper.getPipeMetaList()) { - final String pipeName = pipeMeta.getStaticMeta().getPipeName(); - final List extractors = - taskId2ExtractorMap.values().stream() - .filter(e -> e.getPipeName().equals(pipeName) && e.shouldExtractInsertion()) - .collect(Collectors.toList()); - - if (extractors.isEmpty()) { - continue; - } - - // Extractors of this pipe might not pin too much MemTables, - // still need to check if linked-and-deleted TsFile count exceeds limit. - // Typically, if deleted tsFiles are too abundant all pipes may need to restart. - if ((CONFIG.isEnableSeqSpaceCompaction() - || CONFIG.isEnableUnseqSpaceCompaction() - || CONFIG.isEnableCrossSpaceCompaction()) - && mayDeletedTsFileSizeReachDangerousThreshold()) { - LOGGER.warn( - "Pipe {} needs to restart because too many TsFiles are out-of-date.", - pipeMeta.getStaticMeta()); - stuckPipes.add(pipeMeta); - continue; - } - - // Only restart the stream mode pipes for releasing memTables. - if (extractors.get(0).isStreamMode() - && extractors.stream().anyMatch(IoTDBDataRegionExtractor::hasConsumedAllHistoricalTsFiles) - && (mayMemTablePinnedCountReachDangerousThreshold() - || mayWalSizeReachThrottleThreshold())) { - // Extractors of this pipe may be stuck and is pinning too many MemTables. - LOGGER.warn( - "Pipe {} needs to restart because too many memTables are pinned.", - pipeMeta.getStaticMeta()); - stuckPipes.add(pipeMeta); - } - } - - // Restart all stuck pipes - stuckPipes.parallelStream().forEach(this::restartStuckPipe); - } - - private boolean mayDeletedTsFileSizeReachDangerousThreshold() { - try { - final long linkedButDeletedTsFileSize = - PipeDataNodeResourceManager.tsfile().getTotalLinkedButDeletedTsfileSize(); - final double totalDisk = - MetricService.getInstance() - .getAutoGauge( - SystemMetric.SYS_DISK_TOTAL_SPACE.toString(), - MetricLevel.CORE, - Tag.NAME.toString(), - // This "system" should stay the same with the one in - // DataNodeInternalRPCServiceImpl. 
- "system") - .getValue(); - return linkedButDeletedTsFileSize > 0 - && totalDisk > 0 - && linkedButDeletedTsFileSize - > PipeConfig.getInstance().getPipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage() - * totalDisk; - } catch (final Exception e) { - LOGGER.warn("Failed to judge if deleted TsFile size reaches dangerous threshold.", e); - return false; - } - } - - private boolean mayMemTablePinnedCountReachDangerousThreshold() { - return PipeDataNodeResourceManager.wal().getPinnedWalCount() - >= 10 * PipeConfig.getInstance().getPipeMaxAllowedPinnedMemTableCount(); - } - - private boolean mayWalSizeReachThrottleThreshold() { - return 3 * WALManager.getInstance().getTotalDiskUsage() > 2 * CONFIG.getThrottleThreshold(); - } - - private void restartStuckPipe(final PipeMeta pipeMeta) { - LOGGER.warn("Pipe {} will be restarted because of stuck.", pipeMeta.getStaticMeta()); - final long startTime = System.currentTimeMillis(); - changePipeStatusBeforeRestart(pipeMeta.getStaticMeta().getPipeName()); - handleSinglePipeMetaChangesInternal(pipeMeta); - LOGGER.warn( - "Pipe {} was restarted because of stuck, time cost: {} ms.", - pipeMeta.getStaticMeta(), - System.currentTimeMillis() - startTime); - } - - private void changePipeStatusBeforeRestart(final String pipeName) { - final PipeMeta pipeMeta = pipeMetaKeeper.getPipeMeta(pipeName); - final Map pipeTasks = pipeTaskManager.getPipeTasks(pipeMeta.getStaticMeta()); - final Set taskRegionIds = new HashSet<>(pipeTasks.keySet()); - final Set dataRegionIds = - StorageEngine.getInstance().getAllDataRegionIds().stream() - .map(DataRegionId::getId) - .collect(Collectors.toSet()); - final Set dataRegionPipeTasks = - taskRegionIds.stream() - .filter(dataRegionIds::contains) - .map(regionId -> pipeTaskManager.removePipeTask(pipeMeta.getStaticMeta(), regionId)) - .filter(Objects::nonNull) - .collect(Collectors.toSet()); - - // Drop data region tasks - dataRegionPipeTasks.parallelStream().forEach(PipeTask::drop); - - // Stop schema region tasks - pipeTaskManager.getPipeTasks(pipeMeta.getStaticMeta()).values().parallelStream() - .forEach(PipeTask::stop); - - // Re-create data region tasks - dataRegionPipeTasks.parallelStream() - .forEach( - pipeTask -> { - final PipeTask newPipeTask = - new PipeDataNodeTaskBuilder( - pipeMeta.getStaticMeta(), - ((PipeDataNodeTask) pipeTask).getRegionId(), - pipeMeta - .getRuntimeMeta() - .getConsensusGroupId2TaskMetaMap() - .get(((PipeDataNodeTask) pipeTask).getRegionId())) - .build(); - newPipeTask.create(); - pipeTaskManager.addPipeTask( - pipeMeta.getStaticMeta(), - ((PipeDataNodeTask) pipeTask).getRegionId(), - newPipeTask); - }); - - // Set pipe meta status to STOPPED - pipeMeta.getRuntimeMeta().getStatus().set(PipeStatus.STOPPED); - } - ///////////////////////// Terminate Logic ///////////////////////// public void markCompleted(final String pipeName, final int regionId) { @@ -648,6 +565,60 @@ public Set getPipeTaskRegionIdSet(final String pipeName, final long cre : pipeMeta.getRuntimeMeta().getConsensusGroupId2TaskMetaMap().keySet(); } + public boolean hasPipeReleaseRegionRelatedResource(final int consensusGroupId) { + if (!tryReadLockWithTimeOut(10)) { + LOGGER.warn( + "Failed to check if pipe has release region related resource with consensus group id: {}.", + consensusGroupId); + return false; + } + + try { + return !pipeTaskManager.hasPipeTaskInConsensusGroup(consensusGroupId); + } finally { + releaseReadLock(); + } + } + + @Override + public void runPipeTasks( + final Collection pipeTasks, final Consumer runSingle) { 
+ final Set<Future<?>> pipeFuture = new HashSet<>(); + + pipeTasks.forEach( + pipeTask -> pipeFuture.add(pipeExecutor.submit(() -> runSingle.accept(pipeTask)))); + + for (final Future<?> future : pipeFuture) { + try { + future.get(); + } catch (final ExecutionException | InterruptedException e) { + LOGGER.warn("Exception occurred while executing pipe task: ", e); + throw new PipeException(e.toString()); + } + } + } + + ///////////////////////// Shutdown Logic ///////////////////////// + + public void persistAllProgressIndex() { + try (final ConfigNodeClient configNodeClient = + ConfigNodeClientManager.getInstance().borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) { + // Collect all pipe meta (with progress index) and push it to the config node + final TPipeHeartbeatResp resp = new TPipeHeartbeatResp(); + collectPipeMetaList(new TPipeHeartbeatReq(Long.MIN_VALUE), resp); + final TSStatus result = + configNodeClient.pushHeartbeat( + IoTDBDescriptor.getInstance().getConfig().getDataNodeId(), resp); + if (TSStatusCode.SUCCESS_STATUS.getStatusCode() != result.getCode()) { + LOGGER.warn("Failed to persist progress index to configNode, status: {}", result); + } else { + LOGGER.info("Successfully persisted all pipes' info to configNode."); + } + } catch (final Exception e) { + LOGGER.warn(e.getMessage()); + } + } + ///////////////////////// Pipe Consensus ///////////////////////// public ProgressIndex getPipeTaskProgressIndex(final String pipeName, final int consensusGroupId) { @@ -690,4 +661,219 @@ public Map getAllConsensusPipe() { releaseReadLock(); } } + + @Override + protected void calculateMemoryUsage( + final PipeStaticMeta staticMeta, + final PipeParameters sourceParameters, + final PipeParameters processorParameters, + final PipeParameters sinkParameters) { + if (!PipeConfig.getInstance().isPipeEnableMemoryCheck() + || !isInnerSource(sourceParameters) + || !PipeType.USER.equals(staticMeta.getPipeType())) { + return; + } + + calculateInsertNodeQueueMemory(sourceParameters); + + long needMemory = 0; + + needMemory += calculateTsFileParserMemory(sourceParameters, sinkParameters); + needMemory += calculateSinkBatchMemory(sinkParameters); + needMemory += calculateSendTsFileReadBufferMemory(sourceParameters, sinkParameters); + needMemory += calculateAssignerMemory(sourceParameters); + + PipeMemoryManager pipeMemoryManager = PipeDataNodeResourceManager.memory(); + final long freeMemorySizeInBytes = pipeMemoryManager.getFreeMemorySizeInBytes(); + final long reservedMemorySizeInBytes = + (long) + (PipeMemoryManager.getTotalMemorySizeInBytes() + * PipeConfig.getInstance().getReservedMemoryPercentage()); + if (freeMemorySizeInBytes < needMemory + reservedMemorySizeInBytes) { + final String message = + String.format( + "%s Need memory: %d bytes, free memory: %d bytes, reserved memory: %d bytes, total memory: %d bytes", + MESSAGE_PIPE_NOT_ENOUGH_MEMORY, + needMemory, + freeMemorySizeInBytes, + reservedMemorySizeInBytes, + PipeMemoryManager.getTotalMemorySizeInBytes()); + LOGGER.warn(message); + throw new PipeException(message); + } + } + + private boolean isInnerSource(final PipeParameters sourceParameters) { + final String pluginName = + sourceParameters + .getStringOrDefault( + Arrays.asList(PipeSourceConstant.EXTRACTOR_KEY, PipeSourceConstant.SOURCE_KEY), + BuiltinPipePlugin.IOTDB_EXTRACTOR.getPipePluginName()) + .toLowerCase(); + + return pluginName.equals(BuiltinPipePlugin.IOTDB_EXTRACTOR.getPipePluginName()) + || pluginName.equals(BuiltinPipePlugin.IOTDB_SOURCE.getPipePluginName()); + } + + private void calculateInsertNodeQueueMemory(final PipeParameters 
sourceParameters) { + + // Realtime extraction is enabled by default, so we only need to check whether it has been explicitly disabled + if (!sourceParameters.getBooleanOrDefault( + Arrays.asList(EXTRACTOR_REALTIME_ENABLE_KEY, SOURCE_REALTIME_ENABLE_KEY), + EXTRACTOR_REALTIME_ENABLE_DEFAULT_VALUE)) { + return; + } + + // If the realtime mode is batch or file, we do not need to allocate memory + final String realtimeMode = + sourceParameters.getStringByKeys( + PipeSourceConstant.EXTRACTOR_REALTIME_MODE_KEY, + PipeSourceConstant.SOURCE_REALTIME_MODE_KEY); + if (PipeSourceConstant.EXTRACTOR_REALTIME_MODE_BATCH_MODE_VALUE.equals(realtimeMode) + || PipeSourceConstant.EXTRACTOR_REALTIME_MODE_FILE_VALUE.equals(realtimeMode)) { + return; + } + + final long allocatedMemorySizeInBytes = this.getAllFloatingMemoryUsageInByte(); + final long remainingMemory = + PipeMemoryManager.getTotalFloatingMemorySizeInBytes() - allocatedMemorySizeInBytes; + if (remainingMemory < PipeConfig.getInstance().PipeInsertNodeQueueMemory()) { + final String message = + String.format( + "%s Need floating memory: %d bytes, free floating memory: %d bytes", + MESSAGE_PIPE_NOT_ENOUGH_MEMORY, + PipeConfig.getInstance().PipeInsertNodeQueueMemory(), + remainingMemory); + LOGGER.warn(message); + throw new PipeException(message); + } + } + + private long calculateTsFileParserMemory( + final PipeParameters sourceParameters, final PipeParameters sinkParameters) { + + // If the source does not extract historical data, we do not need to allocate memory + boolean isExtractorHistory = + sourceParameters.getBooleanOrDefault( + SystemConstant.RESTART_KEY, SystemConstant.RESTART_DEFAULT_VALUE) + || sourceParameters.getBooleanOrDefault( + Arrays.asList(EXTRACTOR_HISTORY_ENABLE_KEY, SOURCE_HISTORY_ENABLE_KEY), + EXTRACTOR_HISTORY_ENABLE_DEFAULT_VALUE); + + // If the source extracts historical data and has a history start/end time, we need to allocate memory + boolean isTSFileParser = + isExtractorHistory + && sourceParameters.hasAnyAttributes( + EXTRACTOR_HISTORY_START_TIME_KEY, SOURCE_HISTORY_START_TIME_KEY); + + isTSFileParser = + isTSFileParser + || (isExtractorHistory + && sourceParameters.hasAnyAttributes( + EXTRACTOR_HISTORY_END_TIME_KEY, SOURCE_HISTORY_END_TIME_KEY)); + + // If the source has a start/end time, we need to allocate memory + isTSFileParser = + isTSFileParser + || sourceParameters.hasAnyAttributes(SOURCE_START_TIME_KEY, EXTRACTOR_START_TIME_KEY); + + isTSFileParser = + isTSFileParser + || sourceParameters.hasAnyAttributes(SOURCE_END_TIME_KEY, EXTRACTOR_END_TIME_KEY); + + // If the source has a pattern or path, we need to allocate memory + isTSFileParser = + isTSFileParser + || sourceParameters.hasAnyAttributes(EXTRACTOR_PATTERN_KEY, SOURCE_PATTERN_KEY); + + isTSFileParser = + isTSFileParser || sourceParameters.hasAnyAttributes(EXTRACTOR_PATH_KEY, SOURCE_PATH_KEY); + + // If the sink format is not hybrid, we need to allocate memory + isTSFileParser = + isTSFileParser + || !PipeSinkConstant.CONNECTOR_FORMAT_HYBRID_VALUE.equals( + sinkParameters.getStringOrDefault( + Arrays.asList( + PipeSinkConstant.CONNECTOR_FORMAT_KEY, PipeSinkConstant.SINK_FORMAT_KEY), + PipeSinkConstant.CONNECTOR_FORMAT_HYBRID_VALUE)); + + if (!isTSFileParser) { + return 0; + } + + return PipeConfig.getInstance().getTsFileParserMemory(); + } + + private long calculateSinkBatchMemory(final PipeParameters sinkParameters) { + + // If the sink format is tsfile, we need to use batch + boolean needUseBatch = + PipeSinkConstant.CONNECTOR_FORMAT_TS_FILE_VALUE.equals( + sinkParameters.getStringOrDefault( + Arrays.asList( 
PipeSinkConstant.CONNECTOR_FORMAT_KEY, PipeSinkConstant.SINK_FORMAT_KEY), + PipeSinkConstant.CONNECTOR_FORMAT_HYBRID_VALUE)); + + if (needUseBatch) { + return PipeConfig.getInstance().getSinkBatchMemoryTsFile(); + } + + // If the sink is in batch mode, we need to use batch + needUseBatch = + sinkParameters.getBooleanOrDefault( + Arrays.asList( + PipeSinkConstant.CONNECTOR_IOTDB_BATCH_MODE_ENABLE_KEY, + PipeSinkConstant.SINK_IOTDB_BATCH_MODE_ENABLE_KEY), + PipeSinkConstant.CONNECTOR_IOTDB_BATCH_MODE_ENABLE_DEFAULT_VALUE); + + if (!needUseBatch) { + return 0; + } + + return PipeConfig.getInstance().getSinkBatchMemoryInsertNode(); + } + + private long calculateSendTsFileReadBufferMemory( + final PipeParameters sourceParameters, final PipeParameters sinkParameters) { + // If history extraction is enabled, we need to transfer tsfiles + boolean needTransferTsFile = + sourceParameters.getBooleanOrDefault( + SystemConstant.RESTART_KEY, SystemConstant.RESTART_DEFAULT_VALUE) + || sourceParameters.getBooleanOrDefault( + Arrays.asList(EXTRACTOR_HISTORY_ENABLE_KEY, SOURCE_HISTORY_ENABLE_KEY), + EXTRACTOR_HISTORY_ENABLE_DEFAULT_VALUE); + + String format = + sinkParameters.getStringOrDefault( + Arrays.asList(PipeSinkConstant.CONNECTOR_FORMAT_KEY, PipeSinkConstant.SINK_FORMAT_KEY), + PipeSinkConstant.CONNECTOR_FORMAT_HYBRID_VALUE); + + // If the sink format is tsfile or hybrid, we need to transfer tsfiles + needTransferTsFile = + needTransferTsFile + || PipeSinkConstant.CONNECTOR_FORMAT_HYBRID_VALUE.equals(format) + || PipeSinkConstant.CONNECTOR_FORMAT_TS_FILE_VALUE.equals(format); + + if (!needTransferTsFile) { + return 0; + } + + return PipeConfig.getInstance().getSendTsFileReadBuffer(); + } + + private long calculateAssignerMemory(final PipeParameters sourceParameters) { + try { + if (!PipeInsertionDataNodeListener.getInstance().isEmpty() + || !DataRegionListeningFilter.parseInsertionDeletionListeningOptionPair(sourceParameters) + .getLeft()) { + return 0; + } + return PipeConfig.getInstance().getPipeExtractorAssignerDisruptorRingBufferSize() + * PipeConfig.getInstance().getPipeExtractorAssignerDisruptorRingBufferEntrySizeInBytes() + * Math.min(StorageEngine.getInstance().getDataRegionNumber(), 10); + } catch (final IllegalPathException e) { + return 0; + } + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/builder/PipeDataNodeBuilder.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/builder/PipeDataNodeBuilder.java similarity index 82% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/builder/PipeDataNodeBuilder.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/builder/PipeDataNodeBuilder.java index 6586e771847d5..9216ea2c377df 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/builder/PipeDataNodeBuilder.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/builder/PipeDataNodeBuilder.java @@ -17,20 +17,20 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.task.builder; +package org.apache.iotdb.db.pipe.agent.task.builder; import org.apache.iotdb.commons.consensus.DataRegionId; import org.apache.iotdb.commons.consensus.SchemaRegionId; import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.pipe.task.PipeTask; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeRuntimeMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.task.PipeTask; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeRuntimeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.pipe.extractor.dataregion.DataRegionListeningFilter; -import org.apache.iotdb.db.pipe.extractor.schemaregion.SchemaRegionListeningFilter; +import org.apache.iotdb.db.pipe.source.dataregion.DataRegionListeningFilter; +import org.apache.iotdb.db.pipe.source.schemaregion.SchemaRegionListeningFilter; import org.apache.iotdb.db.schemaengine.SchemaEngine; import org.apache.iotdb.db.storageengine.StorageEngine; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; @@ -64,9 +64,11 @@ public Map build() throws IllegalPathException { if (pipeTaskMeta.getLeaderNodeId() == CONFIG.getDataNodeId()) { final PipeParameters extractorParameters = pipeStaticMeta.getExtractorParameters(); + final DataRegionId dataRegionId = new DataRegionId(consensusGroupId); final boolean needConstructDataRegionTask = - dataRegionIds.contains(new DataRegionId(consensusGroupId)) - && DataRegionListeningFilter.shouldDataRegionBeListened(extractorParameters); + dataRegionIds.contains(dataRegionId) + && DataRegionListeningFilter.shouldDataRegionBeListened( + extractorParameters, dataRegionId); final boolean needConstructSchemaRegionTask = schemaRegionIds.contains(new SchemaRegionId(consensusGroupId)) && !SchemaRegionListeningFilter.parseListeningPlanTypeSet(extractorParameters) diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/builder/PipeDataNodeTaskBuilder.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/builder/PipeDataNodeTaskBuilder.java new file mode 100644 index 0000000000000..21f344f173daa --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/builder/PipeDataNodeTaskBuilder.java @@ -0,0 +1,231 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.pipe.agent.task.builder; + +import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex; +import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeType; +import org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant; +import org.apache.iotdb.commons.pipe.config.constant.SystemConstant; +import org.apache.iotdb.db.pipe.agent.task.PipeDataNodeTask; +import org.apache.iotdb.db.pipe.agent.task.execution.PipeProcessorSubtaskExecutor; +import org.apache.iotdb.db.pipe.agent.task.execution.PipeSubtaskExecutorManager; +import org.apache.iotdb.db.pipe.agent.task.stage.PipeTaskProcessorStage; +import org.apache.iotdb.db.pipe.agent.task.stage.PipeTaskSinkStage; +import org.apache.iotdb.db.pipe.agent.task.stage.PipeTaskSourceStage; +import org.apache.iotdb.db.pipe.source.dataregion.DataRegionListeningFilter; +import org.apache.iotdb.db.subscription.task.stage.SubscriptionTaskSinkStage; +import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; + +import org.apache.tsfile.utils.Pair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_FORMAT_HYBRID_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_FORMAT_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_FORMAT_TABLET_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_FORMAT_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_MODE_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_MODE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_MODE_QUERY_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_MODE_SNAPSHOT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_ENABLE_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_ENABLE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_MODE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_REALTIME_ENABLE_KEY; + +public class PipeDataNodeTaskBuilder { + + private static final Logger LOGGER = LoggerFactory.getLogger(PipeDataNodeTaskBuilder.class); + + private final PipeStaticMeta pipeStaticMeta; + private final int regionId; + private final PipeTaskMeta pipeTaskMeta; + + private static final PipeProcessorSubtaskExecutor PROCESSOR_EXECUTOR = + PipeSubtaskExecutorManager.getInstance().getProcessorExecutor(); + + protected final Map systemParameters = new HashMap<>(); + + public PipeDataNodeTaskBuilder( + final PipeStaticMeta pipeStaticMeta, final int regionId, final PipeTaskMeta pipeTaskMeta) { + this.pipeStaticMeta = pipeStaticMeta; + this.regionId = regionId; + this.pipeTaskMeta = pipeTaskMeta; + generateSystemParameters(); + } + + public PipeDataNodeTask build() { + 
// Event flow: extractor -> processor -> connector + + // Analyzes the PipeParameters to identify potential conflicts. + final PipeParameters extractorParameters = + blendUserAndSystemParameters(pipeStaticMeta.getExtractorParameters()); + final PipeParameters connectorParameters = + blendUserAndSystemParameters(pipeStaticMeta.getConnectorParameters()); + checkConflict(extractorParameters, connectorParameters); + + // We first build the extractor and connector, then build the processor. + final PipeTaskSourceStage extractorStage = + new PipeTaskSourceStage( + pipeStaticMeta.getPipeName(), + pipeStaticMeta.getCreationTime(), + extractorParameters, + regionId, + pipeTaskMeta); + + final PipeTaskSinkStage connectorStage; + final PipeType pipeType = pipeStaticMeta.getPipeType(); + + if (PipeType.SUBSCRIPTION.equals(pipeType)) { + connectorStage = + new SubscriptionTaskSinkStage( + pipeStaticMeta.getPipeName(), + pipeStaticMeta.getCreationTime(), + connectorParameters, + regionId, + PipeSubtaskExecutorManager.getInstance().getSubscriptionExecutor()); + } else { // user pipe or consensus pipe + connectorStage = + new PipeTaskSinkStage( + pipeStaticMeta.getPipeName(), + pipeStaticMeta.getCreationTime(), + connectorParameters, + regionId, + pipeType.equals(PipeType.USER) + ? PipeSubtaskExecutorManager.getInstance().getConnectorExecutorSupplier() + : PipeSubtaskExecutorManager.getInstance().getConsensusExecutorSupplier()); + } + + // The processor connects the extractor and connector. + final PipeTaskProcessorStage processorStage = + new PipeTaskProcessorStage( + pipeStaticMeta.getPipeName(), + pipeStaticMeta.getCreationTime(), + blendUserAndSystemParameters(pipeStaticMeta.getProcessorParameters()), + regionId, + extractorStage.getEventSupplier(), + connectorStage.getPipeConnectorPendingQueue(), + PROCESSOR_EXECUTOR, + pipeTaskMeta, + pipeStaticMeta + .getConnectorParameters() + .getStringOrDefault( + Arrays.asList(CONNECTOR_FORMAT_KEY, SINK_FORMAT_KEY), + CONNECTOR_FORMAT_HYBRID_VALUE) + .equals(CONNECTOR_FORMAT_TABLET_VALUE), + PipeType.SUBSCRIPTION.equals(pipeType)); + + return new PipeDataNodeTask( + pipeStaticMeta.getPipeName(), regionId, extractorStage, processorStage, connectorStage); + } + + private void generateSystemParameters() { + if (!(pipeTaskMeta.getProgressIndex() instanceof MinimumProgressIndex)) { + systemParameters.put(SystemConstant.RESTART_KEY, Boolean.TRUE.toString()); + } + } + + private PipeParameters blendUserAndSystemParameters(final PipeParameters userParameters) { + // Deep copy the user parameters to avoid modification of the original parameters. + // If the original parameters are modified, progress index report will be affected. 
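The statements that follow implement the deep copy just described: copying first keeps the caller's attribute map (and thus progress index reporting) untouched, and putAll lets system entries win on key collisions. A minimal sketch of this copy-then-overlay idiom with plain maps and hypothetical keys:

import java.util.HashMap;
import java.util.Map;

public class BlendSketch {

  // Copy the user map so the caller's instance stays untouched; system entries win on collisions.
  static Map<String, String> blend(final Map<String, String> user, final Map<String, String> system) {
    final Map<String, String> blended = new HashMap<>(user);
    blended.putAll(system);
    return blended;
  }

  public static void main(final String[] args) {
    final Map<String, String> user = new HashMap<>();
    user.put("source.realtime.enable", "true"); // hypothetical user attribute
    final Map<String, String> system = new HashMap<>();
    system.put("system.restart", "true"); // hypothetical system flag, like the restart marker above
    System.out.println(blend(user, system)); // both entries present; the user map is unchanged
  }
}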
+ final Map blendedParameters = new HashMap<>(userParameters.getAttribute()); + blendedParameters.putAll(systemParameters); + return new PipeParameters(blendedParameters); + } + + private void checkConflict( + final PipeParameters extractorParameters, final PipeParameters connectorParameters) { + final Pair insertionDeletionListeningOptionPair; + final boolean shouldTerminatePipeOnAllHistoricalEventsConsumed; + + try { + insertionDeletionListeningOptionPair = + DataRegionListeningFilter.parseInsertionDeletionListeningOptionPair(extractorParameters); + + final String extractorModeValue = + extractorParameters.getStringOrDefault( + Arrays.asList(EXTRACTOR_MODE_KEY, SOURCE_MODE_KEY), EXTRACTOR_MODE_DEFAULT_VALUE); + shouldTerminatePipeOnAllHistoricalEventsConsumed = + extractorModeValue.equalsIgnoreCase(EXTRACTOR_MODE_SNAPSHOT_VALUE) + || extractorModeValue.equalsIgnoreCase(EXTRACTOR_MODE_QUERY_VALUE); + + } catch (final IllegalPathException e) { + LOGGER.warn( + "PipeDataNodeTaskBuilder failed to parse 'inclusion' and 'exclusion' parameters: {}", + e.getMessage(), + e); + return; + } + + if (insertionDeletionListeningOptionPair.right + || shouldTerminatePipeOnAllHistoricalEventsConsumed) { + final Boolean isRealtime = + connectorParameters.getBooleanByKeys( + PipeSinkConstant.CONNECTOR_REALTIME_FIRST_KEY, + PipeSinkConstant.SINK_REALTIME_FIRST_KEY); + if (isRealtime == null) { + connectorParameters.addAttribute(PipeSinkConstant.CONNECTOR_REALTIME_FIRST_KEY, "false"); + if (insertionDeletionListeningOptionPair.right) { + LOGGER.info( + "PipeDataNodeTaskBuilder: When 'inclusion' contains 'data.delete', 'realtime-first' is defaulted to 'false' to prevent sync issues after deletion."); + } else { + LOGGER.info( + "PipeDataNodeTaskBuilder: When extractor uses snapshot mode, 'realtime-first' is defaulted to 'false' to prevent premature halt before transfer completion."); + } + } else if (isRealtime) { + if (insertionDeletionListeningOptionPair.right) { + LOGGER.warn( + "PipeDataNodeTaskBuilder: When 'inclusion' includes 'data.delete', 'realtime-first' set to 'true' may result in data synchronization issues after deletion."); + } else { + LOGGER.warn( + "PipeDataNodeTaskBuilder: When extractor uses snapshot mode, 'realtime-first' set to 'true' may cause a premature halt before transfer completion."); + } + } + } + + final boolean isRealtimeEnabled = + extractorParameters.getBooleanOrDefault( + Arrays.asList(EXTRACTOR_REALTIME_ENABLE_KEY, SOURCE_REALTIME_ENABLE_KEY), + EXTRACTOR_REALTIME_ENABLE_DEFAULT_VALUE); + + if (isRealtimeEnabled && !shouldTerminatePipeOnAllHistoricalEventsConsumed) { + final Boolean enableSendTsFileLimit = + connectorParameters.getBooleanByKeys( + PipeSinkConstant.SINK_ENABLE_SEND_TSFILE_LIMIT, + PipeSinkConstant.CONNECTOR_ENABLE_SEND_TSFILE_LIMIT); + + if (enableSendTsFileLimit == null) { + connectorParameters.addAttribute(PipeSinkConstant.SINK_ENABLE_SEND_TSFILE_LIMIT, "true"); + LOGGER.info( + "PipeDataNodeTaskBuilder: When the realtime sync is enabled, we enable rate limiter in sending tsfile by default to reserve disk and network IO for realtime sending."); + } else if (!enableSendTsFileLimit) { + LOGGER.warn( + "PipeDataNodeTaskBuilder: When the realtime sync is enabled, not enabling the rate limiter in sending tsfile may introduce delay for realtime sending."); + } + } + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/connection/EnrichedDeque.java 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/connection/EnrichedDeque.java similarity index 94% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/connection/EnrichedDeque.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/connection/EnrichedDeque.java index fa352c0055e61..d2d31faeeca38 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/connection/EnrichedDeque.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/connection/EnrichedDeque.java @@ -17,9 +17,9 @@ * under the License. */ -package org.apache.iotdb.db.pipe.task.connection; +package org.apache.iotdb.db.pipe.agent.task.connection; -import org.apache.iotdb.db.pipe.metric.PipeDataRegionEventCounter; +import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionEventCounter; import org.apache.iotdb.pipe.api.event.Event; import java.util.Deque; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/connection/PipeEventCollector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/connection/PipeEventCollector.java similarity index 78% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/connection/PipeEventCollector.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/connection/PipeEventCollector.java index c4c91f80f582b..9edb53ed932b7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/connection/PipeEventCollector.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/connection/PipeEventCollector.java @@ -17,24 +17,23 @@ * under the License. */ -package org.apache.iotdb.db.pipe.task.connection; +package org.apache.iotdb.db.pipe.agent.task.connection; +import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue; +import org.apache.iotdb.commons.pipe.agent.task.progress.PipeEventCommitManager; +import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBPipePattern; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; import org.apache.iotdb.commons.pipe.event.ProgressReportEvent; -import org.apache.iotdb.commons.pipe.pattern.IoTDBPipePattern; -import org.apache.iotdb.commons.pipe.progress.PipeEventCommitManager; -import org.apache.iotdb.commons.pipe.task.connection.UnboundedBlockingPendingQueue; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; -import org.apache.iotdb.db.pipe.extractor.schemaregion.IoTDBSchemaRegionExtractor; +import org.apache.iotdb.db.pipe.source.schemaregion.IoTDBSchemaRegionSource; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeType; import org.apache.iotdb.pipe.api.collector.EventCollector; import org.apache.iotdb.pipe.api.event.Event; -import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; import org.apache.iotdb.pipe.api.exception.PipeException; import org.slf4j.Logger; @@ -54,18 +53,23 @@ public class PipeEventCollector implements EventCollector { private final boolean forceTabletFormat; + private final boolean skipParsing; + private 
final AtomicInteger collectInvocationCount = new AtomicInteger(0); private boolean hasNoGeneratedEvent = true; + private boolean isFailedToIncreaseReferenceCount = false; public PipeEventCollector( final UnboundedBlockingPendingQueue pendingQueue, final long creationTime, final int regionId, - final boolean forceTabletFormat) { + final boolean forceTabletFormat, + final boolean skipParsing) { this.pendingQueue = pendingQueue; this.creationTime = creationTime; this.regionId = regionId; this.forceTabletFormat = forceTabletFormat; + this.skipParsing = skipParsing; } @Override @@ -94,6 +98,11 @@ public void collect(final Event event) { } private void parseAndCollectEvent(final PipeInsertNodeTabletInsertionEvent sourceEvent) { + if (skipParsing) { + collectEvent(sourceEvent); + return; + } + if (sourceEvent.shouldParseTimeOrPattern()) { for (final PipeRawTabletInsertionEvent parsedEvent : sourceEvent.toRawTabletInsertionEvents()) { @@ -105,10 +114,11 @@ private void parseAndCollectEvent(final PipeInsertNodeTabletInsertionEvent sourc } private void parseAndCollectEvent(final PipeRawTabletInsertionEvent sourceEvent) { - collectParsedRawTableEvent( - sourceEvent.shouldParseTimeOrPattern() - ? sourceEvent.parseEventWithPatternOrTime() - : sourceEvent); + if (sourceEvent.shouldParseTimeOrPattern()) { + collectParsedRawTableEvent(sourceEvent.parseEventWithPatternOrTime()); + } else { + collectEvent(sourceEvent); + } } private void parseAndCollectEvent(final PipeTsFileInsertionEvent sourceEvent) throws Exception { @@ -119,20 +129,28 @@ private void parseAndCollectEvent(final PipeTsFileInsertionEvent sourceEvent) th return; } - if (!forceTabletFormat && !sourceEvent.shouldParseTimeOrPattern()) { + if (skipParsing) { + collectEvent(sourceEvent); + return; + } + + if (!forceTabletFormat && canSkipParsing4TsFileEvent(sourceEvent)) { collectEvent(sourceEvent); return; } try { - for (final TabletInsertionEvent parsedEvent : sourceEvent.toTabletInsertionEvents()) { - collectParsedRawTableEvent((PipeRawTabletInsertionEvent) parsedEvent); - } + sourceEvent.consumeTabletInsertionEventsWithRetry( + this::collectParsedRawTableEvent, "PipeEventCollector::parseAndCollectEvent"); } finally { sourceEvent.close(); } } + public static boolean canSkipParsing4TsFileEvent(final PipeTsFileInsertionEvent sourceEvent) { + return !sourceEvent.shouldParseTimeOrPattern(); + } + private void collectParsedRawTableEvent(final PipeRawTabletInsertionEvent parsedEvent) { if (!parsedEvent.hasNoNeedParsingAndIsEmpty()) { hasNoGeneratedEvent = false; @@ -143,7 +161,7 @@ private void collectParsedRawTableEvent(final PipeRawTabletInsertionEvent parsed private void parseAndCollectEvent(final PipeSchemaRegionWritePlanEvent deleteDataEvent) { // Only used by events containing delete data node, no need to bind progress index here since // delete data event does not have progress index currently - IoTDBSchemaRegionExtractor.PATTERN_PARSE_VISITOR + IoTDBSchemaRegionSource.PATTERN_PARSE_VISITOR .process(deleteDataEvent.getPlanNode(), (IoTDBPipePattern) deleteDataEvent.getPipePattern()) .map( planNode -> @@ -162,10 +180,12 @@ private void parseAndCollectEvent(final PipeSchemaRegionWritePlanEvent deleteDat } private void collectEvent(final Event event) { - collectInvocationCount.incrementAndGet(); - if (event instanceof EnrichedEvent) { - ((EnrichedEvent) event).increaseReferenceCount(PipeEventCollector.class.getName()); + if (!((EnrichedEvent) event).increaseReferenceCount(PipeEventCollector.class.getName())) { + LOGGER.warn("PipeEventCollector: 
The event {} is already released, skipping it.", event); + isFailedToIncreaseReferenceCount = true; + return; + } // Assign a commit id for this event in order to report progress in order. PipeEventCommitManager.getInstance() @@ -180,11 +200,13 @@ private void collectEvent(final Event event) { } pendingQueue.directOffer(event); + collectInvocationCount.incrementAndGet(); } - public void resetCollectInvocationCountAndGenerateFlag() { + public void resetFlags() { collectInvocationCount.set(0); hasNoGeneratedEvent = true; + isFailedToIncreaseReferenceCount = false; } public long getCollectInvocationCount() { @@ -198,4 +220,8 @@ public boolean hasNoCollectInvocationAfterReset() { public boolean hasNoGeneratedEvent() { return hasNoGeneratedEvent; } + + public boolean isFailedToIncreaseReferenceCount() { + return isFailedToIncreaseReferenceCount; + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/execution/PipeProcessorSubtaskExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeProcessorSubtaskExecutor.java similarity index 85% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/execution/PipeProcessorSubtaskExecutor.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeProcessorSubtaskExecutor.java index 6d5a8ca89fc9f..9eddd27f22e6b 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/execution/PipeProcessorSubtaskExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeProcessorSubtaskExecutor.java @@ -17,18 +17,18 @@ * under the License. */ -package org.apache.iotdb.db.pipe.execution; +package org.apache.iotdb.db.pipe.agent.task.execution; import org.apache.iotdb.commons.concurrent.ThreadName; +import org.apache.iotdb.commons.pipe.agent.task.execution.PipeSubtaskExecutor; import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.execution.executor.PipeSubtaskExecutor; public class PipeProcessorSubtaskExecutor extends PipeSubtaskExecutor { public PipeProcessorSubtaskExecutor() { super( PipeConfig.getInstance().getPipeSubtaskExecutorMaxThreadNum(), - ThreadName.PIPE_PROCESSOR_EXECUTOR_POOL, + ThreadName.PIPE_PROCESSOR_EXECUTOR_POOL.getName(), false); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/execution/PipeConnectorSubtaskExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeSinkSubtaskExecutor.java similarity index 63% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/execution/PipeConnectorSubtaskExecutor.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeSinkSubtaskExecutor.java index ea03c8ce4a008..9a88ad74d7ccb 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/execution/PipeConnectorSubtaskExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeSinkSubtaskExecutor.java @@ -17,22 +17,26 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.execution; +package org.apache.iotdb.db.pipe.agent.task.execution; import org.apache.iotdb.commons.concurrent.ThreadName; +import org.apache.iotdb.commons.pipe.agent.task.execution.PipeSubtaskExecutor; import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.execution.executor.PipeSubtaskExecutor; -public class PipeConnectorSubtaskExecutor extends PipeSubtaskExecutor { +import java.util.concurrent.atomic.AtomicInteger; - public PipeConnectorSubtaskExecutor() { +public class PipeSinkSubtaskExecutor extends PipeSubtaskExecutor { + private static final AtomicInteger id = new AtomicInteger(0); + + public PipeSinkSubtaskExecutor() { super( PipeConfig.getInstance().getPipeSubtaskExecutorMaxThreadNum(), - ThreadName.PIPE_CONNECTOR_EXECUTOR_POOL, + ThreadName.PIPE_CONNECTOR_EXECUTOR_POOL.getName() + "-" + id.get(), + ThreadName.PIPE_SUBTASK_CALLBACK_EXECUTOR_POOL.getName() + "-" + id.getAndIncrement(), true); } - public PipeConnectorSubtaskExecutor(final int corePoolSize, final ThreadName threadName) { + public PipeSinkSubtaskExecutor(final int corePoolSize, final String threadName) { super(corePoolSize, threadName, true); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/execution/PipeSubtaskExecutorManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeSubtaskExecutorManager.java similarity index 67% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/execution/PipeSubtaskExecutorManager.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeSubtaskExecutorManager.java index 78db893152293..61edbe5d60f00 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/execution/PipeSubtaskExecutorManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeSubtaskExecutorManager.java @@ -17,10 +17,13 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.execution; +package org.apache.iotdb.db.pipe.agent.task.execution; +import org.apache.iotdb.commons.subscription.config.SubscriptionConfig; import org.apache.iotdb.db.pipe.consensus.PipeConsensusSubtaskExecutor; -import org.apache.iotdb.db.subscription.execution.executor.SubscriptionSubtaskExecutor; +import org.apache.iotdb.db.subscription.task.execution.SubscriptionSubtaskExecutor; + +import java.util.function.Supplier; /** * PipeTaskExecutor is responsible for executing the pipe tasks, and it is scheduled by the @@ -28,33 +31,36 @@ */ public class PipeSubtaskExecutorManager { private final PipeProcessorSubtaskExecutor processorExecutor; - private final PipeConnectorSubtaskExecutor connectorExecutor; + private final Supplier connectorExecutorSupplier; private final SubscriptionSubtaskExecutor subscriptionExecutor; - private final PipeConsensusSubtaskExecutor consensusExecutor; + private final Supplier consensusExecutorSupplier; public PipeProcessorSubtaskExecutor getProcessorExecutor() { return processorExecutor; } - public PipeConnectorSubtaskExecutor getConnectorExecutor() { - return connectorExecutor; + public Supplier getConnectorExecutorSupplier() { + return connectorExecutorSupplier; } public SubscriptionSubtaskExecutor getSubscriptionExecutor() { return subscriptionExecutor; } - public PipeConsensusSubtaskExecutor getConsensusExecutor() { - return consensusExecutor; + public Supplier getConsensusExecutorSupplier() { + return consensusExecutorSupplier; } ///////////////////////// Singleton Instance Holder ///////////////////////// private PipeSubtaskExecutorManager() { processorExecutor = new PipeProcessorSubtaskExecutor(); - connectorExecutor = new PipeConnectorSubtaskExecutor(); - subscriptionExecutor = new SubscriptionSubtaskExecutor(); - consensusExecutor = new PipeConsensusSubtaskExecutor(); + connectorExecutorSupplier = PipeSinkSubtaskExecutor::new; + subscriptionExecutor = + SubscriptionConfig.getInstance().getSubscriptionEnabled() + ? new SubscriptionSubtaskExecutor() + : null; + consensusExecutorSupplier = PipeConsensusSubtaskExecutor::new; } private static class PipeTaskExecutorHolder { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/stage/PipeTaskProcessorStage.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/stage/PipeTaskProcessorStage.java similarity index 86% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/stage/PipeTaskProcessorStage.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/stage/PipeTaskProcessorStage.java index e7ab0cb1997a0..ddc194716d2d1 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/stage/PipeTaskProcessorStage.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/stage/PipeTaskProcessorStage.java @@ -17,21 +17,21 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.task.stage; +package org.apache.iotdb.db.pipe.agent.task.stage; import org.apache.iotdb.commons.consensus.DataRegionId; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.agent.task.connection.EventSupplier; +import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.task.stage.PipeTaskStage; import org.apache.iotdb.commons.pipe.config.constant.PipeProcessorConstant; import org.apache.iotdb.commons.pipe.config.plugin.configuraion.PipeTaskRuntimeConfiguration; import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskProcessorRuntimeEnvironment; -import org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin; -import org.apache.iotdb.commons.pipe.task.EventSupplier; -import org.apache.iotdb.commons.pipe.task.connection.UnboundedBlockingPendingQueue; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; -import org.apache.iotdb.commons.pipe.task.stage.PipeTaskStage; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; -import org.apache.iotdb.db.pipe.execution.PipeProcessorSubtaskExecutor; -import org.apache.iotdb.db.pipe.task.connection.PipeEventCollector; -import org.apache.iotdb.db.pipe.task.subtask.processor.PipeProcessorSubtask; +import org.apache.iotdb.db.pipe.agent.task.connection.PipeEventCollector; +import org.apache.iotdb.db.pipe.agent.task.execution.PipeProcessorSubtaskExecutor; +import org.apache.iotdb.db.pipe.agent.task.subtask.processor.PipeProcessorSubtask; import org.apache.iotdb.db.storageengine.StorageEngine; import org.apache.iotdb.db.storageengine.dataregion.DataRegion; import org.apache.iotdb.pipe.api.PipeConnector; @@ -68,7 +68,8 @@ public PipeTaskProcessorStage( final UnboundedBlockingPendingQueue pipeConnectorOutputPendingQueue, final PipeProcessorSubtaskExecutor executor, final PipeTaskMeta pipeTaskMeta, - final boolean forceTabletFormat) { + final boolean forceTabletFormat, + final boolean skipParsing) { final PipeProcessorRuntimeConfiguration runtimeConfiguration = new PipeTaskRuntimeConfiguration( new PipeTaskProcessorRuntimeEnvironment( @@ -100,12 +101,16 @@ public PipeTaskProcessorStage( final String taskId = pipeName + "_" + regionId + "_" + creationTime; final PipeEventCollector pipeConnectorOutputEventCollector = new PipeEventCollector( - pipeConnectorOutputPendingQueue, creationTime, regionId, forceTabletFormat); + pipeConnectorOutputPendingQueue, + creationTime, + regionId, + forceTabletFormat, + skipParsing); this.pipeProcessorSubtask = new PipeProcessorSubtask( taskId, - creationTime, pipeName, + creationTime, regionId, pipeExtractorInputEventSupplier, pipeProcessor, diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/stage/PipeTaskConnectorStage.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/stage/PipeTaskSinkStage.java similarity index 66% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/stage/PipeTaskConnectorStage.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/stage/PipeTaskSinkStage.java index a5ec4fed78bb1..c24db53e6106a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/stage/PipeTaskConnectorStage.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/stage/PipeTaskSinkStage.java @@ -17,33 +17,35 @@ * under the 
License. */ -package org.apache.iotdb.db.pipe.task.stage; +package org.apache.iotdb.db.pipe.agent.task.stage; -import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskConnectorRuntimeEnvironment; -import org.apache.iotdb.commons.pipe.task.connection.UnboundedBlockingPendingQueue; -import org.apache.iotdb.commons.pipe.task.stage.PipeTaskStage; -import org.apache.iotdb.db.pipe.execution.PipeConnectorSubtaskExecutor; -import org.apache.iotdb.db.pipe.task.subtask.connector.PipeConnectorSubtaskManager; +import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue; +import org.apache.iotdb.commons.pipe.agent.task.stage.PipeTaskStage; +import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskSinkRuntimeEnvironment; +import org.apache.iotdb.db.pipe.agent.task.execution.PipeSinkSubtaskExecutor; +import org.apache.iotdb.db.pipe.agent.task.subtask.sink.PipeSinkSubtaskManager; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; import org.apache.iotdb.pipe.api.event.Event; import org.apache.iotdb.pipe.api.exception.PipeException; -public class PipeTaskConnectorStage extends PipeTaskStage { +import java.util.function.Supplier; + +public class PipeTaskSinkStage extends PipeTaskStage { protected final String pipeName; protected final long creationTime; protected final PipeParameters pipeConnectorParameters; protected final int regionId; - protected final PipeConnectorSubtaskExecutor executor; + protected final Supplier executor; protected String connectorSubtaskId; - public PipeTaskConnectorStage( + public PipeTaskSinkStage( String pipeName, long creationTime, PipeParameters pipeConnectorParameters, int regionId, - PipeConnectorSubtaskExecutor executor) { + Supplier executor) { this.pipeName = pipeName; this.creationTime = creationTime; this.pipeConnectorParameters = pipeConnectorParameters; @@ -55,11 +57,11 @@ public PipeTaskConnectorStage( protected void registerSubtask() { this.connectorSubtaskId = - PipeConnectorSubtaskManager.instance() + PipeSinkSubtaskManager.instance() .register( executor, pipeConnectorParameters, - new PipeTaskConnectorRuntimeEnvironment(pipeName, creationTime, regionId)); + new PipeTaskSinkRuntimeEnvironment(pipeName, creationTime, regionId)); } @Override @@ -69,21 +71,21 @@ public void createSubtask() throws PipeException { @Override public void startSubtask() throws PipeException { - PipeConnectorSubtaskManager.instance().start(connectorSubtaskId); + PipeSinkSubtaskManager.instance().start(connectorSubtaskId); } @Override public void stopSubtask() throws PipeException { - PipeConnectorSubtaskManager.instance().stop(connectorSubtaskId); + PipeSinkSubtaskManager.instance().stop(connectorSubtaskId); } @Override public void dropSubtask() throws PipeException { - PipeConnectorSubtaskManager.instance() + PipeSinkSubtaskManager.instance() .deregister(pipeName, creationTime, regionId, connectorSubtaskId); } public UnboundedBlockingPendingQueue getPipeConnectorPendingQueue() { - return PipeConnectorSubtaskManager.instance().getPipeConnectorPendingQueue(connectorSubtaskId); + return PipeSinkSubtaskManager.instance().getPipeConnectorPendingQueue(connectorSubtaskId); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/stage/PipeTaskExtractorStage.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/stage/PipeTaskSourceStage.java similarity index 85% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/stage/PipeTaskExtractorStage.java rename to 
iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/stage/PipeTaskSourceStage.java index 32b8ca8f77543..dbc35aeb466f2 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/stage/PipeTaskExtractorStage.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/stage/PipeTaskSourceStage.java @@ -17,14 +17,14 @@ * under the License. */ -package org.apache.iotdb.db.pipe.task.stage; +package org.apache.iotdb.db.pipe.agent.task.stage; import org.apache.iotdb.commons.consensus.DataRegionId; +import org.apache.iotdb.commons.pipe.agent.task.connection.EventSupplier; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.task.stage.PipeTaskStage; import org.apache.iotdb.commons.pipe.config.plugin.configuraion.PipeTaskRuntimeConfiguration; -import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskExtractorRuntimeEnvironment; -import org.apache.iotdb.commons.pipe.task.EventSupplier; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; -import org.apache.iotdb.commons.pipe.task.stage.PipeTaskStage; +import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskSourceRuntimeEnvironment; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; import org.apache.iotdb.db.storageengine.StorageEngine; import org.apache.iotdb.pipe.api.PipeExtractor; @@ -35,13 +35,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class PipeTaskExtractorStage extends PipeTaskStage { +public class PipeTaskSourceStage extends PipeTaskStage { - private static final Logger LOGGER = LoggerFactory.getLogger(PipeTaskExtractorStage.class); + private static final Logger LOGGER = LoggerFactory.getLogger(PipeTaskSourceStage.class); private final PipeExtractor pipeExtractor; - public PipeTaskExtractorStage( + public PipeTaskSourceStage( String pipeName, long creationTime, PipeParameters extractorParameters, @@ -61,8 +61,7 @@ public PipeTaskExtractorStage( // 2. Customize extractor final PipeTaskRuntimeConfiguration runtimeConfiguration = new PipeTaskRuntimeConfiguration( - new PipeTaskExtractorRuntimeEnvironment( - pipeName, creationTime, regionId, pipeTaskMeta)); + new PipeTaskSourceRuntimeEnvironment(pipeName, creationTime, regionId, pipeTaskMeta)); pipeExtractor.customize(extractorParameters, runtimeConfiguration); } catch (Exception e) { try { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/processor/PipeProcessorSubtask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtask.java similarity index 87% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/processor/PipeProcessorSubtask.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtask.java index 812b47a978ac0..d39feb96fe098 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/processor/PipeProcessorSubtask.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtask.java @@ -17,23 +17,23 @@ * under the License. 
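PipeTaskSourceStage customizes its extractor inside a try block and, on failure, closes the half-initialized plugin before rethrowing so no resources leak. The pattern in isolation, sketched with stand-in types:

    interface PluginSketch extends AutoCloseable {
      void customize(String parameters) throws Exception;
    }

    final class CustomizeSketch {
      static void customizeOrClose(final PluginSketch plugin, final String parameters) {
        try {
          plugin.customize(parameters);
        } catch (final Exception e) {
          try {
            // Clean up the partially initialized plugin before propagating the failure.
            plugin.close();
          } catch (final Exception closeException) {
            e.addSuppressed(closeException);
          }
          throw new IllegalStateException("Failed to customize plugin", e);
        }
      }
    }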
*/ -package org.apache.iotdb.db.pipe.task.subtask.processor; +package org.apache.iotdb.db.pipe.agent.task.subtask.processor; import org.apache.iotdb.commons.consensus.DataRegionId; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeException; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeOutOfMemoryCriticalException; +import org.apache.iotdb.commons.pipe.agent.task.connection.EventSupplier; +import org.apache.iotdb.commons.pipe.agent.task.execution.PipeSubtaskScheduler; +import org.apache.iotdb.commons.pipe.agent.task.progress.PipeEventCommitManager; +import org.apache.iotdb.commons.pipe.agent.task.subtask.PipeReportableSubtask; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.commons.pipe.execution.scheduler.PipeSubtaskScheduler; -import org.apache.iotdb.commons.pipe.progress.PipeEventCommitManager; -import org.apache.iotdb.commons.pipe.task.EventSupplier; -import org.apache.iotdb.commons.pipe.task.subtask.PipeReportableSubtask; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; +import org.apache.iotdb.db.pipe.agent.task.connection.PipeEventCollector; import org.apache.iotdb.db.pipe.event.UserDefinedEnrichedEvent; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; -import org.apache.iotdb.db.pipe.metric.PipeDataNodeRemainingEventAndTimeMetrics; -import org.apache.iotdb.db.pipe.metric.PipeProcessorMetrics; +import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics; +import org.apache.iotdb.db.pipe.metric.processor.PipeProcessorMetrics; import org.apache.iotdb.db.pipe.processor.pipeconsensus.PipeConsensusProcessor; -import org.apache.iotdb.db.pipe.task.connection.PipeEventCollector; import org.apache.iotdb.db.storageengine.StorageEngine; import org.apache.iotdb.db.utils.ErrorHandlingUtils; import org.apache.iotdb.pipe.api.PipeProcessor; @@ -57,38 +57,39 @@ public class PipeProcessorSubtask extends PipeReportableSubtask { private static final AtomicReference subtaskWorkerManager = new AtomicReference<>(); - private final EventSupplier inputEventSupplier; - private final PipeProcessor pipeProcessor; - private final PipeEventCollector outputEventCollector; - // Record these variables to provide corresponding value to tag key of monitoring metrics private final String pipeName; + private final String pipeNameWithCreationTime; // cache for better performance private final int regionId; + private final EventSupplier inputEventSupplier; + private final PipeProcessor pipeProcessor; + private final PipeEventCollector outputEventCollector; + // This variable is used to distinguish between old and new subtasks before and after stuck // restart. 
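Note the new pipeNameWithCreationTime field above: the metrics tag is concatenated once in the constructor rather than on every processed event. The caching idea as a tiny sketch, with illustrative names:

    final class TagKeyCacheSketch {
      private final String pipeName;
      private final String pipeNameWithCreationTime; // cached once; avoids per-event concatenation

      TagKeyCacheSketch(final String pipeName, final long creationTime) {
        this.pipeName = pipeName;
        this.pipeNameWithCreationTime = pipeName + "_" + creationTime;
      }

      String pipeName() {
        return pipeName;
      }

      String metricsTag() {
        return pipeNameWithCreationTime;
      }
    }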
private final long subtaskCreationTime; public PipeProcessorSubtask( final String taskID, - final long creationTime, final String pipeName, + final long creationTime, final int regionId, final EventSupplier inputEventSupplier, final PipeProcessor pipeProcessor, final PipeEventCollector outputEventCollector) { super(taskID, creationTime); - this.subtaskCreationTime = System.currentTimeMillis(); this.pipeName = pipeName; + this.pipeNameWithCreationTime = pipeName + "_" + creationTime; this.regionId = regionId; this.inputEventSupplier = inputEventSupplier; this.pipeProcessor = pipeProcessor; this.outputEventCollector = outputEventCollector; + this.subtaskCreationTime = System.currentTimeMillis(); // Only register dataRegions if (StorageEngine.getInstance().getAllDataRegionIds().contains(new DataRegionId(regionId))) { PipeProcessorMetrics.getInstance().register(this); - PipeDataNodeRemainingEventAndTimeMetrics.getInstance().register(this); } } @@ -129,20 +130,19 @@ protected boolean executeOnce() throws Exception { return false; } - outputEventCollector.resetCollectInvocationCountAndGenerateFlag(); + outputEventCollector.resetFlags(); try { // event can be supplied after the subtask is closed, so we need to check isClosed here if (!isClosed.get()) { if (event instanceof TabletInsertionEvent) { pipeProcessor.process((TabletInsertionEvent) event, outputEventCollector); PipeProcessorMetrics.getInstance().markTabletEvent(taskID); - PipeDataNodeRemainingEventAndTimeMetrics.getInstance() - .markCollectInvocationCount(taskID, outputEventCollector.getCollectInvocationCount()); } else if (event instanceof TsFileInsertionEvent) { pipeProcessor.process((TsFileInsertionEvent) event, outputEventCollector); PipeProcessorMetrics.getInstance().markTsFileEvent(taskID); - PipeDataNodeRemainingEventAndTimeMetrics.getInstance() - .markCollectInvocationCount(taskID, outputEventCollector.getCollectInvocationCount()); + PipeDataNodeSinglePipeMetrics.getInstance() + .markTsFileCollectInvocationCount( + pipeNameWithCreationTime, outputEventCollector.getCollectInvocationCount()); } else if (event instanceof PipeHeartbeatEvent) { pipeProcessor.process(event, outputEventCollector); ((PipeHeartbeatEvent) event).onProcessed(); @@ -168,6 +168,9 @@ protected boolean executeOnce() throws Exception { // of the event must be zero in the processor stage, at this time, the progress of the // event needs to be reported. && outputEventCollector.hasNoGeneratedEvent() + // If the event's reference count cannot be increased, it means that the event has + // been released, and the progress of the event can not be reported. + && !outputEventCollector.isFailedToIncreaseReferenceCount() // Events generated from consensusPipe's transferred data should never be reported. 
&& !(pipeProcessor instanceof PipeConsensusProcessor); if (shouldReport @@ -182,7 +185,7 @@ protected boolean executeOnce() throws Exception { PipeEventCommitManager.getInstance() .enrichWithCommitterKeyAndCommitId((EnrichedEvent) event, creationTime, regionId); } - decreaseReferenceCountAndReleaseLastEvent(shouldReport); + decreaseReferenceCountAndReleaseLastEvent(event, shouldReport); } catch (final PipeRuntimeOutOfMemoryCriticalException e) { LOGGER.info( "Temporarily out of memory in pipe event processing, will wait for the memory to release.", @@ -201,7 +204,7 @@ protected boolean executeOnce() throws Exception { e); } else { LOGGER.info("Exception in pipe event processing, ignored because pipe is dropped.", e); - clearReferenceCountAndReleaseLastEvent(); + clearReferenceCountAndReleaseLastEvent(event); } } @@ -225,10 +228,9 @@ public void close() { PipeProcessorMetrics.getInstance().deregister(taskID); try { isClosed.set(true); - - // pipeProcessor closes first, then no more events will be added into outputEventCollector. - // only after that, outputEventCollector can be closed. pipeProcessor.close(); + // It is important to note that even if the subtask and its corresponding processor are + // closed, the execution thread may still deliver events downstream. } catch (final Exception e) { LOGGER.info( "Exception occurred when closing pipe processor subtask {}, root cause: {}", diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/processor/PipeProcessorSubtaskWorker.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtaskWorker.java similarity index 89% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/processor/PipeProcessorSubtaskWorker.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtaskWorker.java index 684fa5ebb0c9a..b9584d2c586b3 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/processor/PipeProcessorSubtaskWorker.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtaskWorker.java @@ -17,7 +17,7 @@ * under the License. 
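The progress-report guard assembled in executeOnce above combines three independent checks. Isolated as a predicate, with booleans standing in for the real collaborators:

    final class ReportGuardSketch {
      // Progress is reported only if the processor emitted nothing for the event,
      // the event's reference count could still be increased (it was not released),
      // and the event did not come from a consensus-pipe processor.
      static boolean shouldReport(
          final boolean hasNoGeneratedEvent,
          final boolean failedToIncreaseReferenceCount,
          final boolean isConsensusProcessor) {
        return hasNoGeneratedEvent && !failedToIncreaseReferenceCount && !isConsensusProcessor;
      }
    }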
*/ -package org.apache.iotdb.db.pipe.task.subtask.processor; +package org.apache.iotdb.db.pipe.agent.task.subtask.processor; import org.apache.iotdb.commons.concurrent.WrappedRunnable; @@ -27,15 +27,11 @@ import java.util.Collections; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import java.util.stream.Collectors; public class PipeProcessorSubtaskWorker extends WrappedRunnable { private static final Logger LOGGER = LoggerFactory.getLogger(PipeProcessorSubtaskWorker.class); - private static final long CLOSED_SUBTASK_CLEANUP_ROUND_INTERVAL = 1000; - private long closedSubtaskCleanupRoundCounter = 0; - private static final int SLEEP_INTERVAL_ADJUSTMENT_ROUND_INTERVAL = 100; private int totalRoundInAdjustmentInterval = 0; private int workingRoundInAdjustmentInterval = 0; @@ -56,12 +52,7 @@ public void runMayThrow() { } private void cleanupClosedSubtasksIfNecessary() { - if (++closedSubtaskCleanupRoundCounter % CLOSED_SUBTASK_CLEANUP_ROUND_INTERVAL == 0) { - subtasks.stream() - .filter(PipeProcessorSubtask::isClosed) - .collect(Collectors.toList()) - .forEach(subtasks::remove); - } + subtasks.removeIf(PipeProcessorSubtask::isClosed); } private boolean runSubtasks() { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/processor/PipeProcessorSubtaskWorkerManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtaskWorkerManager.java similarity index 96% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/processor/PipeProcessorSubtaskWorkerManager.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtaskWorkerManager.java index a7c0736332126..33d58c4b5d491 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/processor/PipeProcessorSubtaskWorkerManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtaskWorkerManager.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.db.pipe.task.subtask.processor; +package org.apache.iotdb.db.pipe.agent.task.subtask.processor; import org.apache.iotdb.commons.pipe.config.PipeConfig; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeRealtimePriorityBlockingQueue.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeRealtimePriorityBlockingQueue.java new file mode 100644 index 0000000000000..2baebeedc180e --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeRealtimePriorityBlockingQueue.java @@ -0,0 +1,399 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
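The worker cleanup above is also simplified: instead of collecting closed subtasks into a temporary list every thousand rounds, removeIf now prunes the concurrent set in a single pass each round. A self-contained sketch:

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    final class CleanupSketch {
      interface Subtask {
        boolean isClosed();
      }

      private final Set<Subtask> subtasks = ConcurrentHashMap.newKeySet();

      void cleanupClosedSubtasks() {
        // Safe on a concurrent set; no intermediate list is materialized.
        subtasks.removeIf(Subtask::isClosed);
      }
    }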
See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.pipe.agent.task.subtask.sink;
+
+import org.apache.iotdb.commons.pipe.agent.task.connection.BlockingPendingQueue;
+import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue;
+import org.apache.iotdb.commons.pipe.agent.task.progress.CommitterKey;
+import org.apache.iotdb.commons.pipe.config.PipeConfig;
+import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
+import org.apache.iotdb.db.pipe.agent.task.connection.PipeEventCollector;
+import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
+import org.apache.iotdb.db.pipe.event.common.tsfile.PipeCompactedTsFileInsertionEvent;
+import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent;
+import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionEventCounter;
+import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
+import org.apache.iotdb.pipe.api.event.Event;
+import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.BlockingDeque;
+import java.util.concurrent.LinkedBlockingDeque;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+
+public class PipeRealtimePriorityBlockingQueue extends UnboundedBlockingPendingQueue<Event> {
+
+  private static final Logger LOGGER =
+      LoggerFactory.getLogger(PipeRealtimePriorityBlockingQueue.class);
+
+  private static final PipeConfig PIPE_CONFIG = PipeConfig.getInstance();
+
+  private final BlockingDeque<TsFileInsertionEvent> tsfileInsertEventDeque =
+      new LinkedBlockingDeque<>();
+
+  private final AtomicInteger pollTsFileCounter = new AtomicInteger(0);
+
+  private final AtomicLong pollHistoricalTsFileCounter = new AtomicLong(0);
+
+  // Initialized to a non-null default so that reads before setOfferTsFileCounter(...) cannot NPE
+  private AtomicInteger offerTsFileCounter = new AtomicInteger(0);
+
+  public PipeRealtimePriorityBlockingQueue() {
+    super(new PipeDataRegionEventCounter());
+  }
+
+  @Override
+  public boolean directOffer(final Event event) {
+    checkBeforeOffer(event);
+
+    if (event instanceof TsFileInsertionEvent) {
+      tsfileInsertEventDeque.add((TsFileInsertionEvent) event);
+      return true;
+    }
+
+    if (event instanceof PipeHeartbeatEvent && super.peekLast() instanceof PipeHeartbeatEvent) {
+      // We can NOT keep too many PipeHeartbeatEvent in bufferQueue because they may cause OOM.
+      ((EnrichedEvent) event).decreaseReferenceCount(PipeEventCollector.class.getName(), false);
+      return false;
+    } else {
+      return super.directOffer(event);
+    }
+  }
+
+  @Override
+  public boolean waitedOffer(final Event event) {
+    return directOffer(event);
+  }
+
+  @Override
+  public boolean put(final Event event) {
+    directOffer(event);
+    return true;
+  }
+
+  @Override
+  public Event directPoll() {
+    Event event = null;
+    final int pollHistoricalTsFileThreshold =
+        PIPE_CONFIG.getPipeRealTimeQueuePollHistoricalTsFileThreshold();
+    final int realTimeQueueMaxWaitingTsFileSize =
+        PIPE_CONFIG.getPipeRealTimeQueueMaxWaitingTsFileSize();
+
+    if (pollTsFileCounter.get() >= PIPE_CONFIG.getPipeRealTimeQueuePollTsFileThreshold()
+        && offerTsFileCounter.get() < realTimeQueueMaxWaitingTsFileSize) {
+      event =
+          pollHistoricalTsFileCounter.incrementAndGet() % pollHistoricalTsFileThreshold == 0
+              ? tsfileInsertEventDeque.pollFirst()
+              : tsfileInsertEventDeque.pollLast();
+      pollTsFileCounter.set(0);
+    }
+
+    if (Objects.isNull(event)) {
+      // Sequentially poll the first offered non-TsFileInsertionEvent
+      event = super.directPoll();
+      if (Objects.isNull(event) && offerTsFileCounter.get() < realTimeQueueMaxWaitingTsFileSize) {
+        event =
+            pollHistoricalTsFileCounter.incrementAndGet() % pollHistoricalTsFileThreshold == 0
+                ? tsfileInsertEventDeque.pollFirst()
+                : tsfileInsertEventDeque.pollLast();
+      }
+      if (event != null) {
+        pollTsFileCounter.incrementAndGet();
+      }
+    }
+
+    return event;
+  }
+
+  /**
+   * When the number of TsFile polls reaches the configured threshold, the {@link
+   * TsFileInsertionEvent} written to the queue earliest is returned; otherwise an attempt is made
+   * to poll the freshest insertion {@link Event}: first the earliest offered
+   * non-TsFileInsertionEvent, and if there is no such {@link Event}, the last supplied {@link
+   * TsFileInsertionEvent}. If no {@link Event} is available at all, this method blocks until an
+   * {@link Event} becomes available.
+   *
+   * @return the freshest insertion {@link Event}; may be {@code null} if no {@link Event} is
+   *     available
+   */
+  @Override
+  public Event waitedPoll() {
+    Event event = null;
+    final int pollHistoricalTsFileThreshold =
+        PIPE_CONFIG.getPipeRealTimeQueuePollHistoricalTsFileThreshold();
+    final int realTimeQueueMaxWaitingTsFileSize =
+        PIPE_CONFIG.getPipeRealTimeQueueMaxWaitingTsFileSize();
+
+    if (pollTsFileCounter.get() >= PIPE_CONFIG.getPipeRealTimeQueuePollTsFileThreshold()
+        && offerTsFileCounter.get() < realTimeQueueMaxWaitingTsFileSize) {
+      event =
+          pollHistoricalTsFileCounter.incrementAndGet() % pollHistoricalTsFileThreshold == 0
+              ? tsfileInsertEventDeque.pollFirst()
+              : tsfileInsertEventDeque.pollLast();
+      pollTsFileCounter.set(0);
+    }
+    if (event == null) {
+      // Sequentially poll the first offered non-TsFileInsertionEvent
+      event = super.directPoll();
+      if (event == null && !tsfileInsertEventDeque.isEmpty()) {
+        event =
+            pollHistoricalTsFileCounter.incrementAndGet() % pollHistoricalTsFileThreshold == 0
+                ? tsfileInsertEventDeque.pollFirst()
+                : tsfileInsertEventDeque.pollLast();
+      }
+      if (event != null) {
+        pollTsFileCounter.incrementAndGet();
+      }
+    }
+
+    // If no event is available, block until an event is available
+    if (Objects.isNull(event) && offerTsFileCounter.get() < realTimeQueueMaxWaitingTsFileSize) {
+      event = super.waitedPoll();
+      if (Objects.isNull(event)) {
+        event =
+            pollHistoricalTsFileCounter.incrementAndGet() % pollHistoricalTsFileThreshold == 0
+                ?
tsfileInsertEventDeque.pollFirst() + : tsfileInsertEventDeque.pollLast(); + } + if (event != null) { + pollTsFileCounter.incrementAndGet(); + } + } + + return event; + } + + @Override + public Event peek() { + final Event event = pendingQueue.peek(); + if (Objects.nonNull(event)) { + return event; + } + return tsfileInsertEventDeque.peek(); + } + + public synchronized void replace( + String dataRegionId, Set sourceFiles, List targetFiles) { + + final int regionId = Integer.parseInt(dataRegionId); + final Map> eventsToBeRemovedGroupByCommitterKey = + tsfileInsertEventDeque.stream() + .filter( + event -> + event instanceof PipeTsFileInsertionEvent + && ((PipeTsFileInsertionEvent) event).getRegionId() == regionId) + .map(event -> (PipeTsFileInsertionEvent) event) + .collect( + Collectors.groupingBy( + PipeTsFileInsertionEvent::getCommitterKey, Collectors.toSet())) + .entrySet() + .stream() + // Replace if all source files are present in the queue + .filter(entry -> entry.getValue().size() == sourceFiles.size()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + if (eventsToBeRemovedGroupByCommitterKey.isEmpty()) { + LOGGER.info( + "Region {}: No TsFileInsertionEvents to replace for source files {}", + regionId, + sourceFiles.stream() + .map(TsFileResource::getTsFilePath) + .collect(Collectors.joining(", "))); + return; + } + + final Map> eventsToBeAddedGroupByCommitterKey = + new HashMap<>(); + for (final Map.Entry> entry : + eventsToBeRemovedGroupByCommitterKey.entrySet()) { + final CommitterKey committerKey = entry.getKey(); + final PipeTsFileInsertionEvent anyEvent = entry.getValue().stream().findFirst().orElse(null); + final Set newEvents = new HashSet<>(); + for (int i = 0; i < targetFiles.size(); i++) { + newEvents.add( + new PipeCompactedTsFileInsertionEvent( + committerKey, + entry.getValue(), + anyEvent, + targetFiles.get(i), + i == targetFiles.size() - 1)); + } + eventsToBeAddedGroupByCommitterKey.put(committerKey, newEvents); + } + + // Handling new events + final Set successfullyReferenceIncreasedEvents = new HashSet<>(); + final AtomicBoolean + allSuccess = // To track if all events successfully increased the reference count + new AtomicBoolean(true); + outerLoop: + for (final Map.Entry> committerKeySetEntry : + eventsToBeAddedGroupByCommitterKey.entrySet()) { + for (final PipeTsFileInsertionEvent event : committerKeySetEntry.getValue()) { + if (event != null) { + try { + if (!event.increaseReferenceCount(PipeRealtimePriorityBlockingQueue.class.getName())) { + allSuccess.set(false); + break outerLoop; + } else { + successfullyReferenceIncreasedEvents.add(event); + } + } catch (final Exception e) { + allSuccess.set(false); + break outerLoop; + } + } + } + } + if (!allSuccess.get()) { + // If any event failed to increase the reference count, + // we need to decrease the reference count for all successfully increased events + for (final PipeTsFileInsertionEvent event : successfullyReferenceIncreasedEvents) { + try { + event.decreaseReferenceCount(PipeRealtimePriorityBlockingQueue.class.getName(), false); + } catch (final Exception e) { + LOGGER.warn( + "Failed to decrease reference count for event {} in PipeRealtimePriorityBlockingQueue", + event, + e); + } + } + return; // Exit early if any event failed to increase the reference count + } else { + // If all events successfully increased reference count, + // we can proceed to add them to the deque + for (final PipeTsFileInsertionEvent event : successfullyReferenceIncreasedEvents) { + 
tsfileInsertEventDeque.add(event); + eventCounter.increaseEventCount(event); + } + } + + // Handling old events + for (final Map.Entry> entry : + eventsToBeRemovedGroupByCommitterKey.entrySet()) { + for (final PipeTsFileInsertionEvent event : entry.getValue()) { + if (event != null) { + try { + event.decreaseReferenceCount(PipeRealtimePriorityBlockingQueue.class.getName(), false); + } catch (final Exception e) { + LOGGER.warn( + "Failed to decrease reference count for event {} in PipeRealtimePriorityBlockingQueue", + event, + e); + } + eventCounter.decreaseEventCount(event); + } + } + } + final Set eventsToRemove = new HashSet<>(); + for (Set pipeTsFileInsertionEvents : + eventsToBeRemovedGroupByCommitterKey.values()) { + eventsToRemove.addAll(pipeTsFileInsertionEvents); + } + tsfileInsertEventDeque.removeIf(eventsToRemove::contains); + + LOGGER.info( + "Region {}: Replaced TsFileInsertionEvents {} with {}", + regionId, + eventsToBeRemovedGroupByCommitterKey.values().stream() + .flatMap(Set::stream) + .map(PipeTsFileInsertionEvent::coreReportMessage) + .collect(Collectors.joining(", ")), + eventsToBeAddedGroupByCommitterKey.values().stream() + .flatMap(Set::stream) + .map(PipeTsFileInsertionEvent::coreReportMessage) + .collect(Collectors.joining(", "))); + } + + @Override + public void clear() { + super.clear(); + tsfileInsertEventDeque.clear(); + } + + @Override + public void forEach(final Consumer action) { + super.forEach(action); + tsfileInsertEventDeque.forEach(action); + } + + @Override + public void discardAllEvents() { + super.discardAllEvents(); + tsfileInsertEventDeque.removeIf( + event -> { + if (event instanceof EnrichedEvent) { + if (((EnrichedEvent) event).clearReferenceCount(BlockingPendingQueue.class.getName())) { + eventCounter.decreaseEventCount(event); + } + } + return true; + }); + eventCounter.reset(); + } + + @Override + public void discardEventsOfPipe(final String pipeNameToDrop, final int regionId) { + super.discardEventsOfPipe(pipeNameToDrop, regionId); + tsfileInsertEventDeque.removeIf( + event -> { + if (event instanceof EnrichedEvent + && pipeNameToDrop.equals(((EnrichedEvent) event).getPipeName()) + && regionId == ((EnrichedEvent) event).getRegionId()) { + if (((EnrichedEvent) event) + .clearReferenceCount(PipeRealtimePriorityBlockingQueue.class.getName())) { + eventCounter.decreaseEventCount(event); + } + return true; + } + return false; + }); + } + + @Override + public boolean isEmpty() { + return super.isEmpty() && tsfileInsertEventDeque.isEmpty(); + } + + @Override + public int size() { + return super.size() + tsfileInsertEventDeque.size(); + } + + @Override + public int getTsFileInsertionEventCount() { + return tsfileInsertEventDeque.size(); + } + + public synchronized void setOfferTsFileCounter(AtomicInteger offerTsFileCounter) { + this.offerTsFileCounter = offerTsFileCounter; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/connector/PipeConnectorSubtask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtask.java similarity index 52% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/connector/PipeConnectorSubtask.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtask.java index cc78ebdea54cc..acfa13c68c5f6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/connector/PipeConnectorSubtask.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtask.java @@ -17,23 +17,24 @@ * under the License. */ -package org.apache.iotdb.db.pipe.task.subtask.connector; +package org.apache.iotdb.db.pipe.agent.task.subtask.sink; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeException; -import org.apache.iotdb.commons.pipe.config.PipeConfig; +import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue; +import org.apache.iotdb.commons.pipe.agent.task.subtask.PipeAbstractSinkSubtask; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.commons.pipe.task.connection.UnboundedBlockingPendingQueue; -import org.apache.iotdb.commons.pipe.task.subtask.PipeAbstractConnectorSubtask; +import org.apache.iotdb.commons.pipe.sink.protocol.IoTDBSink; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; -import org.apache.iotdb.db.pipe.connector.protocol.thrift.async.IoTDBDataRegionAsyncConnector; import org.apache.iotdb.db.pipe.event.UserDefinedEnrichedEvent; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent; -import org.apache.iotdb.db.pipe.metric.PipeDataRegionConnectorMetrics; -import org.apache.iotdb.db.pipe.metric.PipeSchemaRegionConnectorMetrics; -import org.apache.iotdb.db.pipe.task.connection.PipeEventCollector; +import org.apache.iotdb.db.pipe.metric.schema.PipeSchemaRegionSinkMetrics; +import org.apache.iotdb.db.pipe.metric.sink.PipeDataRegionSinkMetrics; +import org.apache.iotdb.db.pipe.sink.protocol.thrift.async.IoTDBDataRegionAsyncSink; +import org.apache.iotdb.db.pipe.sink.protocol.thrift.sync.IoTDBDataRegionSyncSink; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeType; import org.apache.iotdb.db.utils.ErrorHandlingUtils; +import org.apache.iotdb.metrics.type.Histogram; import org.apache.iotdb.pipe.api.PipeConnector; import org.apache.iotdb.pipe.api.event.Event; import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; @@ -45,11 +46,10 @@ import org.slf4j.LoggerFactory; import java.util.Objects; -import java.util.concurrent.atomic.AtomicInteger; -public class PipeConnectorSubtask extends PipeAbstractConnectorSubtask { +public class PipeSinkSubtask extends PipeAbstractSinkSubtask { - private static final Logger LOGGER = LoggerFactory.getLogger(PipeConnectorSubtask.class); + private static final Logger LOGGER = LoggerFactory.getLogger(PipeSinkSubtask.class); // For input protected final UnboundedBlockingPendingQueue inputPendingQueue; @@ -64,11 +64,8 @@ public class PipeConnectorSubtask extends PipeAbstractConnectorSubtask { // when no event can be pulled. 
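Before the PipeSinkSubtask diff continues, here is a compact sketch of how the PipeRealtimePriorityBlockingQueue added above routes and drains events; plain JDK deques stand in for the blocking collections, and the threshold value is illustrative:

    import java.util.ArrayDeque;
    import java.util.Deque;

    final class RealtimeFirstQueueSketch {
      interface Event {}

      static final class TsFileEvent implements Event {}

      static final class HeartbeatEvent implements Event {}

      private static final int POLL_HISTORICAL_THRESHOLD = 10; // illustrative value

      private final Deque<Event> realtimeQueue = new ArrayDeque<>();
      private final Deque<TsFileEvent> tsFileDeque = new ArrayDeque<>();
      private long tsFilePollCount = 0;

      boolean offer(final Event event) {
        if (event instanceof TsFileEvent) {
          return tsFileDeque.add((TsFileEvent) event); // park bulky TsFile events separately
        }
        if (event instanceof HeartbeatEvent && realtimeQueue.peekLast() instanceof HeartbeatEvent) {
          return false; // drop back-to-back heartbeats to bound queue growth
        }
        return realtimeQueue.add(event);
      }

      Event poll() {
        final Event event = realtimeQueue.poll(); // realtime (non-TsFile) events win
        if (event != null) {
          return event;
        }
        // Usually drain the newest TsFile, but every N-th poll take the oldest
        // so historical files still make progress.
        return ++tsFilePollCount % POLL_HISTORICAL_THRESHOLD == 0
            ? tsFileDeque.pollFirst()
            : tsFileDeque.pollLast();
      }
    }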
public static final PipeHeartbeatEvent CRON_HEARTBEAT_EVENT = new PipeHeartbeatEvent("cron", false); - private static final long CRON_HEARTBEAT_EVENT_INJECT_INTERVAL_MILLISECONDS = - PipeConfig.getInstance().getPipeSubtaskExecutorCronHeartbeatEventIntervalSeconds() * 1000; - private long lastHeartbeatEventInjectTime = System.currentTimeMillis(); - public PipeConnectorSubtask( + public PipeSinkSubtask( final String taskID, final long creationTime, final String attributeSortedString, @@ -81,9 +78,9 @@ public PipeConnectorSubtask( this.inputPendingQueue = inputPendingQueue; if (!attributeSortedString.startsWith("schema_")) { - PipeDataRegionConnectorMetrics.getInstance().register(this); + PipeDataRegionSinkMetrics.getInstance().register(this); } else { - PipeSchemaRegionConnectorMetrics.getInstance().register(this); + PipeSchemaRegionSinkMetrics.getInstance().register(this); } } @@ -99,29 +96,30 @@ protected boolean executeOnce() { : UserDefinedEnrichedEvent.maybeOf(inputPendingQueue.waitedPoll()); // Record this event for retrying on connection failure or other exceptions setLastEvent(event); + if (event instanceof EnrichedEvent && ((EnrichedEvent) event).isReleased()) { + lastEvent = null; + return true; + } try { - if (event == null) { - if (System.currentTimeMillis() - lastHeartbeatEventInjectTime - > CRON_HEARTBEAT_EVENT_INJECT_INTERVAL_MILLISECONDS) { - transferHeartbeatEvent(CRON_HEARTBEAT_EVENT); - } + if (Objects.isNull(event)) { + transferHeartbeatEvent(CRON_HEARTBEAT_EVENT); return false; } if (event instanceof TabletInsertionEvent) { outputPipeConnector.transfer((TabletInsertionEvent) event); - PipeDataRegionConnectorMetrics.getInstance().markTabletEvent(taskID); + PipeDataRegionSinkMetrics.getInstance().markTabletEvent(taskID); } else if (event instanceof TsFileInsertionEvent) { outputPipeConnector.transfer((TsFileInsertionEvent) event); - PipeDataRegionConnectorMetrics.getInstance().markTsFileEvent(taskID); + PipeDataRegionSinkMetrics.getInstance().markTsFileEvent(taskID); } else if (event instanceof PipeSchemaRegionWritePlanEvent) { outputPipeConnector.transfer(event); if (((PipeSchemaRegionWritePlanEvent) event).getPlanNode().getType() != PlanNodeType.DELETE_DATA) { // Only plan nodes in schema region will be marked, delete data node is currently not // taken into account - PipeSchemaRegionConnectorMetrics.getInstance().markSchemaEvent(taskID); + PipeSchemaRegionSinkMetrics.getInstance().markSchemaEvent(taskID); } } else if (event instanceof PipeHeartbeatEvent) { transferHeartbeatEvent((PipeHeartbeatEvent) event); @@ -132,7 +130,7 @@ protected boolean executeOnce() { : event); } - decreaseReferenceCountAndReleaseLastEvent(true); + decreaseReferenceCountAndReleaseLastEvent(event, true); } catch (final PipeException e) { if (!isClosed.get()) { setLastExceptionEvent(event); @@ -142,7 +140,7 @@ protected boolean executeOnce() { "{} in pipe transfer, ignored because the connector subtask is dropped.", e.getClass().getSimpleName(), e); - clearReferenceCountAndReleaseLastEvent(); + clearReferenceCountAndReleaseLastEvent(event); } } catch (final Exception e) { if (!isClosed.get()) { @@ -159,7 +157,7 @@ protected boolean executeOnce() { } else { LOGGER.info( "Exception in pipe transfer, ignored because the connector subtask is dropped.", e); - clearReferenceCountAndReleaseLastEvent(); + clearReferenceCountAndReleaseLastEvent(event); } } @@ -167,6 +165,11 @@ protected boolean executeOnce() { } private void transferHeartbeatEvent(final PipeHeartbeatEvent event) { + // DO NOT call 
heartbeat or transfer after closed, or will cause connection leak + if (isClosed.get()) { + return; + } + try { outputPipeConnector.heartbeat(); outputPipeConnector.transfer(event); @@ -174,28 +177,35 @@ private void transferHeartbeatEvent(final PipeHeartbeatEvent event) { throw new PipeConnectionException( "PipeConnector: " + outputPipeConnector.getClass().getName() + + "(id: " + + taskID + + ")" + " heartbeat failed, or encountered failure when transferring generic event. Failure: " + e.getMessage(), e); } - lastHeartbeatEventInjectTime = System.currentTimeMillis(); - event.onTransferred(); - PipeDataRegionConnectorMetrics.getInstance().markPipeHeartbeatEvent(taskID); + PipeDataRegionSinkMetrics.getInstance().markPipeHeartbeatEvent(taskID); } @Override public void close() { if (!attributeSortedString.startsWith("schema_")) { - PipeDataRegionConnectorMetrics.getInstance().deregister(taskID); + PipeDataRegionSinkMetrics.getInstance().deregister(taskID); } else { - PipeSchemaRegionConnectorMetrics.getInstance().deregister(taskID); + PipeSchemaRegionSinkMetrics.getInstance().deregister(taskID); } isClosed.set(true); try { + final long startTime = System.currentTimeMillis(); outputPipeConnector.close(); + LOGGER.info( + "Pipe: connector subtask {} ({}) was closed within {} ms", + taskID, + outputPipeConnector, + System.currentTimeMillis() - startTime); } catch (final Exception e) { LOGGER.info( "Exception occurred when closing pipe connector subtask {}, root cause: {}", @@ -203,13 +213,7 @@ public void close() { ErrorHandlingUtils.getRootCause(e).getMessage(), e); } finally { - inputPendingQueue.forEach( - event -> { - if (event instanceof EnrichedEvent) { - ((EnrichedEvent) event).clearReferenceCount(PipeEventCollector.class.getName()); - } - }); - inputPendingQueue.clear(); + inputPendingQueue.discardAllEvents(); // Should be called after outputPipeConnector.close() super.close(); @@ -220,56 +224,55 @@ public void close() { * When a pipe is dropped, the connector maybe reused and will not be closed. So we just discard * its queued events in the output pipe connector. */ - public void discardEventsOfPipe(final String pipeNameToDrop) { + public void discardEventsOfPipe(final String pipeNameToDrop, int regionId) { // Try to remove the events as much as possible - inputPendingQueue.removeIf( - event -> { - if (event instanceof EnrichedEvent - && pipeNameToDrop.equals(((EnrichedEvent) event).getPipeName())) { - ((EnrichedEvent) event) - .clearReferenceCount(IoTDBDataRegionAsyncConnector.class.getName()); - return true; - } - return false; - }); - - // synchronized to use the lastEvent and lastExceptionEvent - synchronized (this) { - // Here we discard the last event, and re-submit the pipe task to avoid that the pipe task has - // stopped submission but will not be stopped by critical exceptions, because when it acquires - // lock, the pipe is already dropped, thus it will do nothing. - // Note that since we use a new thread to stop all the pipes, we will not encounter deadlock - // here. Or else we will. - if (lastEvent instanceof EnrichedEvent - && pipeNameToDrop.equals(((EnrichedEvent) lastEvent).getPipeName())) { - // Do not clear last event's reference count because it may be on transferring - lastEvent = null; - // Submit self to avoid that the lastEvent has been retried "max times" times and has - // stopped executing. - // 1. If the last event is still on execution, or submitted by the previous "onSuccess" or - // "onFailure", the "submitSelf" cause nothing. - // 2. 
If the last event is waiting the instance lock to call "onSuccess", then the callback - // method will skip this turn of submission. - // 3. If the last event is waiting to call "onFailure", then it will be ignored because the - // last event has been set to null. - // 4. If the last event has called "onFailure" and caused the subtask to stop submission, - // it's submitted here and the "report" will wait for the "drop pipe" lock to stop all - // the pipes with critical exceptions. As illustrated above, the "report" will do - // nothing. - submitSelf(); - } + inputPendingQueue.discardEventsOfPipe(pipeNameToDrop, regionId); + + try { + increaseHighPriorityTaskCount(); + + // synchronized to use the lastEvent & lastExceptionEvent + synchronized (this) { + // Here we discard the last event, and re-submit the pipe task to avoid that the pipe task + // has stopped submission but will not be stopped by critical exceptions, because when it + // acquires lock, the pipe is already dropped, thus it will do nothing. Note that since we + // use a new thread to stop all the pipes, we will not encounter deadlock here. Or else we + // will. + if (lastEvent instanceof EnrichedEvent + && pipeNameToDrop.equals(((EnrichedEvent) lastEvent).getPipeName()) + && regionId == ((EnrichedEvent) lastEvent).getRegionId()) { + // Do not clear the last event's reference counts because it may be on transferring + lastEvent = null; + // Submit self to avoid that the lastEvent has been retried "max times" times and has + // stopped executing. + // 1. If the last event is still on execution, or submitted by the previous "onSuccess" or + // "onFailure", the "submitSelf" causes nothing. + // 2. If the last event is waiting the instance lock to call "onSuccess", then the + // callback method will skip this turn of submission. + // 3. If the last event is waiting to call "onFailure", then it will be ignored because + // the last event has been set to null. + // 4. If the last event has called "onFailure" and caused the subtask to stop submission, + // it's submitted here and the "report" will wait for the "drop pipe" lock to stop all + // the pipes with critical exceptions. As illustrated above, the "report" will do + // nothing. + submitSelf(); + } - // We only clear the lastEvent's reference count when it's already on failure. Namely, we - // clear the lastExceptionEvent. It's safe to potentially clear it twice because we have the - // "nonnull" detection. - if (lastExceptionEvent instanceof EnrichedEvent - && pipeNameToDrop.equals(((EnrichedEvent) lastExceptionEvent).getPipeName())) { - clearReferenceCountAndReleaseLastExceptionEvent(); + // We only clear the lastEvent's reference counts when it's already on failure. Namely, we + // clear the lastExceptionEvent. It's safe to potentially clear it twice because we have the + // "nonnull" detection. 
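The rewritten discardEventsOfPipe above detaches the in-flight lastEvent of a dropped pipe without releasing it, then re-submits the subtask so a retry loop that already stopped can resume and observe the drop. The core move in isolation, with a stand-in event interface:

    final class DiscardSketch {
      interface EnrichedEvent {
        String getPipeName();

        int getRegionId();
      }

      private Object lastEvent;

      synchronized void discardLastEventOfPipe(
          final String pipeNameToDrop, final int regionId, final Runnable submitSelf) {
        if (lastEvent instanceof EnrichedEvent
            && pipeNameToDrop.equals(((EnrichedEvent) lastEvent).getPipeName())
            && regionId == ((EnrichedEvent) lastEvent).getRegionId()) {
          // Detach only: the event may still be mid-transfer, so its reference
          // count must not be cleared here.
          lastEvent = null;
          // Re-submit so a subtask stalled after exhausting retries is rescheduled.
          submitSelf.run();
        }
      }
    }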
+ if (lastExceptionEvent instanceof EnrichedEvent + && pipeNameToDrop.equals(((EnrichedEvent) lastExceptionEvent).getPipeName()) + && regionId == ((EnrichedEvent) lastExceptionEvent).getRegionId()) { + clearReferenceCountAndReleaseLastExceptionEvent(); + } } + } finally { + decreaseHighPriorityTaskCount(); } - if (outputPipeConnector instanceof IoTDBDataRegionAsyncConnector) { - ((IoTDBDataRegionAsyncConnector) outputPipeConnector).discardEventsOfPipe(pipeNameToDrop); + if (outputPipeConnector instanceof IoTDBSink) { + ((IoTDBSink) outputPipeConnector).discardEventsOfPipe(pipeNameToDrop, regionId); } } @@ -299,40 +302,69 @@ public int getPipeHeartbeatEventCount() { } public int getAsyncConnectorRetryEventQueueSize() { - return outputPipeConnector instanceof IoTDBDataRegionAsyncConnector - ? ((IoTDBDataRegionAsyncConnector) outputPipeConnector).getRetryEventQueueSize() + return outputPipeConnector instanceof IoTDBDataRegionAsyncSink + ? ((IoTDBDataRegionAsyncSink) outputPipeConnector).getRetryEventQueueSize() : 0; } - // For performance, this will not acquire lock and does not guarantee the correct - // result. However, this shall not cause any exceptions when concurrently read & written. - public int getEventCount(final String pipeName) { - final AtomicInteger count = new AtomicInteger(0); - try { - inputPendingQueue.forEach( - event -> { - if (event instanceof EnrichedEvent - && pipeName.equals(((EnrichedEvent) event).getPipeName())) { - count.incrementAndGet(); - } - }); - } catch (final Exception e) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug( - "Exception occurred when counting event of pipe {}, root cause: {}", - pipeName, - ErrorHandlingUtils.getRootCause(e).getMessage(), - e); - } + public int getPendingHandlersSize() { + return outputPipeConnector instanceof IoTDBDataRegionAsyncSink + ? ((IoTDBDataRegionAsyncSink) outputPipeConnector).getPendingHandlersSize() + : 0; + } + + public int getBatchSize() { + if (outputPipeConnector instanceof IoTDBDataRegionAsyncSink) { + return ((IoTDBDataRegionAsyncSink) outputPipeConnector).getBatchSize(); + } + if (outputPipeConnector instanceof IoTDBDataRegionSyncSink) { + return ((IoTDBDataRegionSyncSink) outputPipeConnector).getBatchSize(); + } + return 0; + } + + public double getTotalUncompressedSize() { + return outputPipeConnector instanceof IoTDBSink + ? ((IoTDBSink) outputPipeConnector).getTotalUncompressedSize() + : 0; + } + + public double getTotalCompressedSize() { + return outputPipeConnector instanceof IoTDBSink + ? 
((IoTDBSink) outputPipeConnector).getTotalCompressedSize() + : 0; + } + + public void setTabletBatchSizeHistogram(Histogram tabletBatchSizeHistogram) { + if (outputPipeConnector instanceof IoTDBSink) { + ((IoTDBSink) outputPipeConnector).setTabletBatchSizeHistogram(tabletBatchSizeHistogram); + } + } + + public void setTsFileBatchSizeHistogram(Histogram tsFileBatchSizeHistogram) { + if (outputPipeConnector instanceof IoTDBSink) { + ((IoTDBSink) outputPipeConnector).setTsFileBatchSizeHistogram(tsFileBatchSizeHistogram); + } + } + + public void setTabletBatchTimeIntervalHistogram(Histogram tabletBatchTimeIntervalHistogram) { + if (outputPipeConnector instanceof IoTDBSink) { + ((IoTDBSink) outputPipeConnector) + .setTabletBatchTimeIntervalHistogram(tabletBatchTimeIntervalHistogram); + } + } + + public void setTsFileBatchTimeIntervalHistogram(Histogram tsFileBatchTimeIntervalHistogram) { + if (outputPipeConnector instanceof IoTDBSink) { + ((IoTDBSink) outputPipeConnector) + .setTsFileBatchTimeIntervalHistogram(tsFileBatchTimeIntervalHistogram); + } + } + + public void setEventSizeHistogram(Histogram eventSizeHistogram) { + if (outputPipeConnector instanceof IoTDBSink) { + ((IoTDBSink) outputPipeConnector).setBatchEventSizeHistogram(eventSizeHistogram); } - // Avoid potential NPE in "getPipeName" - final EnrichedEvent event = - lastEvent instanceof EnrichedEvent ? (EnrichedEvent) lastEvent : null; - return count.get() - + (outputPipeConnector instanceof IoTDBDataRegionAsyncConnector - ? ((IoTDBDataRegionAsyncConnector) outputPipeConnector).getRetryEventCount(pipeName) - : 0) - + (Objects.nonNull(event) && pipeName.equals(event.getPipeName()) ? 1 : 0); } //////////////////////////// Error report //////////////////////////// diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/connector/PipeConnectorSubtaskLifeCycle.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtaskLifeCycle.java similarity index 64% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/connector/PipeConnectorSubtaskLifeCycle.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtaskLifeCycle.java index a28a0289b08df..0df3a773b9c15 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/connector/PipeConnectorSubtaskLifeCycle.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtaskLifeCycle.java @@ -17,29 +17,30 @@ * under the License. 
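The getters and histogram setters above all share one guarded-delegation shape: the call takes effect only when the underlying connector is the expected sink type and is silently ignored otherwise. A sketch of that shape, with illustrative types:

    final class MetricWiringSketch {
      interface Connector {}

      interface HistogramAwareSink extends Connector {
        void setEventSizeHistogram(Object histogram);
      }

      private final Connector connector;

      MetricWiringSketch(final Connector connector) {
        this.connector = connector;
      }

      void setEventSizeHistogram(final Object histogram) {
        // No-op for connector types that do not expose histograms.
        if (connector instanceof HistogramAwareSink) {
          ((HistogramAwareSink) connector).setEventSizeHistogram(histogram);
        }
      }
    }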
*/ -package org.apache.iotdb.db.pipe.task.subtask.connector; +package org.apache.iotdb.db.pipe.agent.task.subtask.sink; -import org.apache.iotdb.commons.pipe.task.connection.UnboundedBlockingPendingQueue; -import org.apache.iotdb.db.pipe.execution.PipeConnectorSubtaskExecutor; +import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue; +import org.apache.iotdb.db.pipe.agent.task.execution.PipeSinkSubtaskExecutor; +import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; import org.apache.iotdb.pipe.api.event.Event; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class PipeConnectorSubtaskLifeCycle implements AutoCloseable { +public class PipeSinkSubtaskLifeCycle implements AutoCloseable { - private static final Logger LOGGER = LoggerFactory.getLogger(PipeConnectorSubtaskLifeCycle.class); + private static final Logger LOGGER = LoggerFactory.getLogger(PipeSinkSubtaskLifeCycle.class); - protected final PipeConnectorSubtaskExecutor executor; - protected final PipeConnectorSubtask subtask; + protected final PipeSinkSubtaskExecutor executor; + protected final PipeSinkSubtask subtask; private final UnboundedBlockingPendingQueue pendingQueue; - private int runningTaskCount; - private int registeredTaskCount; + protected int runningTaskCount; + protected int registeredTaskCount; - public PipeConnectorSubtaskLifeCycle( - PipeConnectorSubtaskExecutor executor, - PipeConnectorSubtask subtask, + public PipeSinkSubtaskLifeCycle( + PipeSinkSubtaskExecutor executor, + PipeSinkSubtask subtask, UnboundedBlockingPendingQueue pendingQueue) { this.executor = executor; this.subtask = subtask; @@ -49,7 +50,7 @@ public PipeConnectorSubtaskLifeCycle( registeredTaskCount = 0; } - public PipeConnectorSubtask getSubtask() { + public PipeSinkSubtask getSubtask() { return subtask; } @@ -65,6 +66,8 @@ public synchronized void register() { if (registeredTaskCount == 0) { executor.register(subtask); runningTaskCount = 0; + + PipeDataNodeResourceManager.compaction().registerPipeConnectorSubtaskLifeCycle(this); } registeredTaskCount++; @@ -76,25 +79,25 @@ public synchronized void register() { } /** - * Deregister the {@link PipeConnectorSubtask}. If the {@link PipeConnectorSubtask} is the last - * one, close the {@link PipeConnectorSubtask}. + * Deregister the {@link PipeSinkSubtask}. If the {@link PipeSinkSubtask} is the last one, close + * the {@link PipeSinkSubtask}. * - *

Note that this method should be called after the {@link PipeConnectorSubtask} is stopped. - * Otherwise, the {@link PipeConnectorSubtaskLifeCycle#runningTaskCount} might be inconsistent - * with the {@link PipeConnectorSubtaskLifeCycle#registeredTaskCount} because of parallel - * connector scheduling. + *

Note that this method should be called after the {@link PipeSinkSubtask} is stopped. + * Otherwise, the {@link PipeSinkSubtaskLifeCycle#runningTaskCount} might be inconsistent with the + * {@link PipeSinkSubtaskLifeCycle#registeredTaskCount} because of parallel connector scheduling. * * @param pipeNameToDeregister pipe name - * @return {@code true} if the {@link PipeConnectorSubtask} is out of life cycle, indicating that - * the {@link PipeConnectorSubtask} should never be used again - * @throws IllegalStateException if {@link PipeConnectorSubtaskLifeCycle#registeredTaskCount} <= 0 + * @param regionId region id + * @return {@code true} if the {@link PipeSinkSubtask} is out of life cycle, indicating that the + * {@link PipeSinkSubtask} should never be used again + * @throws IllegalStateException if {@link PipeSinkSubtaskLifeCycle#registeredTaskCount} <= 0 */ - public synchronized boolean deregister(String pipeNameToDeregister) { + public synchronized boolean deregister(final String pipeNameToDeregister, int regionId) { if (registeredTaskCount <= 0) { throw new IllegalStateException("registeredTaskCount <= 0"); } - subtask.discardEventsOfPipe(pipeNameToDeregister); + subtask.discardEventsOfPipe(pipeNameToDeregister, regionId); try { if (registeredTaskCount > 1) { @@ -120,7 +123,12 @@ public synchronized void start() { } if (runningTaskCount == 0) { - executor.start(subtask.getTaskID()); + try { + subtask.increaseHighPriorityTaskCount(); + executor.start(subtask.getTaskID()); + } finally { + subtask.decreaseHighPriorityTaskCount(); + } } runningTaskCount++; @@ -151,5 +159,7 @@ public synchronized void stop() { @Override public synchronized void close() { executor.deregister(subtask.getTaskID()); + + PipeDataNodeResourceManager.compaction().deregisterPipeConnectorSubtaskLifeCycle(this); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/connector/PipeConnectorSubtaskManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtaskManager.java similarity index 69% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/connector/PipeConnectorSubtaskManager.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtaskManager.java index f8809b6b689a1..e2743147705c6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/connector/PipeConnectorSubtaskManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtaskManager.java @@ -17,20 +17,20 @@ * under the License. 
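The register/deregister bookkeeping of PipeSinkSubtaskLifeCycle is a reference count: the first register() brings the shared subtask up, later calls only bump the count, and the last deregister() tears it down for good. A minimal sketch of that contract:

    final class RefCountedLifeCycleSketch {
      private int registeredTaskCount = 0;

      synchronized void register(final Runnable onFirstRegister) {
        if (registeredTaskCount == 0) {
          onFirstRegister.run(); // e.g. start the shared subtask, hook up bookkeeping
        }
        registeredTaskCount++;
      }

      synchronized boolean deregister(final Runnable onLastDeregister) {
        if (registeredTaskCount <= 0) {
          throw new IllegalStateException("registeredTaskCount <= 0");
        }
        registeredTaskCount--;
        if (registeredTaskCount == 0) {
          onLastDeregister.run(); // close the subtask; it must never be reused afterwards
          return true; // out of life cycle
        }
        return false;
      }
    }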
*/ -package org.apache.iotdb.db.pipe.task.subtask.connector; +package org.apache.iotdb.db.pipe.agent.task.subtask.sink; import org.apache.iotdb.commons.consensus.DataRegionId; -import org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant; +import org.apache.iotdb.commons.pipe.agent.plugin.builtin.BuiltinPipePlugin; +import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue; +import org.apache.iotdb.commons.pipe.agent.task.progress.PipeEventCommitManager; +import org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant; import org.apache.iotdb.commons.pipe.config.constant.SystemConstant; import org.apache.iotdb.commons.pipe.config.plugin.configuraion.PipeTaskRuntimeConfiguration; -import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskConnectorRuntimeEnvironment; -import org.apache.iotdb.commons.pipe.plugin.builtin.BuiltinPipePlugin; -import org.apache.iotdb.commons.pipe.progress.PipeEventCommitManager; -import org.apache.iotdb.commons.pipe.task.connection.UnboundedBlockingPendingQueue; +import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskSinkRuntimeEnvironment; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; -import org.apache.iotdb.db.pipe.execution.PipeConnectorSubtaskExecutor; -import org.apache.iotdb.db.pipe.metric.PipeDataNodeRemainingEventAndTimeMetrics; -import org.apache.iotdb.db.pipe.metric.PipeDataRegionEventCounter; +import org.apache.iotdb.db.pipe.agent.task.execution.PipeSinkSubtaskExecutor; +import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionEventCounter; +import org.apache.iotdb.db.pipe.sink.protocol.thrift.async.IoTDBDataRegionAsyncSink; import org.apache.iotdb.db.storageengine.StorageEngine; import org.apache.iotdb.pipe.api.PipeConnector; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator; @@ -47,25 +47,27 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; -public class PipeConnectorSubtaskManager { +public class PipeSinkSubtaskManager { - private static final Logger LOGGER = LoggerFactory.getLogger(PipeConnectorSubtaskManager.class); + private static final Logger LOGGER = LoggerFactory.getLogger(PipeSinkSubtaskManager.class); private static final String FAILED_TO_DEREGISTER_EXCEPTION_MESSAGE = "Failed to deregister PipeConnectorSubtask. 
No such subtask: "; - private final Map<String, List<PipeConnectorSubtaskLifeCycle>> + private final Map<String, List<PipeSinkSubtaskLifeCycle>> attributeSortedString2SubtaskLifeCycleMap = new HashMap<>(); public synchronized String register( - final PipeConnectorSubtaskExecutor executor, + final Supplier<PipeSinkSubtaskExecutor> executorSupplier, final PipeParameters pipeConnectorParameters, - final PipeTaskConnectorRuntimeEnvironment environment) { + final PipeTaskSinkRuntimeEnvironment environment) { final String connectorKey = pipeConnectorParameters .getStringOrDefault( - Arrays.asList(PipeConnectorConstant.CONNECTOR_KEY, PipeConnectorConstant.SINK_KEY), + Arrays.asList(PipeSinkConstant.CONNECTOR_KEY, PipeSinkConstant.SINK_KEY), BuiltinPipePlugin.IOTDB_THRIFT_CONNECTOR.getPipePluginName()) // Convert the value of `CONNECTOR_KEY` or `SINK_KEY` to lowercase // for matching in `CONNECTOR_CONSTRUCTORS` @@ -89,15 +91,15 @@ public synchronized String register( connectorNum = pipeConnectorParameters.getIntOrDefault( Arrays.asList( - PipeConnectorConstant.CONNECTOR_IOTDB_PARALLEL_TASKS_KEY, - PipeConnectorConstant.SINK_IOTDB_PARALLEL_TASKS_KEY), - PipeConnectorConstant.CONNECTOR_IOTDB_PARALLEL_TASKS_DEFAULT_VALUE); + PipeSinkConstant.CONNECTOR_IOTDB_PARALLEL_TASKS_KEY, + PipeSinkConstant.SINK_IOTDB_PARALLEL_TASKS_KEY), + PipeSinkConstant.CONNECTOR_IOTDB_PARALLEL_TASKS_DEFAULT_VALUE); realTimeFirst = pipeConnectorParameters.getBooleanOrDefault( Arrays.asList( - PipeConnectorConstant.CONNECTOR_REALTIME_FIRST_KEY, - PipeConnectorConstant.SINK_REALTIME_FIRST_KEY), - PipeConnectorConstant.CONNECTOR_REALTIME_FIRST_DEFAULT_VALUE); + PipeSinkConstant.CONNECTOR_REALTIME_FIRST_KEY, + PipeSinkConstant.SINK_REALTIME_FIRST_KEY), + PipeSinkConstant.CONNECTOR_REALTIME_FIRST_DEFAULT_VALUE); attributeSortedString = "data_" + attributeSortedString; } else { // Do not allow parallel tasks for schema region connectors @@ -105,17 +107,25 @@ public synchronized String register( connectorNum = 1; attributeSortedString = "schema_" + attributeSortedString; } + environment.setAttributeSortedString(attributeSortedString); if (!attributeSortedString2SubtaskLifeCycleMap.containsKey(attributeSortedString)) { - final List<PipeConnectorSubtaskLifeCycle> pipeConnectorSubtaskLifeCycleList = + final PipeSinkSubtaskExecutor executor = executorSupplier.get(); + + final List<PipeSinkSubtaskLifeCycle> pipeSinkSubtaskLifeCycleList = new ArrayList<>(connectorNum); + AtomicInteger counter = new AtomicInteger(0); // Shared pending queue for all subtasks final UnboundedBlockingPendingQueue<Event> pendingQueue = realTimeFirst ? new PipeRealtimePriorityBlockingQueue() : new UnboundedBlockingPendingQueue<>(new PipeDataRegionEventCounter()); + if (realTimeFirst) { + ((PipeRealtimePriorityBlockingQueue) pendingQueue).setOfferTsFileCounter(counter); + } + for (int connectorIndex = 0; connectorIndex < connectorNum; connectorIndex++) { final PipeConnector pipeConnector = isDataRegionConnector @@ -126,6 +136,9 @@ public synchronized String register( // 1. Construct, validate and customize PipeConnector, and then handshake (create // connection) with the target try { + if (pipeConnector instanceof IoTDBDataRegionAsyncSink) { + ((IoTDBDataRegionAsyncSink) pipeConnector).setTransferTsFileCounter(counter); + } pipeConnector.validate(new PipeParameterValidator(pipeConnectorParameters)); pipeConnector.customize( pipeConnectorParameters, new PipeTaskRuntimeConfiguration(environment)); @@ -144,8 +157,8 @@ public synchronized String register( } // 2.
Construct PipeConnectorSubtaskLifeCycle to manage PipeConnectorSubtask's life cycle - final PipeConnectorSubtask pipeConnectorSubtask = - new PipeConnectorSubtask( + final PipeSinkSubtask pipeSinkSubtask = + new PipeSinkSubtask( String.format( "%s_%s_%s", attributeSortedString, environment.getCreationTime(), connectorIndex), @@ -154,23 +167,23 @@ public synchronized String register( connectorIndex, pendingQueue, pipeConnector); - final PipeConnectorSubtaskLifeCycle pipeConnectorSubtaskLifeCycle = - new PipeConnectorSubtaskLifeCycle(executor, pipeConnectorSubtask, pendingQueue); - pipeConnectorSubtaskLifeCycleList.add(pipeConnectorSubtaskLifeCycle); + final PipeSinkSubtaskLifeCycle pipeSinkSubtaskLifeCycle = + new PipeSinkSubtaskLifeCycle(executor, pipeSinkSubtask, pendingQueue); + pipeSinkSubtaskLifeCycleList.add(pipeSinkSubtaskLifeCycle); } + LOGGER.info( + "Pipe connector subtasks with attributes {} is bounded with connectorExecutor {} and callbackExecutor {}.", + attributeSortedString, + executor.getWorkingThreadName(), + executor.getCallbackThreadName()); attributeSortedString2SubtaskLifeCycleMap.put( - attributeSortedString, pipeConnectorSubtaskLifeCycleList); + attributeSortedString, pipeSinkSubtaskLifeCycleList); } - for (final PipeConnectorSubtaskLifeCycle lifeCycle : + for (final PipeSinkSubtaskLifeCycle lifeCycle : attributeSortedString2SubtaskLifeCycleMap.get(attributeSortedString)) { lifeCycle.register(); - if (isDataRegionConnector) { - PipeDataNodeRemainingEventAndTimeMetrics.getInstance() - .register( - lifeCycle.getSubtask(), environment.getPipeName(), environment.getCreationTime()); - } } return attributeSortedString; @@ -179,21 +192,30 @@ public synchronized String register( public synchronized void deregister( final String pipeName, final long creationTime, - final int dataRegionId, + final int regionId, final String attributeSortedString) { if (!attributeSortedString2SubtaskLifeCycleMap.containsKey(attributeSortedString)) { throw new PipeException(FAILED_TO_DEREGISTER_EXCEPTION_MESSAGE + attributeSortedString); } - final List lifeCycles = + final List lifeCycles = attributeSortedString2SubtaskLifeCycleMap.get(attributeSortedString); - lifeCycles.removeIf(o -> o.deregister(pipeName)); + + // Shall not be empty + final PipeSinkSubtaskExecutor executor = lifeCycles.get(0).executor; + + lifeCycles.removeIf(o -> o.deregister(pipeName, regionId)); if (lifeCycles.isEmpty()) { attributeSortedString2SubtaskLifeCycleMap.remove(attributeSortedString); + executor.shutdown(); + LOGGER.info( + "The executor {} and {} has been successfully shutdown.", + executor.getWorkingThreadName(), + executor.getCallbackThreadName()); } - PipeEventCommitManager.getInstance().deregister(pipeName, creationTime, dataRegionId); + PipeEventCommitManager.getInstance().deregister(pipeName, creationTime, regionId); } public synchronized void start(final String attributeSortedString) { @@ -201,7 +223,7 @@ public synchronized void start(final String attributeSortedString) { throw new PipeException(FAILED_TO_DEREGISTER_EXCEPTION_MESSAGE + attributeSortedString); } - for (final PipeConnectorSubtaskLifeCycle lifeCycle : + for (final PipeSinkSubtaskLifeCycle lifeCycle : attributeSortedString2SubtaskLifeCycleMap.get(attributeSortedString)) { lifeCycle.start(); } @@ -212,7 +234,7 @@ public synchronized void stop(final String attributeSortedString) { throw new PipeException(FAILED_TO_DEREGISTER_EXCEPTION_MESSAGE + attributeSortedString); } - for (final PipeConnectorSubtaskLifeCycle lifeCycle : + for (final 
PipeSinkSubtaskLifeCycle lifeCycle : attributeSortedString2SubtaskLifeCycleMap.get(attributeSortedString)) { lifeCycle.stop(); } @@ -241,15 +263,15 @@ private String generateAttributeSortedString(final PipeParameters pipeConnectorP ///////////////////////// Singleton Instance Holder ///////////////////////// - private PipeConnectorSubtaskManager() { + private PipeSinkSubtaskManager() { // Do nothing } private static class PipeSubtaskManagerHolder { - private static final PipeConnectorSubtaskManager INSTANCE = new PipeConnectorSubtaskManager(); + private static final PipeSinkSubtaskManager INSTANCE = new PipeSinkSubtaskManager(); } - public static PipeConnectorSubtaskManager instance() { + public static PipeSinkSubtaskManager instance() { return PipeSubtaskManagerHolder.INSTANCE; } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTransferBatchReqBuilder.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTransferBatchReqBuilder.java deleted file mode 100644 index f28f19e4da117..0000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTransferBatchReqBuilder.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
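// PipeSinkSubtaskManager above publishes its singleton through the initialization-on-demand
// holder idiom: the nested holder class is initialized lazily and exactly once by the JVM
// class loader, giving thread-safe lazy construction without volatile fields or explicit
// locking. The idiom in its minimal form:
public final class LazySingleton {

  private LazySingleton() {
    // Do nothing
  }

  private static class Holder {
    private static final LazySingleton INSTANCE = new LazySingleton();
  }

  public static LazySingleton instance() {
    return Holder.INSTANCE;
  }
}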
- */ - -package org.apache.iotdb.db.pipe.connector.payload.evolvable.batch; - -import org.apache.iotdb.common.rpc.thrift.TEndPoint; -import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.db.pipe.connector.client.IoTDBDataNodeCacheLeaderClientManager; -import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; -import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; -import org.apache.iotdb.db.storageengine.dataregion.wal.exception.WALPipeException; -import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; -import org.apache.iotdb.pipe.api.event.Event; -import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; - -import org.apache.tsfile.exception.write.WriteProcessException; -import org.apache.tsfile.utils.Pair; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; - -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_FORMAT_HYBRID_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_FORMAT_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_FORMAT_TS_FILE_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_BATCH_DELAY_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_BATCH_SIZE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_PLAIN_BATCH_DELAY_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_PLAIN_BATCH_SIZE_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_TS_FILE_BATCH_DELAY_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_TS_FILE_BATCH_SIZE_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_LEADER_CACHE_ENABLE_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_LEADER_CACHE_ENABLE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_FORMAT_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_IOTDB_BATCH_DELAY_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_IOTDB_BATCH_SIZE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_LEADER_CACHE_ENABLE_KEY; - -public class PipeTransferBatchReqBuilder implements AutoCloseable { - - private static final Logger LOGGER = LoggerFactory.getLogger(PipeTransferBatchReqBuilder.class); - - private final boolean useLeaderCache; - - private final int requestMaxDelayInMs; - private final long requestMaxBatchSizeInBytes; - - // If the leader cache is disabled (or unable to find the endpoint of event in the leader cache), - // the event will be stored in the default batch. - private final PipeTabletEventBatch defaultBatch; - // If the leader cache is enabled, the batch will be divided by the leader endpoint, - // each endpoint has a batch. 
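// The two comments above describe the routing rule of the builder removed here: events whose
// leader is unknown go to the default batch, events with a cached leader endpoint go to a
// per-endpoint batch created on demand. A compact sketch of that decision, with EndPoint and
// Batch as stand-ins for TEndPoint and the tablet batch types:
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

public class LeaderRoutedBatches<EndPoint, Batch> {

  private final Batch defaultBatch;
  private final Map<EndPoint, Batch> endPointToBatch = new HashMap<>();

  public LeaderRoutedBatches(final Batch defaultBatch) {
    this.defaultBatch = defaultBatch;
  }

  // leader may be null when the cache is disabled, the device id is unknown, or the cache
  // misses; all of these fall back to the shared default batch, as onEvent(...) below does.
  public Batch route(final EndPoint leader, final Function<EndPoint, Batch> newBatch) {
    return leader == null ? defaultBatch : endPointToBatch.computeIfAbsent(leader, newBatch);
  }
}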
- // This is only used in plain batch since tsfile does not return redirection info. - private final Map endPointToBatch = new HashMap<>(); - - public PipeTransferBatchReqBuilder(final PipeParameters parameters) { - final boolean usingTsFileBatch = - parameters - .getStringOrDefault( - Arrays.asList(CONNECTOR_FORMAT_KEY, SINK_FORMAT_KEY), CONNECTOR_FORMAT_HYBRID_VALUE) - .equals(CONNECTOR_FORMAT_TS_FILE_VALUE); - - useLeaderCache = - !usingTsFileBatch - && parameters.getBooleanOrDefault( - Arrays.asList(SINK_LEADER_CACHE_ENABLE_KEY, CONNECTOR_LEADER_CACHE_ENABLE_KEY), - CONNECTOR_LEADER_CACHE_ENABLE_DEFAULT_VALUE); - - final int requestMaxDelayInSeconds; - if (usingTsFileBatch) { - requestMaxDelayInSeconds = - parameters.getIntOrDefault( - Arrays.asList(CONNECTOR_IOTDB_BATCH_DELAY_KEY, SINK_IOTDB_BATCH_DELAY_KEY), - CONNECTOR_IOTDB_TS_FILE_BATCH_DELAY_DEFAULT_VALUE); - requestMaxDelayInMs = - requestMaxDelayInSeconds < 0 ? Integer.MAX_VALUE : requestMaxDelayInSeconds * 1000; - requestMaxBatchSizeInBytes = - parameters.getLongOrDefault( - Arrays.asList(CONNECTOR_IOTDB_BATCH_SIZE_KEY, SINK_IOTDB_BATCH_SIZE_KEY), - CONNECTOR_IOTDB_TS_FILE_BATCH_SIZE_DEFAULT_VALUE); - this.defaultBatch = - new PipeTabletEventTsFileBatch(requestMaxDelayInMs, requestMaxBatchSizeInBytes); - } else { - requestMaxDelayInSeconds = - parameters.getIntOrDefault( - Arrays.asList(CONNECTOR_IOTDB_BATCH_DELAY_KEY, SINK_IOTDB_BATCH_DELAY_KEY), - CONNECTOR_IOTDB_PLAIN_BATCH_DELAY_DEFAULT_VALUE); - requestMaxDelayInMs = - requestMaxDelayInSeconds < 0 ? Integer.MAX_VALUE : requestMaxDelayInSeconds * 1000; - requestMaxBatchSizeInBytes = - parameters.getLongOrDefault( - Arrays.asList(CONNECTOR_IOTDB_BATCH_SIZE_KEY, SINK_IOTDB_BATCH_SIZE_KEY), - CONNECTOR_IOTDB_PLAIN_BATCH_SIZE_DEFAULT_VALUE); - this.defaultBatch = - new PipeTabletEventPlainBatch(requestMaxDelayInMs, requestMaxBatchSizeInBytes); - } - } - - /** - * Try offer {@link Event} into the corresponding batch if the given {@link Event} is not - * duplicated. - * - * @param event the given {@link Event} - * @return {@link Pair}<{@link TEndPoint}, {@link PipeTabletEventPlainBatch}> not null means this - * {@link PipeTabletEventPlainBatch} can be transferred. the first element is the leader - * endpoint to transfer to (might be null), the second element is the batch to be transferred. - */ - public synchronized Pair onEvent( - final TabletInsertionEvent event) - throws IOException, WALPipeException, WriteProcessException { - if (!(event instanceof EnrichedEvent)) { - LOGGER.warn( - "Unsupported event {} type {} when building transfer request", event, event.getClass()); - return null; - } - - if (!useLeaderCache) { - return defaultBatch.onEvent(event) ? new Pair<>(null, defaultBatch) : null; - } - - String deviceId = null; - if (event instanceof PipeRawTabletInsertionEvent) { - deviceId = ((PipeRawTabletInsertionEvent) event).getDeviceId(); - } else if (event instanceof PipeInsertNodeTabletInsertionEvent) { - deviceId = ((PipeInsertNodeTabletInsertionEvent) event).getDeviceId(); - } - - if (Objects.isNull(deviceId)) { - return defaultBatch.onEvent(event) ? new Pair<>(null, defaultBatch) : null; - } - - final TEndPoint endPoint = - IoTDBDataNodeCacheLeaderClientManager.LEADER_CACHE_MANAGER.getLeaderEndPoint(deviceId); - if (Objects.isNull(endPoint)) { - return defaultBatch.onEvent(event) ? 
new Pair<>(null, defaultBatch) : null; - } - - final PipeTabletEventPlainBatch batch = - endPointToBatch.computeIfAbsent( - endPoint, - k -> new PipeTabletEventPlainBatch(requestMaxDelayInMs, requestMaxBatchSizeInBytes)); - return batch.onEvent(event) ? new Pair<>(endPoint, batch) : null; - } - - /** Get all batches that have at least 1 event. */ - public synchronized List> getAllNonEmptyBatches() { - final List> nonEmptyBatches = new ArrayList<>(); - if (!defaultBatch.isEmpty()) { - nonEmptyBatches.add(new Pair<>(null, defaultBatch)); - } - endPointToBatch.forEach( - (endPoint, batch) -> { - if (!batch.isEmpty()) { - nonEmptyBatches.add(new Pair<>(endPoint, batch)); - } - }); - return nonEmptyBatches; - } - - public boolean isEmpty() { - return defaultBatch.isEmpty() - && endPointToBatch.values().stream().allMatch(PipeTabletEventPlainBatch::isEmpty); - } - - @Override - public synchronized void close() { - defaultBatch.close(); - endPointToBatch.values().forEach(PipeTabletEventPlainBatch::close); - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/opcua/OpcUaConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/opcua/OpcUaConnector.java deleted file mode 100644 index 158d8e84bb907..0000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/opcua/OpcUaConnector.java +++ /dev/null @@ -1,354 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
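// The removed builder's flush contract, as its callers used it: a non-null pair returned by
// onEvent(...) means that batch crossed its size or delay threshold and should be sent at
// once, while getAllNonEmptyBatches() drains the leftovers on a heartbeat or flush. A generic
// sketch of that drain loop; Batch and send are placeholders here, not IoTDB APIs:
import java.util.List;
import java.util.function.Consumer;

final class BatchDrain {

  interface Batch {
    boolean isEmpty();
  }

  static <B extends Batch> void drainNonEmpty(final List<B> batches, final Consumer<B> send) {
    for (final B batch : batches) {
      if (!batch.isEmpty()) {
        send.accept(batch); // mirrors transferInBatchWithoutCheck(endPointAndBatch)
      }
    }
  }

  private BatchDrain() {
    // Utility class
  }
}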
- */ - -package org.apache.iotdb.db.pipe.connector.protocol.opcua; - -import org.apache.iotdb.commons.exception.pipe.PipeRuntimeNonCriticalException; -import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; -import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; -import org.apache.iotdb.pipe.api.PipeConnector; -import org.apache.iotdb.pipe.api.customizer.configuration.PipeConnectorRuntimeConfiguration; -import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator; -import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; -import org.apache.iotdb.pipe.api.event.Event; -import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; -import org.apache.iotdb.pipe.api.exception.PipeException; - -import org.apache.tsfile.common.constant.TsFileConstant; -import org.apache.tsfile.enums.TSDataType; -import org.apache.tsfile.utils.Binary; -import org.apache.tsfile.utils.Pair; -import org.apache.tsfile.write.record.Tablet; -import org.eclipse.milo.opcua.sdk.server.OpcUaServer; -import org.eclipse.milo.opcua.sdk.server.model.nodes.objects.BaseEventTypeNode; -import org.eclipse.milo.opcua.stack.core.Identifiers; -import org.eclipse.milo.opcua.stack.core.UaException; -import org.eclipse.milo.opcua.stack.core.types.builtin.DateTime; -import org.eclipse.milo.opcua.stack.core.types.builtin.LocalizedText; -import org.eclipse.milo.opcua.stack.core.types.builtin.NodeId; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.time.LocalDate; -import java.util.Arrays; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_PASSWORD_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_PASSWORD_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_USER_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_USER_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_OPC_UA_HTTPS_BIND_PORT_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_OPC_UA_HTTPS_BIND_PORT_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_OPC_UA_SECURITY_DIR_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_OPC_UA_SECURITY_DIR_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_OPC_UA_TCP_BIND_PORT_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_OPC_UA_TCP_BIND_PORT_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_IOTDB_PASSWORD_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_IOTDB_USER_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_OPC_UA_HTTPS_BIND_PORT_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_OPC_UA_SECURITY_DIR_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_OPC_UA_TCP_BIND_PORT_KEY; - -/** - * Send data in IoTDB based on Opc Ua 
protocol, using Eclipse Milo. All data are converted into - * tablets, then eventNodes to send to the subscriber clients. Notice that there is no namespace - * since the eventNodes do not need to be saved. - */ -public class OpcUaConnector implements PipeConnector { - - private static final Logger LOGGER = LoggerFactory.getLogger(OpcUaConnector.class); - - private static final Map> - SERVER_KEY_TO_REFERENCE_COUNT_AND_SERVER_MAP = new ConcurrentHashMap<>(); - - private String serverKey; - private OpcUaServer server; - - @Override - public void validate(final PipeParameterValidator validator) throws Exception { - // All the parameters are optional - } - - @Override - public void customize( - final PipeParameters parameters, final PipeConnectorRuntimeConfiguration configuration) - throws Exception { - final int tcpBindPort = - parameters.getIntOrDefault( - Arrays.asList(CONNECTOR_OPC_UA_TCP_BIND_PORT_KEY, SINK_OPC_UA_TCP_BIND_PORT_KEY), - CONNECTOR_OPC_UA_TCP_BIND_PORT_DEFAULT_VALUE); - final int httpsBindPort = - parameters.getIntOrDefault( - Arrays.asList(CONNECTOR_OPC_UA_HTTPS_BIND_PORT_KEY, SINK_OPC_UA_HTTPS_BIND_PORT_KEY), - CONNECTOR_OPC_UA_HTTPS_BIND_PORT_DEFAULT_VALUE); - - final String user = - parameters.getStringOrDefault( - Arrays.asList(CONNECTOR_IOTDB_USER_KEY, SINK_IOTDB_USER_KEY), - CONNECTOR_IOTDB_USER_DEFAULT_VALUE); - final String password = - parameters.getStringOrDefault( - Arrays.asList(CONNECTOR_IOTDB_PASSWORD_KEY, SINK_IOTDB_PASSWORD_KEY), - CONNECTOR_IOTDB_PASSWORD_DEFAULT_VALUE); - final String securityDir = - parameters.getStringOrDefault( - Arrays.asList(CONNECTOR_OPC_UA_SECURITY_DIR_KEY, SINK_OPC_UA_SECURITY_DIR_KEY), - CONNECTOR_OPC_UA_SECURITY_DIR_DEFAULT_VALUE); - - synchronized (SERVER_KEY_TO_REFERENCE_COUNT_AND_SERVER_MAP) { - serverKey = httpsBindPort + ":" + tcpBindPort; - - server = - SERVER_KEY_TO_REFERENCE_COUNT_AND_SERVER_MAP - .computeIfAbsent( - serverKey, - key -> { - try { - final OpcUaServer newServer = - new OpcUaServerBuilder() - .setTcpBindPort(tcpBindPort) - .setHttpsBindPort(httpsBindPort) - .setUser(user) - .setPassword(password) - .setSecurityDir(securityDir) - .build(); - newServer.startup(); - return new Pair<>(new AtomicInteger(0), newServer); - } catch (final Exception e) { - throw new PipeException("Failed to build and startup OpcUaServer", e); - } - }) - .getRight(); - SERVER_KEY_TO_REFERENCE_COUNT_AND_SERVER_MAP.get(serverKey).getLeft().incrementAndGet(); - } - } - - @Override - public void handshake() throws Exception { - // Server side, do nothing - } - - @Override - public void heartbeat() throws Exception { - // Server side, do nothing - } - - @Override - public void transfer(final Event event) throws Exception { - // Do nothing when receive heartbeat or other events - } - - @Override - public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exception { - // PipeProcessor can change the type of TabletInsertionEvent - if (!(tabletInsertionEvent instanceof PipeInsertNodeTabletInsertionEvent) - && !(tabletInsertionEvent instanceof PipeRawTabletInsertionEvent)) { - LOGGER.warn( - "OpcUaConnector only support " - + "PipeInsertNodeTabletInsertionEvent and PipeRawTabletInsertionEvent. 
" - + "Ignore {}.", - tabletInsertionEvent); - return; - } - - if (tabletInsertionEvent instanceof PipeInsertNodeTabletInsertionEvent) { - transferTabletWrapper(server, (PipeInsertNodeTabletInsertionEvent) tabletInsertionEvent); - } else { - transferTabletWrapper(server, (PipeRawTabletInsertionEvent) tabletInsertionEvent); - } - } - - private void transferTabletWrapper( - final OpcUaServer server, - final PipeInsertNodeTabletInsertionEvent pipeInsertNodeTabletInsertionEvent) - throws UaException { - try { - // We increase the reference count for this event to determine if the event may be released. - if (!pipeInsertNodeTabletInsertionEvent.increaseReferenceCount( - OpcUaConnector.class.getName())) { - return; - } - for (final Tablet tablet : pipeInsertNodeTabletInsertionEvent.convertToTablets()) { - transferTablet(server, tablet); - } - } finally { - pipeInsertNodeTabletInsertionEvent.decreaseReferenceCount( - OpcUaConnector.class.getName(), false); - } - } - - private void transferTabletWrapper( - final OpcUaServer server, final PipeRawTabletInsertionEvent pipeRawTabletInsertionEvent) - throws UaException { - try { - // We increase the reference count for this event to determine if the event may be released. - if (!pipeRawTabletInsertionEvent.increaseReferenceCount(OpcUaConnector.class.getName())) { - return; - } - transferTablet(server, pipeRawTabletInsertionEvent.convertToTablet()); - } finally { - pipeRawTabletInsertionEvent.decreaseReferenceCount(OpcUaConnector.class.getName(), false); - } - } - - /** - * Transfer {@link Tablet} into eventNodes and post it on the eventBus, so that they will be heard - * at the subscribers. Notice that an eventNode is reused to reduce object creation costs. - * - * @param server OpcUaServer - * @param tablet the tablet to send - * @throws UaException if failed to create {@link Event} - */ - private void transferTablet(final OpcUaServer server, final Tablet tablet) throws UaException { - // There is no nameSpace, so that nameSpaceIndex is always 0 - final int pseudoNameSpaceIndex = 0; - final BaseEventTypeNode eventNode = - server - .getEventFactory() - .createEvent( - new NodeId(pseudoNameSpaceIndex, UUID.randomUUID()), Identifiers.BaseEventType); - // Use eventNode here because other nodes doesn't support values and times simultaneously - for (int columnIndex = 0; columnIndex < tablet.getSchemas().size(); ++columnIndex) { - - final TSDataType dataType = tablet.getSchemas().get(columnIndex).getType(); - - // Source name --> Sensor path, like root.test.d_0.s_0 - eventNode.setSourceName( - tablet.deviceId - + TsFileConstant.PATH_SEPARATOR - + tablet.getSchemas().get(columnIndex).getMeasurementId()); - - // Source node --> Sensor type, like double - eventNode.setSourceNode(convertToOpcDataType(dataType)); - - for (int rowIndex = 0; rowIndex < tablet.rowSize; ++rowIndex) { - // Filter null value - if (tablet.bitMaps[columnIndex].isMarked(rowIndex)) { - continue; - } - - // Time --> TimeStamp - eventNode.setTime(new DateTime(tablet.timestamps[rowIndex])); - - // Message --> Value - switch (dataType) { - case BOOLEAN: - eventNode.setMessage( - LocalizedText.english( - Boolean.toString(((boolean[]) tablet.values[columnIndex])[rowIndex]))); - break; - case INT32: - eventNode.setMessage( - LocalizedText.english( - Integer.toString(((int[]) tablet.values[columnIndex])[rowIndex]))); - break; - case DATE: - eventNode.setMessage( - LocalizedText.english( - (((LocalDate[]) tablet.values[columnIndex])[rowIndex]).toString())); - break; - case INT64: - case TIMESTAMP: 
- eventNode.setMessage( - LocalizedText.english( - Long.toString(((long[]) tablet.values[columnIndex])[rowIndex]))); - break; - case FLOAT: - eventNode.setMessage( - LocalizedText.english( - Float.toString(((float[]) tablet.values[columnIndex])[rowIndex]))); - break; - case DOUBLE: - eventNode.setMessage( - LocalizedText.english( - Double.toString(((double[]) tablet.values[columnIndex])[rowIndex]))); - break; - case TEXT: - case BLOB: - case STRING: - eventNode.setMessage( - LocalizedText.english( - ((Binary[]) tablet.values[columnIndex])[rowIndex].toString())); - break; - case VECTOR: - case UNKNOWN: - default: - throw new PipeRuntimeNonCriticalException( - "Unsupported data type: " + tablet.getSchemas().get(columnIndex).getType()); - } - - // Send the event - server.getEventBus().post(eventNode); - } - } - eventNode.delete(); - } - - private NodeId convertToOpcDataType(final TSDataType type) { - switch (type) { - case BOOLEAN: - return Identifiers.Boolean; - case INT32: - return Identifiers.Int32; - case DATE: - return Identifiers.DateTime; - case INT64: - case TIMESTAMP: - return Identifiers.Int64; - case FLOAT: - return Identifiers.Float; - case DOUBLE: - return Identifiers.Double; - case TEXT: - case BLOB: - case STRING: - return Identifiers.String; - case VECTOR: - case UNKNOWN: - default: - throw new PipeRuntimeNonCriticalException("Unsupported data type: " + type); - } - } - - @Override - public void close() throws Exception { - if (serverKey == null) { - return; - } - - synchronized (SERVER_KEY_TO_REFERENCE_COUNT_AND_SERVER_MAP) { - final Pair pair = - SERVER_KEY_TO_REFERENCE_COUNT_AND_SERVER_MAP.get(serverKey); - if (pair == null) { - return; - } - - if (pair.getLeft().decrementAndGet() <= 0) { - try { - pair.getRight().shutdown(); - } finally { - SERVER_KEY_TO_REFERENCE_COUNT_AND_SERVER_MAP.remove(serverKey); - } - } - } - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/IoTDBDataRegionAsyncConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/IoTDBDataRegionAsyncConnector.java deleted file mode 100644 index 255e5adec7673..0000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/IoTDBDataRegionAsyncConnector.java +++ /dev/null @@ -1,575 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
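// OpcUaConnector above shares one server per "httpsPort:tcpPort" key across connector
// instances and guards it with a reference count: customize() performs computeIfAbsent plus
// an increment, close() decrements and shuts the server down once the count drops to zero.
// The same acquire/release pattern reduced to its essentials, with R standing in for
// OpcUaServer:
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import java.util.function.Supplier;

public class SharedResourcePool<R> {

  private static final class Entry<V> {
    private final AtomicInteger referenceCount = new AtomicInteger(0);
    private final V resource;

    private Entry(final V resource) {
      this.resource = resource;
    }
  }

  private final Map<String, Entry<R>> pool = new HashMap<>();

  public synchronized R acquire(final String key, final Supplier<R> factory) {
    final Entry<R> entry = pool.computeIfAbsent(key, k -> new Entry<>(factory.get()));
    entry.referenceCount.incrementAndGet();
    return entry.resource;
  }

  public synchronized void release(final String key, final Consumer<R> shutdown) {
    final Entry<R> entry = pool.get(key);
    if (entry != null && entry.referenceCount.decrementAndGet() <= 0) {
      try {
        shutdown.accept(entry.resource);
      } finally {
        pool.remove(key);
      }
    }
  }
}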
- */ - -package org.apache.iotdb.db.pipe.connector.protocol.thrift.async; - -import org.apache.iotdb.common.rpc.thrift.TEndPoint; -import org.apache.iotdb.commons.client.async.AsyncPipeDataTransferServiceClient; -import org.apache.iotdb.commons.pipe.connector.protocol.IoTDBConnector; -import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.db.pipe.connector.client.IoTDBDataNodeAsyncClientManager; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.batch.PipeTabletEventBatch; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.batch.PipeTabletEventPlainBatch; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.batch.PipeTabletEventTsFileBatch; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.batch.PipeTransferBatchReqBuilder; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletBinaryReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletInsertNodeReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletRawReq; -import org.apache.iotdb.db.pipe.connector.protocol.thrift.async.handler.PipeTransferTabletBatchEventHandler; -import org.apache.iotdb.db.pipe.connector.protocol.thrift.async.handler.PipeTransferTabletInsertNodeEventHandler; -import org.apache.iotdb.db.pipe.connector.protocol.thrift.async.handler.PipeTransferTabletRawEventHandler; -import org.apache.iotdb.db.pipe.connector.protocol.thrift.async.handler.PipeTransferTsFileHandler; -import org.apache.iotdb.db.pipe.connector.protocol.thrift.sync.IoTDBDataRegionSyncConnector; -import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; -import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent; -import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; -import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; -import org.apache.iotdb.db.pipe.event.common.terminate.PipeTerminateEvent; -import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; -import org.apache.iotdb.db.pipe.task.subtask.connector.PipeConnectorSubtask; -import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; -import org.apache.iotdb.pipe.api.PipeConnector; -import org.apache.iotdb.pipe.api.customizer.configuration.PipeConnectorRuntimeConfiguration; -import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator; -import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; -import org.apache.iotdb.pipe.api.event.Event; -import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; -import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent; -import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq; - -import org.apache.tsfile.exception.write.WriteProcessException; -import org.apache.tsfile.utils.Pair; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_LEADER_CACHE_ENABLE_DEFAULT_VALUE; -import static 
org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_LEADER_CACHE_ENABLE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_IOTDB_SSL_ENABLE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_IOTDB_SSL_TRUST_STORE_PATH_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_IOTDB_SSL_TRUST_STORE_PWD_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_LEADER_CACHE_ENABLE_KEY; - -public class IoTDBDataRegionAsyncConnector extends IoTDBConnector { - - private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBDataRegionAsyncConnector.class); - - private static final String THRIFT_ERROR_FORMATTER_WITHOUT_ENDPOINT = - "Failed to borrow client from client pool when sending to receiver."; - private static final String THRIFT_ERROR_FORMATTER_WITH_ENDPOINT = - "Exception occurred while sending to receiver %s:%s."; - - private IoTDBDataNodeAsyncClientManager clientManager; - - private final IoTDBDataRegionSyncConnector retryConnector = new IoTDBDataRegionSyncConnector(); - private final BlockingQueue retryEventQueue = new LinkedBlockingQueue<>(); - - private PipeTransferBatchReqBuilder tabletBatchBuilder; - - private final AtomicBoolean isClosed = new AtomicBoolean(false); - - @Override - public void validate(final PipeParameterValidator validator) throws Exception { - super.validate(validator); - retryConnector.validate(validator); - - final PipeParameters parameters = validator.getParameters(); - - validator.validate( - args -> !((boolean) args[0] || (boolean) args[1] || (boolean) args[2]), - "Only 'iotdb-thrift-ssl-sink' supports SSL transmission currently.", - parameters.getBooleanOrDefault(SINK_IOTDB_SSL_ENABLE_KEY, false), - parameters.hasAttribute(SINK_IOTDB_SSL_TRUST_STORE_PATH_KEY), - parameters.hasAttribute(SINK_IOTDB_SSL_TRUST_STORE_PWD_KEY)); - } - - @Override - public void customize( - final PipeParameters parameters, final PipeConnectorRuntimeConfiguration configuration) - throws Exception { - super.customize(parameters, configuration); - retryConnector.customize(parameters, configuration); - - clientManager = - new IoTDBDataNodeAsyncClientManager( - nodeUrls, - parameters.getBooleanOrDefault( - Arrays.asList(SINK_LEADER_CACHE_ENABLE_KEY, CONNECTOR_LEADER_CACHE_ENABLE_KEY), - CONNECTOR_LEADER_CACHE_ENABLE_DEFAULT_VALUE), - loadBalanceStrategy); - - if (isTabletBatchModeEnabled) { - tabletBatchBuilder = new PipeTransferBatchReqBuilder(parameters); - } - } - - @Override - // Synchronized to avoid close connector when transfer event - public synchronized void handshake() throws Exception { - retryConnector.handshake(); - } - - @Override - public void heartbeat() { - retryConnector.heartbeat(); - } - - @Override - public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exception { - transferQueuedEventsIfNecessary(); - - if (!(tabletInsertionEvent instanceof PipeInsertNodeTabletInsertionEvent) - && !(tabletInsertionEvent instanceof PipeRawTabletInsertionEvent)) { - LOGGER.warn( - "IoTDBThriftAsyncConnector only support PipeInsertNodeTabletInsertionEvent and PipeRawTabletInsertionEvent. 
" - + "Current event: {}.", - tabletInsertionEvent); - return; - } - - if (isTabletBatchModeEnabled) { - final Pair endPointAndBatch = - tabletBatchBuilder.onEvent(tabletInsertionEvent); - if (Objects.isNull(endPointAndBatch)) { - return; - } - transferInBatchWithoutCheck(endPointAndBatch); - } else { - transferInEventWithoutCheck(tabletInsertionEvent); - } - } - - private void transferInBatchWithoutCheck( - final Pair endPointAndBatch) - throws IOException, WriteProcessException { - final PipeTabletEventBatch batch = endPointAndBatch.getRight(); - - if (batch instanceof PipeTabletEventPlainBatch) { - transfer( - endPointAndBatch.getLeft(), - new PipeTransferTabletBatchEventHandler((PipeTabletEventPlainBatch) batch, this)); - } else if (batch instanceof PipeTabletEventTsFileBatch) { - final PipeTabletEventTsFileBatch tsFileBatch = (PipeTabletEventTsFileBatch) batch; - final List sealedFiles = tsFileBatch.sealTsFiles(); - final Map, Double> pipe2WeightMap = tsFileBatch.deepCopyPipe2WeightMap(); - final List events = tsFileBatch.deepCopyEvents(); - final AtomicInteger eventsReferenceCount = new AtomicInteger(sealedFiles.size()); - final AtomicBoolean eventsHadBeenAddedToRetryQueue = new AtomicBoolean(false); - - for (final File sealedFile : sealedFiles) { - transfer( - new PipeTransferTsFileHandler( - this, - pipe2WeightMap, - events, - eventsReferenceCount, - eventsHadBeenAddedToRetryQueue, - sealedFile, - null, - false)); - } - } else { - LOGGER.warn( - "Unsupported batch type {} when transferring tablet insertion event.", batch.getClass()); - } - - endPointAndBatch.getRight().onSuccess(); - } - - private void transferInEventWithoutCheck(final TabletInsertionEvent tabletInsertionEvent) - throws Exception { - if (tabletInsertionEvent instanceof PipeInsertNodeTabletInsertionEvent) { - final PipeInsertNodeTabletInsertionEvent pipeInsertNodeTabletInsertionEvent = - (PipeInsertNodeTabletInsertionEvent) tabletInsertionEvent; - // We increase the reference count for this event to determine if the event may be released. - if (!pipeInsertNodeTabletInsertionEvent.increaseReferenceCount( - IoTDBDataRegionAsyncConnector.class.getName())) { - pipeInsertNodeTabletInsertionEvent.decreaseReferenceCount( - IoTDBDataRegionAsyncConnector.class.getName(), false); - return; - } - - final InsertNode insertNode = - pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible(); - final TPipeTransferReq pipeTransferReq = - compressIfNeeded( - Objects.isNull(insertNode) - ? PipeTransferTabletBinaryReq.toTPipeTransferReq( - pipeInsertNodeTabletInsertionEvent.getByteBuffer()) - : PipeTransferTabletInsertNodeReq.toTPipeTransferReq(insertNode)); - final PipeTransferTabletInsertNodeEventHandler pipeTransferInsertNodeReqHandler = - new PipeTransferTabletInsertNodeEventHandler( - pipeInsertNodeTabletInsertionEvent, pipeTransferReq, this); - - transfer( - // getDeviceId() may return null for InsertRowsNode - pipeInsertNodeTabletInsertionEvent.getDeviceId(), pipeTransferInsertNodeReqHandler); - } else { // tabletInsertionEvent instanceof PipeRawTabletInsertionEvent - final PipeRawTabletInsertionEvent pipeRawTabletInsertionEvent = - (PipeRawTabletInsertionEvent) tabletInsertionEvent; - // We increase the reference count for this event to determine if the event may be released. 
- if (!pipeRawTabletInsertionEvent.increaseReferenceCount( - IoTDBDataRegionAsyncConnector.class.getName())) { - pipeRawTabletInsertionEvent.decreaseReferenceCount( - IoTDBDataRegionAsyncConnector.class.getName(), false); - return; - } - - final TPipeTransferReq pipeTransferTabletRawReq = - compressIfNeeded( - PipeTransferTabletRawReq.toTPipeTransferReq( - pipeRawTabletInsertionEvent.convertToTablet(), - pipeRawTabletInsertionEvent.isAligned())); - final PipeTransferTabletRawEventHandler pipeTransferTabletReqHandler = - new PipeTransferTabletRawEventHandler( - pipeRawTabletInsertionEvent, pipeTransferTabletRawReq, this); - - transfer(pipeRawTabletInsertionEvent.getDeviceId(), pipeTransferTabletReqHandler); - } - } - - private void transfer( - final TEndPoint endPoint, - final PipeTransferTabletBatchEventHandler pipeTransferTabletBatchEventHandler) { - AsyncPipeDataTransferServiceClient client = null; - try { - client = clientManager.borrowClient(endPoint); - pipeTransferTabletBatchEventHandler.transfer(client); - } catch (final Exception ex) { - logOnClientException(client, ex); - pipeTransferTabletBatchEventHandler.onError(ex); - } - } - - private void transfer( - final String deviceId, - final PipeTransferTabletInsertNodeEventHandler pipeTransferInsertNodeReqHandler) { - AsyncPipeDataTransferServiceClient client = null; - try { - client = clientManager.borrowClient(deviceId); - pipeTransferInsertNodeReqHandler.transfer(client); - } catch (final Exception ex) { - logOnClientException(client, ex); - pipeTransferInsertNodeReqHandler.onError(ex); - } - } - - private void transfer( - final String deviceId, final PipeTransferTabletRawEventHandler pipeTransferTabletReqHandler) { - AsyncPipeDataTransferServiceClient client = null; - try { - client = clientManager.borrowClient(deviceId); - pipeTransferTabletReqHandler.transfer(client); - } catch (final Exception ex) { - logOnClientException(client, ex); - pipeTransferTabletReqHandler.onError(ex); - } - } - - @Override - public void transfer(final TsFileInsertionEvent tsFileInsertionEvent) throws Exception { - transferQueuedEventsIfNecessary(); - transferBatchedEventsIfNecessary(); - - if (!(tsFileInsertionEvent instanceof PipeTsFileInsertionEvent)) { - LOGGER.warn( - "IoTDBThriftAsyncConnector only support PipeTsFileInsertionEvent. Current event: {}.", - tsFileInsertionEvent); - return; - } - - transferWithoutCheck(tsFileInsertionEvent); - } - - private void transferWithoutCheck(final TsFileInsertionEvent tsFileInsertionEvent) - throws Exception { - final PipeTsFileInsertionEvent pipeTsFileInsertionEvent = - (PipeTsFileInsertionEvent) tsFileInsertionEvent; - // We increase the reference count for this event to determine if the event may be released. - if (!pipeTsFileInsertionEvent.increaseReferenceCount( - IoTDBDataRegionAsyncConnector.class.getName())) { - pipeTsFileInsertionEvent.decreaseReferenceCount( - IoTDBDataRegionAsyncConnector.class.getName(), false); - return; - } - - // We assume that no exceptions will be thrown after reference count is increased. - try { - // Just in case. To avoid the case that exception occurred when constructing the handler. 
- if (!pipeTsFileInsertionEvent.getTsFile().exists()) { - throw new FileNotFoundException(pipeTsFileInsertionEvent.getTsFile().getAbsolutePath()); - } - - final PipeTransferTsFileHandler pipeTransferTsFileHandler = - new PipeTransferTsFileHandler( - this, - Collections.singletonMap( - new Pair<>( - pipeTsFileInsertionEvent.getPipeName(), - pipeTsFileInsertionEvent.getCreationTime()), - 1.0), - Collections.singletonList(pipeTsFileInsertionEvent), - new AtomicInteger(1), - new AtomicBoolean(false), - pipeTsFileInsertionEvent.getTsFile(), - pipeTsFileInsertionEvent.getModFile(), - pipeTsFileInsertionEvent.isWithMod() - && clientManager.supportModsIfIsDataNodeReceiver()); - - transfer(pipeTransferTsFileHandler); - } catch (final Exception e) { - // Just in case. To avoid the case that exception occurred when constructing the handler. - pipeTsFileInsertionEvent.decreaseReferenceCount( - IoTDBDataRegionAsyncConnector.class.getName(), false); - throw e; - } - } - - private void transfer(final PipeTransferTsFileHandler pipeTransferTsFileHandler) { - AsyncPipeDataTransferServiceClient client = null; - try { - client = clientManager.borrowClient(); - pipeTransferTsFileHandler.transfer(clientManager, client); - } catch (final Exception ex) { - logOnClientException(client, ex); - pipeTransferTsFileHandler.onError(ex); - } - } - - @Override - public void transfer(final Event event) throws Exception { - transferQueuedEventsIfNecessary(); - transferBatchedEventsIfNecessary(); - - if (!(event instanceof PipeHeartbeatEvent - || event instanceof PipeSchemaRegionWritePlanEvent - || event instanceof PipeTerminateEvent)) { - LOGGER.warn( - "IoTDBThriftAsyncConnector does not support transferring generic event: {}.", event); - return; - } - - retryConnector.transfer(event); - } - - //////////////////////////// Leader cache update //////////////////////////// - - public void updateLeaderCache(final String deviceId, final TEndPoint endPoint) { - clientManager.updateLeaderCache(deviceId, endPoint); - } - - //////////////////////////// Exception handlers //////////////////////////// - - private void logOnClientException( - final AsyncPipeDataTransferServiceClient client, final Exception e) { - if (client == null) { - LOGGER.warn(THRIFT_ERROR_FORMATTER_WITHOUT_ENDPOINT, e); - } else { - LOGGER.warn( - String.format(THRIFT_ERROR_FORMATTER_WITH_ENDPOINT, client.getIp(), client.getPort()), e); - } - } - - /** - * Transfer queued {@link Event}s which are waiting for retry. - * - * @throws Exception if an error occurs. The error will be handled by pipe framework, which will - * retry the {@link Event} and mark the {@link Event} as failure and stop the pipe if the - * retry times exceeds the threshold. - * @see PipeConnector#transfer(Event) for more details. - * @see PipeConnector#transfer(TabletInsertionEvent) for more details. - * @see PipeConnector#transfer(TsFileInsertionEvent) for more details. 
- */ - private void transferQueuedEventsIfNecessary() throws Exception { - while (!retryEventQueue.isEmpty()) { - synchronized (this) { - if (isClosed.get() || retryEventQueue.isEmpty()) { - return; - } - - final Event peekedEvent = retryEventQueue.peek(); - - if (peekedEvent instanceof PipeInsertNodeTabletInsertionEvent) { - retryConnector.transfer((PipeInsertNodeTabletInsertionEvent) peekedEvent); - } else if (peekedEvent instanceof PipeRawTabletInsertionEvent) { - retryConnector.transfer((PipeRawTabletInsertionEvent) peekedEvent); - } else if (peekedEvent instanceof PipeTsFileInsertionEvent) { - retryConnector.transfer((PipeTsFileInsertionEvent) peekedEvent); - } else { - LOGGER.warn( - "IoTDBThriftAsyncConnector does not support transfer generic event: {}.", - peekedEvent); - } - - if (peekedEvent instanceof EnrichedEvent) { - ((EnrichedEvent) peekedEvent) - .decreaseReferenceCount(IoTDBDataRegionAsyncConnector.class.getName(), true); - } - - final Event polledEvent = retryEventQueue.poll(); - if (polledEvent != peekedEvent) { - LOGGER.error( - "The event polled from the queue is not the same as the event peeked from the queue. " - + "Peeked event: {}, polled event: {}.", - peekedEvent, - polledEvent); - } - if (polledEvent != null && LOGGER.isDebugEnabled()) { - LOGGER.debug("Polled event {} from retry queue.", polledEvent); - } - } - } - - // Trigger cron heartbeat event in retry connector to send batch in time - retryConnector.transfer(PipeConnectorSubtask.CRON_HEARTBEAT_EVENT); - } - - /** Try its best to commit data in order. Flush can also be a trigger to transfer batched data. */ - private void transferBatchedEventsIfNecessary() throws IOException, WriteProcessException { - if (!isTabletBatchModeEnabled || tabletBatchBuilder.isEmpty()) { - return; - } - - for (final Pair endPointAndBatch : - tabletBatchBuilder.getAllNonEmptyBatches()) { - transferInBatchWithoutCheck(endPointAndBatch); - } - } - - /** - * Add failure {@link Event} to retry queue. - * - * @param event {@link Event} to retry - */ - @SuppressWarnings("java:S899") - public void addFailureEventToRetryQueue(final Event event) { - if (event instanceof EnrichedEvent && ((EnrichedEvent) event).isReleased()) { - return; - } - - if (isClosed.get()) { - if (event instanceof EnrichedEvent) { - ((EnrichedEvent) event).clearReferenceCount(IoTDBDataRegionAsyncConnector.class.getName()); - } - return; - } - - retryEventQueue.offer(event); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Added event {} to retry queue.", event); - } - - if (isClosed.get()) { - if (event instanceof EnrichedEvent) { - ((EnrichedEvent) event).clearReferenceCount(IoTDBDataRegionAsyncConnector.class.getName()); - } - } - } - - /** - * Add failure {@link EnrichedEvent}s to retry queue. - * - * @param events {@link EnrichedEvent}s to retry - */ - public void addFailureEventsToRetryQueue(final Iterable events) { - events.forEach(this::addFailureEventToRetryQueue); - } - - public synchronized void clearRetryEventsReferenceCount() { - while (!retryEventQueue.isEmpty()) { - final Event event = retryEventQueue.poll(); - if (event instanceof EnrichedEvent) { - ((EnrichedEvent) event).clearReferenceCount(IoTDBDataRegionAsyncConnector.class.getName()); - } - } - } - - //////////////////////////// Operations for close //////////////////////////// - - /** - * When a pipe is dropped, the connector maybe reused and will not be closed. So we just discard - * its queued events in the output pipe connector. 
- */ - public synchronized void discardEventsOfPipe(final String pipeNameToDrop) { - retryEventQueue.removeIf( - event -> { - if (event instanceof EnrichedEvent - && pipeNameToDrop.equals(((EnrichedEvent) event).getPipeName())) { - ((EnrichedEvent) event) - .clearReferenceCount(IoTDBDataRegionAsyncConnector.class.getName()); - return true; - } - return false; - }); - } - - @Override - // synchronized to avoid close connector when transfer event - public synchronized void close() { - isClosed.set(true); - - retryConnector.close(); - clearRetryEventsReferenceCount(); - - if (tabletBatchBuilder != null) { - tabletBatchBuilder.close(); - } - - super.close(); - } - - //////////////////////////// APIs provided for metric framework //////////////////////////// - - public int getRetryEventQueueSize() { - return retryEventQueue.size(); - } - - // For performance, this will not acquire lock and does not guarantee the correct - // result. However, this shall not cause any exceptions when concurrently read & written. - public int getRetryEventCount(final String pipeName) { - final AtomicInteger count = new AtomicInteger(0); - try { - retryEventQueue.forEach( - event -> { - if (event instanceof EnrichedEvent - && pipeName.equals(((EnrichedEvent) event).getPipeName())) { - count.incrementAndGet(); - } - }); - return count.get(); - } catch (final Exception e) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Failed to get retry event count for pipe {}.", pipeName, e); - } - return count.get(); - } - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/util/PipeTabletEventSorter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/util/PipeTabletEventSorter.java deleted file mode 100644 index 2a5e8769b59c1..0000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/util/PipeTabletEventSorter.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.db.pipe.connector.util; - -import org.apache.tsfile.enums.TSDataType; -import org.apache.tsfile.utils.Binary; -import org.apache.tsfile.utils.BitMap; -import org.apache.tsfile.write.UnSupportedDataTypeException; -import org.apache.tsfile.write.record.Tablet; -import org.apache.tsfile.write.schema.IMeasurementSchema; - -import java.time.LocalDate; -import java.util.Arrays; -import java.util.Comparator; - -public class PipeTabletEventSorter { - - private final Tablet tablet; - - private boolean isSorted = true; - private boolean isDeduplicated = true; - - private Integer[] index; - private int deduplicatedSize; - - public PipeTabletEventSorter(final Tablet tablet) { - this.tablet = tablet; - deduplicatedSize = tablet == null ? 
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/util/PipeTabletEventSorter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/util/PipeTabletEventSorter.java
deleted file mode 100644
index 2a5e8769b59c1..0000000000000
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/util/PipeTabletEventSorter.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.pipe.connector.util;
-
-import org.apache.tsfile.enums.TSDataType;
-import org.apache.tsfile.utils.Binary;
-import org.apache.tsfile.utils.BitMap;
-import org.apache.tsfile.write.UnSupportedDataTypeException;
-import org.apache.tsfile.write.record.Tablet;
-import org.apache.tsfile.write.schema.IMeasurementSchema;
-
-import java.time.LocalDate;
-import java.util.Arrays;
-import java.util.Comparator;
-
-public class PipeTabletEventSorter {
-
-  private final Tablet tablet;
-
-  private boolean isSorted = true;
-  private boolean isDeduplicated = true;
-
-  private Integer[] index;
-  private int deduplicatedSize;
-
-  public PipeTabletEventSorter(final Tablet tablet) {
-    this.tablet = tablet;
-    deduplicatedSize = tablet == null ? 0 : tablet.rowSize;
-  }
-
-  public void deduplicateAndSortTimestampsIfNecessary() {
-    if (tablet == null || tablet.rowSize == 0) {
-      return;
-    }
-
-    for (int i = 1, size = tablet.rowSize; i < size; ++i) {
-      final long currentTimestamp = tablet.timestamps[i];
-      final long previousTimestamp = tablet.timestamps[i - 1];
-
-      if (currentTimestamp < previousTimestamp) {
-        isSorted = false;
-      }
-      if (currentTimestamp == previousTimestamp) {
-        isDeduplicated = false;
-      }
-
-      if (!isSorted && !isDeduplicated) {
-        break;
-      }
-    }
-
-    if (isSorted && isDeduplicated) {
-      return;
-    }
-
-    index = new Integer[tablet.rowSize];
-    for (int i = 0, size = tablet.rowSize; i < size; i++) {
-      index[i] = i;
-    }
-
-    if (!isSorted) {
-      sortTimestamps();
-
-      // Do deduplicate anyway.
-      // isDeduplicated may be false positive when isSorted is false.
-      deduplicateTimestamps();
-      isDeduplicated = true;
-    }
-
-    if (!isDeduplicated) {
-      deduplicateTimestamps();
-    }
-
-    sortAndDeduplicateValuesAndBitMaps();
-  }
-
-  private void sortTimestamps() {
-    Arrays.sort(index, Comparator.comparingLong(i -> tablet.timestamps[i]));
-    Arrays.sort(tablet.timestamps, 0, tablet.rowSize);
-  }
-
-  private void deduplicateTimestamps() {
-    deduplicatedSize = 1;
-    for (int i = 1, size = tablet.rowSize; i < size; i++) {
-      if (tablet.timestamps[i] != tablet.timestamps[i - 1]) {
-        index[deduplicatedSize] = index[i];
-        tablet.timestamps[deduplicatedSize] = tablet.timestamps[i];
-
-        ++deduplicatedSize;
-      }
-    }
-    tablet.rowSize = deduplicatedSize;
-  }
-
-  private void sortAndDeduplicateValuesAndBitMaps() {
-    int columnIndex = 0;
-    for (int i = 0, size = tablet.getSchemas().size(); i < size; i++) {
-      final IMeasurementSchema schema = tablet.getSchemas().get(i);
-      if (schema != null) {
-        tablet.values[columnIndex] =
-            reorderValueList(deduplicatedSize, tablet.values[columnIndex], schema.getType(), index);
-        if (tablet.bitMaps != null && tablet.bitMaps[columnIndex] != null) {
-          tablet.bitMaps[columnIndex] =
-              reorderBitMap(deduplicatedSize, tablet.bitMaps[columnIndex], index);
-        }
-        columnIndex++;
-      }
-    }
-  }
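The deleted sorter's core trick, restated as a hedged sketch: sort an index array by timestamp instead of moving whole rows, deduplicate equal timestamps keeping the first row of each run, then rewrite each value column once through the index. Plain arrays stand in for the Tablet the real class operated on:

import java.util.Arrays;
import java.util.Comparator;

public final class IndexSortSketch {
  public static void main(final String[] args) {
    final long[] timestamps = {3L, 1L, 2L, 2L};
    final double[] values = {30.0, 10.0, 21.0, 22.0};

    final Integer[] index = new Integer[timestamps.length];
    for (int i = 0; i < index.length; i++) {
      index[i] = i;
    }
    // Stable sort of row indices by timestamp; the timestamps follow separately.
    Arrays.sort(index, Comparator.comparingLong(i -> timestamps[i]));
    Arrays.sort(timestamps);

    // Deduplicate: keep the first row of each equal-timestamp run.
    int deduplicatedSize = 1;
    for (int i = 1; i < timestamps.length; i++) {
      if (timestamps[i] != timestamps[i - 1]) {
        index[deduplicatedSize] = index[i];
        timestamps[deduplicatedSize] = timestamps[i];
        deduplicatedSize++;
      }
    }

    // Rewrite the value column once, through the sorted, deduplicated index.
    final double[] reordered = new double[values.length];
    for (int i = 0; i < deduplicatedSize; i++) {
      reordered[i] = values[index[i]];
    }
    System.out.println(Arrays.toString(reordered)); // [10.0, 21.0, 30.0, 0.0]
  }
}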
-  private static Object reorderValueList(
-      int deduplicatedSize,
-      final Object valueList,
-      final TSDataType dataType,
-      final Integer[] index) {
-    switch (dataType) {
-      case BOOLEAN:
-        final boolean[] boolValues = (boolean[]) valueList;
-        final boolean[] deduplicatedBoolValues = new boolean[boolValues.length];
-        for (int i = 0; i < deduplicatedSize; i++) {
-          deduplicatedBoolValues[i] = boolValues[index[i]];
-        }
-        return deduplicatedBoolValues;
-      case INT32:
-        final int[] intValues = (int[]) valueList;
-        final int[] deduplicatedIntValues = new int[intValues.length];
-        for (int i = 0; i < deduplicatedSize; i++) {
-          deduplicatedIntValues[i] = intValues[index[i]];
-        }
-        return deduplicatedIntValues;
-      case DATE:
-        final LocalDate[] dateValues = (LocalDate[]) valueList;
-        final LocalDate[] deduplicatedDateValues = new LocalDate[dateValues.length];
-        for (int i = 0; i < deduplicatedSize; i++) {
-          deduplicatedDateValues[i] = dateValues[index[i]];
-        }
-        return deduplicatedDateValues;
-      case INT64:
-      case TIMESTAMP:
-        final long[] longValues = (long[]) valueList;
-        final long[] deduplicatedLongValues = new long[longValues.length];
-        for (int i = 0; i < deduplicatedSize; i++) {
-          deduplicatedLongValues[i] = longValues[index[i]];
-        }
-        return deduplicatedLongValues;
-      case FLOAT:
-        final float[] floatValues = (float[]) valueList;
-        final float[] deduplicatedFloatValues = new float[floatValues.length];
-        for (int i = 0; i < deduplicatedSize; i++) {
-          deduplicatedFloatValues[i] = floatValues[index[i]];
-        }
-        return deduplicatedFloatValues;
-      case DOUBLE:
-        final double[] doubleValues = (double[]) valueList;
-        final double[] deduplicatedDoubleValues = new double[doubleValues.length];
-        for (int i = 0; i < deduplicatedSize; i++) {
-          deduplicatedDoubleValues[i] = doubleValues[index[i]];
-        }
-        return deduplicatedDoubleValues;
-      case TEXT:
-      case BLOB:
-      case STRING:
-        final Binary[] binaryValues = (Binary[]) valueList;
-        final Binary[] deduplicatedBinaryValues = new Binary[binaryValues.length];
-        for (int i = 0; i < deduplicatedSize; i++) {
-          deduplicatedBinaryValues[i] = binaryValues[index[i]];
-        }
-        return deduplicatedBinaryValues;
-      default:
-        throw new UnSupportedDataTypeException(
-            String.format("Data type %s is not supported.", dataType));
-    }
-  }
-
-  private static BitMap reorderBitMap(
-      int deduplicatedSize, final BitMap bitMap, final Integer[] index) {
-    final BitMap deduplicatedBitMap = new BitMap(bitMap.getSize());
-    for (int i = 0; i < deduplicatedSize; i++) {
-      if (bitMap.isMarked(index[i])) {
-        deduplicatedBitMap.mark(i);
-      }
-    }
-    return deduplicatedBitMap;
-  }
-}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/ConsensusPipeDataNodeDispatcher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/ConsensusPipeDataNodeDispatcher.java
index 9a95d8337886b..8a640ec701c9e 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/ConsensusPipeDataNodeDispatcher.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/ConsensusPipeDataNodeDispatcher.java
@@ -37,6 +37,9 @@
 
 import java.util.Map;
 
+import static org.apache.iotdb.commons.pipe.config.constant.PipeRPCMessageConstant.PIPE_ALREADY_EXIST_MSG;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeRPCMessageConstant.PIPE_NOT_EXIST_MSG;
+
 public class ConsensusPipeDataNodeDispatcher implements ConsensusPipeDispatcher {
 
   private static final Logger LOGGER =
       LoggerFactory.getLogger(ConsensusPipeDataNodeDispatcher.class);
@@ -62,6 +65,10 @@ public void createPipe(
       TSStatus status = configNodeClient.createPipe(req);
       if (TSStatusCode.SUCCESS_STATUS.getStatusCode() != status.getCode()) {
         LOGGER.warn("Failed to create consensus pipe-{}, status: {}", pipeName, status);
+        // ignore idempotence logic
+        if (status.getMessage().contains(PIPE_ALREADY_EXIST_MSG)) {
+          return;
+        }
         throw new PipeException(status.getMessage());
       }
     } catch (Exception e) {
@@ -109,6 +116,10 @@ public void dropPipe(ConsensusPipeName pipeName) throws Exception {
       final TSStatus status = configNodeClient.dropPipe(pipeName.toString());
       if (TSStatusCode.SUCCESS_STATUS.getStatusCode() != status.getCode()) {
         LOGGER.warn("Failed to drop consensus pipe-{}, status: {}", pipeName, status);
+        // ignore idempotence logic
+        if (status.getMessage().contains(PIPE_NOT_EXIST_MSG)) {
+          return;
+        }
         throw new PipeException(status.getMessage());
       }
     } catch (Exception e) {
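The dispatcher change above makes create/drop idempotent: a retried consensus operation that finds the pipe already created (or already dropped) converges to success instead of failing. A hedged sketch of the guard, where SUCCESS and ALREADY_EXIST are stand-ins for TSStatusCode.SUCCESS_STATUS and PIPE_ALREADY_EXIST_MSG, not the real values:

final class IdempotentCreate {
  private static final int SUCCESS = 200; // assumed code, not IoTDB's real value
  private static final String ALREADY_EXIST = "already exist"; // assumed message constant

  static void createPipeIdempotently(final int statusCode, final String message) {
    if (statusCode == SUCCESS) {
      return;
    }
    if (message != null && message.contains(ALREADY_EXIST)) {
      return; // an earlier attempt already created the pipe; treat the retry as success
    }
    throw new RuntimeException("createPipe failed: " + message);
  }
}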
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/PipeConsensusConnectorMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/PipeConsensusSinkMetrics.java
similarity index 95%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/PipeConsensusConnectorMetrics.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/PipeConsensusSinkMetrics.java
index 23ab4d410c499..4a26fdd34da4a 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/PipeConsensusConnectorMetrics.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/PipeConsensusSinkMetrics.java
@@ -21,7 +21,7 @@
 
 import org.apache.iotdb.commons.service.metric.enums.Metric;
 import org.apache.iotdb.commons.service.metric.enums.Tag;
-import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.PipeConsensusAsyncConnector;
+import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.PipeConsensusAsyncSink;
 import org.apache.iotdb.metrics.AbstractMetricService;
 import org.apache.iotdb.metrics.impl.DoNothingMetricManager;
 import org.apache.iotdb.metrics.metricsets.IMetricSet;
@@ -30,8 +30,8 @@
 import org.apache.iotdb.metrics.utils.MetricLevel;
 import org.apache.iotdb.metrics.utils.MetricType;
 
-public class PipeConsensusConnectorMetrics implements IMetricSet {
-  private final PipeConsensusAsyncConnector pipeConsensusAsyncConnector;
+public class PipeConsensusSinkMetrics implements IMetricSet {
+  private final PipeConsensusAsyncSink pipeConsensusAsyncConnector;
 
   private Timer connectorEnqueueTimer = DoNothingMetricManager.DO_NOTHING_TIMER;
   private Timer connectorWALTransferTimer = DoNothingMetricManager.DO_NOTHING_TIMER;
@@ -43,7 +43,7 @@ public class PipeConsensusConnectorMetrics implements IMetricSet {
 
   private static final String CONNECTOR = "pipeConsensusAsyncConnector";
 
-  public PipeConsensusConnectorMetrics(PipeConsensusAsyncConnector pipeConsensusAsyncConnector) {
+  public PipeConsensusSinkMetrics(PipeConsensusAsyncSink pipeConsensusAsyncConnector) {
     this.pipeConsensusAsyncConnector = pipeConsensusAsyncConnector;
   }
 
@@ -107,7 +107,7 @@ private void bindAutoGauge(AbstractMetricService metricService) {
         Metric.PIPE_SEND_EVENT.toString(),
         MetricLevel.IMPORTANT,
         pipeConsensusAsyncConnector,
-        PipeConsensusAsyncConnector::getTransferBufferSize,
+        PipeConsensusAsyncSink::getTransferBufferSize,
         Tag.NAME.toString(),
         CONNECTOR,
         Tag.REGION.toString(),
@@ -118,7 +118,7 @@ private void bindAutoGauge(AbstractMetricService metricService) {
         Metric.PIPE_SEND_EVENT.toString(),
         MetricLevel.IMPORTANT,
         pipeConsensusAsyncConnector,
-        PipeConsensusAsyncConnector::getRetryBufferSize,
+        PipeConsensusAsyncSink::getRetryBufferSize,
         Tag.NAME.toString(),
         CONNECTOR,
         Tag.REGION.toString(),
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/PipeConsensusSubtaskExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/PipeConsensusSubtaskExecutor.java
index 175a95992cfa9..8b91c8d408996 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/PipeConsensusSubtaskExecutor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/PipeConsensusSubtaskExecutor.java
@@ -20,13 +20,17 @@
 
 import org.apache.iotdb.commons.concurrent.ThreadName;
 import org.apache.iotdb.commons.pipe.config.PipeConfig;
-import org.apache.iotdb.db.pipe.execution.PipeConnectorSubtaskExecutor;
+import org.apache.iotdb.db.pipe.agent.task.execution.PipeSinkSubtaskExecutor;
 
-public class PipeConsensusSubtaskExecutor extends PipeConnectorSubtaskExecutor {
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class PipeConsensusSubtaskExecutor extends PipeSinkSubtaskExecutor {
+
+  private static final AtomicInteger id = new AtomicInteger(0);
 
   public PipeConsensusSubtaskExecutor() {
     super(
         PipeConfig.getInstance().getPipeSubtaskExecutorMaxThreadNum(),
-        ThreadName.PIPE_CONSENSUS_EXECUTOR_POOL);
+        ThreadName.PIPE_CONSENSUS_EXECUTOR_POOL + "-" + id.getAndIncrement());
  }
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/ReferenceTrackableEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/ReferenceTrackableEvent.java
new file mode 100644
index 0000000000000..b3d64d68e9c5d
--- /dev/null
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/ReferenceTrackableEvent.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.pipe.event;
+
+import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager.PipeEventResource;
+
+public interface ReferenceTrackableEvent {
+
+  PipeEventResource eventResourceBuilder();
+}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/UserDefinedEnrichedEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/UserDefinedEnrichedEvent.java
index 82829a4542bec..6a9b312e61bdf 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/UserDefinedEnrichedEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/UserDefinedEnrichedEvent.java
@@ -20,9 +20,9 @@
 package org.apache.iotdb.db.pipe.event;
 
 import org.apache.iotdb.commons.consensus.index.ProgressIndex;
+import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
+import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
 import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
-import org.apache.iotdb.commons.pipe.pattern.PipePattern;
-import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta;
 import org.apache.iotdb.pipe.api.event.Event;
 import org.apache.iotdb.pipe.api.event.UserDefinedEvent;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/heartbeat/PipeHeartbeatEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/heartbeat/PipeHeartbeatEvent.java
index d3830b6dcf020..ad29b2854424a 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/heartbeat/PipeHeartbeatEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/heartbeat/PipeHeartbeatEvent.java
@@ -21,11 +21,12 @@
 
 import org.apache.iotdb.commons.consensus.index.ProgressIndex;
 import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex;
+import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue;
+import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
+import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
 import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
-import org.apache.iotdb.commons.pipe.pattern.PipePattern;
-import org.apache.iotdb.commons.pipe.task.connection.UnboundedBlockingPendingQueue;
-import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta;
-import org.apache.iotdb.db.pipe.metric.PipeHeartbeatEventMetrics;
+import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics;
+import org.apache.iotdb.db.pipe.metric.overview.PipeHeartbeatEventMetrics;
 import org.apache.iotdb.db.utils.DateTimeUtils;
 import org.apache.iotdb.pipe.api.event.Event;
 
@@ -33,6 +34,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.Objects;
+
 public class PipeHeartbeatEvent extends EnrichedEvent {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(PipeHeartbeatEvent.class);
@@ -79,6 +82,10 @@ public PipeHeartbeatEvent(
 
   @Override
   public boolean internallyIncreaseResourceReferenceCount(final String holderMessage) {
+    if (Objects.nonNull(pipeName)) {
+      PipeDataNodeSinglePipeMetrics.getInstance()
+          .increaseHeartbeatEventCount(pipeName, creationTime);
+    }
     return true;
   }
 
@@ -86,8 +93,12 @@ public boolean internallyDecreaseResourceReferenceCount(final String holderMessa
   public boolean internallyDecreaseResourceReferenceCount(final String holderMessage) {
     // PipeName == null indicates that the event is the raw event at disruptor,
     // not the event copied and passed to the extractor
-    if (shouldPrintMessage && pipeName != null && LOGGER.isDebugEnabled()) {
-      LOGGER.debug(this.toString());
+    if (Objects.nonNull(pipeName)) {
+      PipeDataNodeSinglePipeMetrics.getInstance()
+          .decreaseHeartbeatEventCount(pipeName, creationTime);
+      if (shouldPrintMessage && LOGGER.isDebugEnabled()) {
+        LOGGER.debug(this.toString());
+      }
     }
     return true;
   }
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/row/PipeRow.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/row/PipeRow.java
index 6200f66536ee3..33bf0a5925c39 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/row/PipeRow.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/row/PipeRow.java
@@ -190,6 +190,21 @@ public boolean isAligned() {
     return isAligned;
   }
 
+  public int getCurrentRowSize() {
+    int rowSize = 0;
+    rowSize += 8; // timestamp
+    for (int i = 0; i < valueColumnTypes.length; i++) {
+      if (valueColumnTypes[i] != null) {
+        if (valueColumnTypes[i].isBinary()) {
+          rowSize += getBinary(i) != null ? getBinary(i).getLength() : 0;
+        } else {
+          rowSize += valueColumnTypes[i].getDataTypeSize();
+        }
+      }
+    }
+    return rowSize;
+  }
+
   public MeasurementSchema[] getMeasurementSchemaList() {
     return measurementSchemaList;
   }
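The heartbeat change above couples a per-pipe counter to the reference-count lifecycle: the gauge is bumped exactly once when an event first acquires its resources and decremented exactly once when it releases them, so the gauge equals the number of live events. A hedged sketch of that bookkeeping pattern, with hypothetical names:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;

final class PerPipeGauge {
  private final ConcurrentHashMap<String, LongAdder> counts = new ConcurrentHashMap<>();

  // Called from the "first reference acquired" transition.
  void onFirstReference(final String pipeName) {
    counts.computeIfAbsent(pipeName, k -> new LongAdder()).increment();
  }

  // Called from the "last reference released" transition.
  void onLastRelease(final String pipeName) {
    final LongAdder adder = counts.get(pipeName);
    if (adder != null) {
      adder.decrement();
    }
  }

  long current(final String pipeName) {
    final LongAdder adder = counts.get(pipeName);
    return adder == null ? 0 : adder.sum();
  }
}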
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/row/PipeRowCollector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/row/PipeRowCollector.java
index cb2183b200056..4cb7d972dac5f 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/row/PipeRowCollector.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/row/PipeRowCollector.java
@@ -19,15 +19,16 @@
 
 package org.apache.iotdb.db.pipe.event.common.row;
 
-import org.apache.iotdb.commons.pipe.config.PipeConfig;
+import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
 import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
-import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta;
 import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
+import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil;
 import org.apache.iotdb.pipe.api.access.Row;
 import org.apache.iotdb.pipe.api.collector.RowCollector;
 import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
 import org.apache.iotdb.pipe.api.exception.PipeException;
 
+import org.apache.tsfile.utils.Pair;
 import org.apache.tsfile.write.record.Tablet;
 import org.apache.tsfile.write.schema.MeasurementSchema;
 
@@ -66,13 +67,12 @@ public void collectRow(Row row) {
       final String deviceId = pipeRow.getDeviceId();
       final List<MeasurementSchema> measurementSchemaList =
           new ArrayList<>(Arrays.asList(measurementSchemaArray));
-      tablet =
-          new Tablet(
-              deviceId,
-              measurementSchemaList,
-              PipeConfig.getInstance().getPipeDataStructureTabletRowSize());
-      isAligned = pipeRow.isAligned();
+      // Calculate row count and memory size of the tablet based on the first row
+      Pair rowCountAndMemorySize =
+          PipeMemoryWeightUtil.calculateTabletRowCountAndMemory(pipeRow);
+      tablet = new Tablet(deviceId, measurementSchemaList, rowCountAndMemorySize.getLeft());
       tablet.initBitMaps();
+      isAligned = pipeRow.isAligned();
     }
 
     final int rowIndex = tablet.rowSize;
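Instead of a fixed configured row count, the collector above now sizes the tablet from its first row. A hypothetical sketch of what PipeMemoryWeightUtil.calculateTabletRowCountAndMemory does conceptually: estimate bytes per row (PipeRow.getCurrentRowSize above provides exactly that), then derive a row capacity that keeps the tablet under a memory budget. The budget constant here is an assumption, not IoTDB's configuration:

final class TabletSizing {
  private static final long MEMORY_BUDGET_IN_BYTES = 64L * 1024; // assumed budget

  static int estimateRowCapacity(final int firstRowSizeInBytes) {
    if (firstRowSizeInBytes <= 0) {
      return 1; // degenerate row; fall back to the minimum capacity
    }
    return (int) Math.max(1, MEMORY_BUDGET_IN_BYTES / firstRowSizeInBytes);
  }
}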
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/schema/PipeSchemaRegionSnapshotEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/schema/PipeSchemaRegionSnapshotEvent.java
index 39bd51d5f53ac..e9181bb2060d9 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/schema/PipeSchemaRegionSnapshotEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/schema/PipeSchemaRegionSnapshotEvent.java
@@ -19,10 +19,13 @@
 
 package org.apache.iotdb.db.pipe.event.common.schema;
 
+import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
+import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
 import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
 import org.apache.iotdb.commons.pipe.event.PipeSnapshotEvent;
-import org.apache.iotdb.commons.pipe.pattern.PipePattern;
-import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta;
+import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager.PipeEventResource;
+import org.apache.iotdb.commons.pipe.resource.snapshot.PipeSnapshotResourceManager;
+import org.apache.iotdb.db.pipe.event.ReferenceTrackableEvent;
 import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeType;
 import org.apache.iotdb.db.queryengine.plan.statement.StatementType;
@@ -39,9 +42,12 @@
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
 
-public class PipeSchemaRegionSnapshotEvent extends PipeSnapshotEvent {
+public class PipeSchemaRegionSnapshotEvent extends PipeSnapshotEvent
+    implements ReferenceTrackableEvent {
   private static final Logger LOGGER =
       LoggerFactory.getLogger(PipeSchemaRegionSnapshotEvent.class);
   private String mTreeSnapshotPath;
   private String tagLogSnapshotPath;
@@ -229,4 +235,56 @@ public String coreReportMessage() {
         + " - "
         + super.coreReportMessage();
   }
+
+  /////////////////////////// ReferenceTrackableEvent ///////////////////////////
+
+  @Override
+  protected void trackResource() {
+    PipeDataNodeResourceManager.ref().trackPipeEventResource(this, eventResourceBuilder());
+  }
+
+  @Override
+  public PipeEventResource eventResourceBuilder() {
+    return new PipeSchemaRegionSnapshotEventResource(
+        this.isReleased,
+        this.referenceCount,
+        this.resourceManager,
+        this.mTreeSnapshotPath,
+        this.tagLogSnapshotPath);
+  }
+
+  private static class PipeSchemaRegionSnapshotEventResource extends PipeEventResource {
+
+    private final PipeSnapshotResourceManager resourceManager;
+    private final String mTreeSnapshotPath;
+    private final String tagLogSnapshotPath;
+
+    private PipeSchemaRegionSnapshotEventResource(
+        final AtomicBoolean isReleased,
+        final AtomicInteger referenceCount,
+        final PipeSnapshotResourceManager resourceManager,
+        final String mTreeSnapshotPath,
+        final String tagLogSnapshotPath) {
+      super(isReleased, referenceCount);
+      this.resourceManager = resourceManager;
+      this.mTreeSnapshotPath = mTreeSnapshotPath;
+      this.tagLogSnapshotPath = tagLogSnapshotPath;
+    }
+
+    @Override
+    protected void finalizeResource() {
+      try {
+        resourceManager.decreaseSnapshotReference(mTreeSnapshotPath);
+        if (!tagLogSnapshotPath.isEmpty()) {
+          resourceManager.decreaseSnapshotReference(tagLogSnapshotPath);
+        }
+      } catch (final Exception e) {
+        LOGGER.warn(
+            "Decrease reference count for mTree snapshot {} or tLog {} error.",
+            mTreeSnapshotPath,
+            tagLogSnapshotPath,
+            e);
+      }
+    }
+  }
 }
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/schema/PipeSchemaRegionWritePlanEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/schema/PipeSchemaRegionWritePlanEvent.java
index d6eefe086e55c..435b38d61c658 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/schema/PipeSchemaRegionWritePlanEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/schema/PipeSchemaRegionWritePlanEvent.java
@@ -19,10 +19,10 @@
 
 package org.apache.iotdb.db.pipe.event.common.schema;
 
+import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
+import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
 import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
 import org.apache.iotdb.commons.pipe.event.PipeWritePlanEvent;
-import org.apache.iotdb.commons.pipe.pattern.PipePattern;
-import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeType;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeInsertNodeTabletInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeInsertNodeTabletInsertionEvent.java
index de3e39ed41276..772d811292b6d 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeInsertNodeTabletInsertionEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeInsertNodeTabletInsertionEvent.java
@@ -22,21 +22,29 @@
 import org.apache.iotdb.commons.consensus.index.ProgressIndex;
 import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex;
 import org.apache.iotdb.commons.path.PartialPath;
+import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
+import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
 import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
-import org.apache.iotdb.commons.pipe.pattern.PipePattern;
-import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta;
+import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager.PipeEventResource;
+import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
+import org.apache.iotdb.db.pipe.event.ReferenceTrackableEvent;
+import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics;
 import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
+import org.apache.iotdb.db.pipe.resource.memory.InsertNodeMemoryEstimator;
+import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil;
+import org.apache.iotdb.db.pipe.resource.memory.PipeTabletMemoryBlock;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowsNode;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertTabletNode;
 import org.apache.iotdb.db.storageengine.dataregion.wal.exception.WALPipeException;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler;
 import org.apache.iotdb.pipe.api.access.Row;
 import org.apache.iotdb.pipe.api.collector.RowCollector;
 import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
 import org.apache.iotdb.pipe.api.exception.PipeException;
+import org.apache.tsfile.utils.Accountable;
+import org.apache.tsfile.utils.RamUsageEstimator;
 import org.apache.tsfile.write.UnSupportedDataTypeException;
 import org.apache.tsfile.write.record.Tablet;
 import org.slf4j.Logger;
@@ -47,51 +55,39 @@
 import java.util.Collection;
 import java.util.List;
 import java.util.Objects;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.BiConsumer;
 import java.util.stream.Collectors;
 
 public class PipeInsertNodeTabletInsertionEvent extends EnrichedEvent
-    implements TabletInsertionEvent {
+    implements TabletInsertionEvent, ReferenceTrackableEvent, Accountable, AutoCloseable {
 
   private static final Logger LOGGER =
       LoggerFactory.getLogger(PipeInsertNodeTabletInsertionEvent.class);
 
+  private static final long INSTANCE_SIZE =
+      RamUsageEstimator.shallowSizeOfInstance(PipeInsertNodeTabletInsertionEvent.class)
+          + RamUsageEstimator.shallowSizeOfInstance(AtomicInteger.class)
+          + RamUsageEstimator.shallowSizeOfInstance(AtomicBoolean.class);
 
-  private final WALEntryHandler walEntryHandler;
-  private final boolean isAligned;
-  private final boolean isGeneratedByPipe;
+  private final AtomicReference<PipeTabletMemoryBlock> allocatedMemoryBlock;
+  private volatile List<Tablet> tablets;
 
   private List<TabletInsertionDataContainer> dataContainers;
 
-  private final PartialPath devicePath;
+  private InsertNode insertNode;
   private ProgressIndex progressIndex;
 
-  public PipeInsertNodeTabletInsertionEvent(
-      final WALEntryHandler walEntryHandler,
-      final PartialPath devicePath,
-      final ProgressIndex progressIndex,
-      final boolean isAligned,
-      final boolean isGeneratedByPipe) {
-    this(
-        walEntryHandler,
-        devicePath,
-        progressIndex,
-        isAligned,
-        isGeneratedByPipe,
-        null,
-        0,
-        null,
-        null,
-        Long.MIN_VALUE,
-        Long.MAX_VALUE);
+  private long extractTime = 0;
+
+  public PipeInsertNodeTabletInsertionEvent(final InsertNode insertNode) {
+    this(insertNode, null, 0, null, null, Long.MIN_VALUE, Long.MAX_VALUE);
   }
 
   private PipeInsertNodeTabletInsertionEvent(
-      final WALEntryHandler walEntryHandler,
-      final PartialPath devicePath,
-      final ProgressIndex progressIndex,
-      final boolean isAligned,
-      final boolean isGeneratedByPipe,
+      final InsertNode insertNode,
       final String pipeName,
       final long creationTime,
       final PipeTaskMeta pipeTaskMeta,
@@ -99,47 +95,42 @@ private PipeInsertNodeTabletInsertionEvent(
       final long startTime,
       final long endTime) {
     super(pipeName, creationTime, pipeTaskMeta, pattern, startTime, endTime);
-    this.walEntryHandler = walEntryHandler;
     // Record device path here so there's no need to get it from InsertNode cache later.
-    this.devicePath = devicePath;
-    this.progressIndex = progressIndex;
-    this.isAligned = isAligned;
-    this.isGeneratedByPipe = isGeneratedByPipe;
+    this.progressIndex = insertNode.getProgressIndex();
+    this.insertNode = insertNode;
+    this.allocatedMemoryBlock = new AtomicReference<>();
   }
 
-  public InsertNode getInsertNode() throws WALPipeException {
-    return walEntryHandler.getInsertNode();
+  public InsertNode getInsertNode() {
+    return insertNode;
   }
 
   public ByteBuffer getByteBuffer() throws WALPipeException {
-    return walEntryHandler.getByteBuffer();
-  }
-
-  // This method is a pre-determination of whether to use binary transfers.
-  // If the insert node is null in cache, it means that we need to read the bytebuffer from the wal,
-  // and when the pattern is default, we can transfer the bytebuffer directly without serializing or
-  // deserializing
-  public InsertNode getInsertNodeViaCacheIfPossible() {
-    return walEntryHandler.getInsertNodeViaCacheIfPossible();
+    return insertNode.serializeToByteBuffer();
   }
 
   public String getDeviceId() {
-    return Objects.nonNull(devicePath) ? devicePath.getFullPath() : null;
+    return Objects.nonNull(insertNode.getDevicePath())
+        ? insertNode.getDevicePath().getFullPath()
+        : null;
   }
 
   /////////////////////////// EnrichedEvent ///////////////////////////
 
   @Override
   public boolean internallyIncreaseResourceReferenceCount(final String holderMessage) {
+    extractTime = System.nanoTime();
     try {
-      PipeDataNodeResourceManager.wal().pin(walEntryHandler);
+      if (Objects.nonNull(pipeName)) {
+        PipeDataNodeSinglePipeMetrics.getInstance()
+            .increaseInsertNodeEventCount(pipeName, creationTime);
+        PipeDataNodeAgent.task()
+            .addFloatingMemoryUsageInByte(pipeName, creationTime, ramBytesUsed());
+      }
       return true;
     } catch (final Exception e) {
       LOGGER.warn(
-          String.format(
-              "Increase reference count for memtable %d error. Holder Message: %s",
-              walEntryHandler.getMemTableId(), holderMessage),
-          e);
+          String.format("Increase reference count error. Holder Message: %s", holderMessage), e);
       return false;
     }
   }
@@ -147,20 +138,28 @@ public boolean internallyIncreaseResourceReferenceCount(final String holderMessa
   @Override
   public boolean internallyDecreaseResourceReferenceCount(final String holderMessage) {
     try {
-      PipeDataNodeResourceManager.wal().unpin(walEntryHandler);
-      // Release the containers' memory.
+      // release the containers' memory and close memory block
       if (dataContainers != null) {
        dataContainers.clear();
        dataContainers = null;
      }
+      close();
       return true;
     } catch (final Exception e) {
       LOGGER.warn(
-          String.format(
-              "Decrease reference count for memtable %d error. Holder Message: %s",
-              walEntryHandler.getMemTableId(), holderMessage),
-          e);
+          String.format("Decrease reference count error. Holder Message: %s", holderMessage), e);
       return false;
+    } finally {
+      if (Objects.nonNull(pipeName)) {
+        PipeDataNodeAgent.task()
+            .decreaseFloatingMemoryUsageInByte(pipeName, creationTime, ramBytesUsed());
+        PipeDataNodeSinglePipeMetrics.getInstance()
+            .decreaseInsertNodeEventCount(
+                pipeName,
+                creationTime,
+                shouldReportOnCommit ? System.nanoTime() - extractTime : -1);
+      }
+      insertNode = null;
     }
   }
@@ -183,28 +182,18 @@ public PipeInsertNodeTabletInsertionEvent shallowCopySelfAndBindPipeTaskMetaForP
       final long startTime,
       final long endTime) {
     return new PipeInsertNodeTabletInsertionEvent(
-        walEntryHandler,
-        devicePath,
-        progressIndex,
-        isAligned,
-        isGeneratedByPipe,
-        pipeName,
-        creationTime,
-        pipeTaskMeta,
-        pattern,
-        startTime,
-        endTime);
+        insertNode, pipeName, creationTime, pipeTaskMeta, pattern, startTime, endTime);
   }
 
   @Override
   public boolean isGeneratedByPipe() {
-    return isGeneratedByPipe;
+    return insertNode.isGeneratedByPipe();
   }
 
   @Override
   public boolean mayEventTimeOverlappedWithTimeRange() {
     try {
-      final InsertNode insertNode = getInsertNodeViaCacheIfPossible();
+      final InsertNode insertNode = getInsertNode();
       if (Objects.isNull(insertNode)) {
         return true;
       }
@@ -248,7 +237,7 @@ public boolean mayEventTimeOverlappedWithTimeRange() {
   @Override
   public boolean mayEventPathsOverlappedWithPattern() {
     try {
-      final InsertNode insertNode = getInsertNodeViaCacheIfPossible();
+      final InsertNode insertNode = getInsertNode();
       if (Objects.isNull(insertNode)) {
         return true;
       }
@@ -307,10 +296,22 @@ public boolean isAligned(final int i) {
     return initDataContainers().get(i).isAligned();
   }
 
-  public List<Tablet> convertToTablets() {
-    return initDataContainers().stream()
-        .map(TabletInsertionDataContainer::convertToTablet)
-        .collect(Collectors.toList());
+  public synchronized List<Tablet> convertToTablets() {
+    if (Objects.isNull(tablets)) {
+      tablets =
+          initDataContainers().stream()
+              .map(TabletInsertionDataContainer::convertToTablet)
+              .collect(Collectors.toList());
+      allocatedMemoryBlock.compareAndSet(
+          null,
+          PipeDataNodeResourceManager.memory()
+              .forceAllocateForTabletWithRetry(
+                  tablets.stream()
+                      .map(PipeMemoryWeightUtil::calculateTabletSizeInBytes)
+                      .reduce(Long::sum)
+                      .orElse(0L)));
+    }
+    return tablets;
  }
 
   /////////////////////////// dataContainer ///////////////////////////
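The convertToTablets change above converts lazily and pays for its memory exactly once: the tablets are built on first call, and a reservation sized to the converted result is attached atomically so a later close() or phantom-reference reap can release it. A hedged sketch of the same shape with hypothetical stand-ins (Supplier-based conversion and an AutoCloseable reservation, not IoTDB's memory API):

import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

final class LazyTablets<T> {
  private final AtomicReference<AutoCloseable> reservation = new AtomicReference<>();
  private volatile List<T> tablets;

  synchronized List<T> get(
      final Supplier<List<T>> convert, final Supplier<AutoCloseable> reserve) {
    if (tablets == null) {
      tablets = convert.get();
      reservation.compareAndSet(null, reserve.get()); // only the first conversion reserves
    }
    return tablets;
  }

  synchronized void close() throws Exception {
    final AutoCloseable r = reservation.getAndSet(null); // idempotent release
    if (r != null) {
      r.close();
    }
    tablets = null;
  }
}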
@@ -389,8 +390,11 @@ public List<PipeRawTabletInsertionEvent> toRawTabletInsertionEvents() {
   @Override
   public String toString() {
     return String.format(
-            "PipeInsertNodeTabletInsertionEvent{walEntryHandler=%s, progressIndex=%s, isAligned=%s, isGeneratedByPipe=%s, dataContainers=%s}",
-            walEntryHandler, progressIndex, isAligned, isGeneratedByPipe, dataContainers)
+            "PipeInsertNodeTabletInsertionEvent{progressIndex=%s, isAligned=%s, isGeneratedByPipe=%s, dataContainers=%s}",
+            progressIndex,
+            Objects.nonNull(insertNode) ? insertNode.isAligned() : null,
+            Objects.nonNull(insertNode) ? insertNode.isGeneratedByPipe() : null,
+            dataContainers)
         + " - "
         + super.toString();
   }
@@ -398,9 +402,78 @@ public String toString() {
   @Override
   public String coreReportMessage() {
     return String.format(
-            "PipeInsertNodeTabletInsertionEvent{walEntryHandler=%s, progressIndex=%s, isAligned=%s, isGeneratedByPipe=%s}",
-            walEntryHandler, progressIndex, isAligned, isGeneratedByPipe)
+            "PipeInsertNodeTabletInsertionEvent{progressIndex=%s, isAligned=%s, isGeneratedByPipe=%s}",
+            progressIndex,
+            Objects.nonNull(insertNode) ? insertNode.isAligned() : null,
+            Objects.nonNull(insertNode) ? insertNode.isGeneratedByPipe() : null)
         + " - "
         + super.coreReportMessage();
   }
+
+  // Notes:
+  // 1. We only consider insertion event's memory for degrade and restart, because degrade/restart
+  // may not be of use for releasing other events' memory.
+  // 2. We do not consider eventParsers because they may not exist and if it is invoked, the event
+  // will soon be released.
+  @Override
+  public long ramBytesUsed() {
+    return INSTANCE_SIZE
+        + (Objects.nonNull(insertNode) ? InsertNodeMemoryEstimator.sizeOf(insertNode) : 0)
+        + (Objects.nonNull(progressIndex) ? progressIndex.ramBytesUsed() : 0);
+  }
+
+  /////////////////////////// ReferenceTrackableEvent ///////////////////////////
+
+  @Override
+  protected void trackResource() {
+    PipeDataNodeResourceManager.ref().trackPipeEventResource(this, eventResourceBuilder());
+  }
+
+  @Override
+  public PipeEventResource eventResourceBuilder() {
+    return new PipeInsertNodeTabletInsertionEventResource(
+        this.isReleased, this.referenceCount, this.allocatedMemoryBlock);
+  }
+
+  private static class PipeInsertNodeTabletInsertionEventResource extends PipeEventResource {
+
+    private final AtomicReference<PipeTabletMemoryBlock> allocatedMemoryBlock;
+
+    private PipeInsertNodeTabletInsertionEventResource(
+        final AtomicBoolean isReleased,
+        final AtomicInteger referenceCount,
+        final AtomicReference<PipeTabletMemoryBlock> allocatedMemoryBlock) {
+      super(isReleased, referenceCount);
+      this.allocatedMemoryBlock = allocatedMemoryBlock;
+    }
+
+    @Override
+    protected void finalizeResource() {
+      try {
+        allocatedMemoryBlock.getAndUpdate(
+            memoryBlock -> {
+              if (Objects.nonNull(memoryBlock)) {
+                memoryBlock.close();
+              }
+              return null;
+            });
+      } catch (final Exception e) {
+        LOGGER.warn("Decrease reference count error.", e);
+      }
+    }
+  }
+
+  /////////////////////////// AutoCloseable ///////////////////////////
+
+  @Override
+  public synchronized void close() {
+    allocatedMemoryBlock.getAndUpdate(
+        memoryBlock -> {
+          if (Objects.nonNull(memoryBlock)) {
+            memoryBlock.close();
+          }
+          return null;
+        });
+    tablets = null;
+  }
 }
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeRawTabletInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeRawTabletInsertionEvent.java
index 65da3828931be..5ae27d5eb3c73 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeRawTabletInsertionEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeRawTabletInsertionEvent.java
@@ -21,23 +21,35 @@
 
 import org.apache.iotdb.commons.consensus.index.ProgressIndex;
 import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex;
+import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
+import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
 import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
-import org.apache.iotdb.commons.pipe.pattern.PipePattern;
-import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta;
+import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager.PipeEventResource;
 import org.apache.iotdb.commons.utils.TestOnly;
+import org.apache.iotdb.db.pipe.event.ReferenceTrackableEvent;
+import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent;
+import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics;
 import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
+import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil;
 import org.apache.iotdb.db.pipe.resource.memory.PipeTabletMemoryBlock;
 import org.apache.iotdb.pipe.api.access.Row;
 import org.apache.iotdb.pipe.api.collector.RowCollector;
 import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
+import org.apache.tsfile.utils.RamUsageEstimator;
 import org.apache.tsfile.write.record.Tablet;
 
 import java.util.Objects;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.BiConsumer;
 
-public class PipeRawTabletInsertionEvent extends EnrichedEvent implements TabletInsertionEvent {
+public class PipeRawTabletInsertionEvent extends EnrichedEvent
+    implements TabletInsertionEvent, ReferenceTrackableEvent, AutoCloseable {
+
+  // For better calculation
+  private static final long INSTANCE_SIZE =
+      RamUsageEstimator.shallowSizeOfInstance(PipeRawTabletInsertionEvent.class);
 
   private Tablet tablet;
   private String deviceId; // Only used when the tablet is released.
   private final boolean isAligned;
@@ -45,11 +57,11 @@ public class PipeRawTabletInsertionEvent extends EnrichedEvent implements Tablet
   private final EnrichedEvent sourceEvent;
   private boolean needToReport;
 
-  private PipeTabletMemoryBlock allocatedMemoryBlock;
+  private final PipeTabletMemoryBlock allocatedMemoryBlock;
 
   private TabletInsertionDataContainer dataContainer;
 
-  private ProgressIndex overridingProgressIndex;
+  private volatile ProgressIndex overridingProgressIndex;
 
   private PipeRawTabletInsertionEvent(
       final Tablet tablet,
@@ -67,6 +79,17 @@ private PipeRawTabletInsertionEvent(
     this.isAligned = isAligned;
     this.sourceEvent = sourceEvent;
     this.needToReport = needToReport;
+
+    // Allocate empty memory block, will be resized later.
+    this.allocatedMemoryBlock =
+        PipeDataNodeResourceManager.memory().forceAllocateForTabletWithRetry(0);
+
+    addOnCommittedHook(
+        () -> {
+          if (shouldReportOnCommit) {
+            eliminateProgressIndex();
+          }
+        });
   }
 
   public PipeRawTabletInsertionEvent(
@@ -109,12 +132,23 @@ public PipeRawTabletInsertionEvent(
 
   @Override
   public boolean internallyIncreaseResourceReferenceCount(final String holderMessage) {
-    allocatedMemoryBlock = PipeDataNodeResourceManager.memory().forceAllocateWithRetry(tablet);
+    PipeDataNodeResourceManager.memory()
+        .forceResize(
+            allocatedMemoryBlock,
+            PipeMemoryWeightUtil.calculateTabletSizeInBytes(tablet) + INSTANCE_SIZE);
+    if (Objects.nonNull(pipeName)) {
+      PipeDataNodeSinglePipeMetrics.getInstance()
+          .increaseRawTabletEventCount(pipeName, creationTime);
+    }
     return true;
   }
 
   @Override
   public boolean internallyDecreaseResourceReferenceCount(final String holderMessage) {
+    if (Objects.nonNull(pipeName)) {
+      PipeDataNodeSinglePipeMetrics.getInstance()
+          .decreaseRawTabletEventCount(pipeName, creationTime);
+    }
     allocatedMemoryBlock.close();
 
     // Record the deviceId before the memory is released,
@@ -127,10 +161,11 @@ public boolean internallyDecreaseResourceReferenceCount(final String holderMessa
     return true;
   }
 
-  @Override
-  protected void reportProgress() {
+  protected void eliminateProgressIndex() {
     if (needToReport) {
-      super.reportProgress();
+      if (sourceEvent instanceof PipeTsFileInsertionEvent) {
+        ((PipeTsFileInsertionEvent) sourceEvent).eliminateProgressIndex();
+      }
     }
   }
@@ -201,6 +236,11 @@ public void markAsNeedToReport() {
     this.needToReport = true;
   }
 
+  // This getter is reserved for user-defined plugins
+  public boolean isNeedToReport() {
+    return needToReport;
+  }
+
   public String getDeviceId() {
     // NonNull indicates that the internallyDecreaseResourceReferenceCount has not been called.
     return Objects.nonNull(tablet) ? tablet.deviceId : deviceId;
@@ -252,8 +292,8 @@ public Tablet convertToTablet() {
   }
 
   public long count() {
-    final Tablet covertedTablet = shouldParseTimeOrPattern() ? convertToTablet() : tablet;
-    return (long) covertedTablet.rowSize * covertedTablet.getSchemas().size();
+    final Tablet convertedTablet = shouldParseTimeOrPattern() ? convertToTablet() : tablet;
+    return (long) convertedTablet.rowSize * convertedTablet.getSchemas().size();
   }
 
   /////////////////////////// parsePatternOrTime ///////////////////////////
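The raw-tablet event above now allocates a zero-sized memory block in its constructor and resizes it once the tablet's real weight is known. That lets the field be final (and safely shareable with the phantom-reference resource) while deferring the actual charge. A hedged sketch with hypothetical MemoryPool/MemoryBlock stand-ins, not IoTDB's PipeDataNodeResourceManager API:

final class MemoryBlock implements AutoCloseable {
  private long sizeInBytes;

  MemoryBlock(final long sizeInBytes) {
    this.sizeInBytes = sizeInBytes; // zero at construction in the pattern above
  }

  void resize(final long newSizeInBytes) {
    this.sizeInBytes = newSizeInBytes; // a real pool would also adjust global usage
  }

  long size() {
    return sizeInBytes;
  }

  @Override
  public void close() {
    sizeInBytes = 0; // a real pool would return the reservation here
  }
}

A caller would construct the block once, keep a final reference, and call resize() from the first reference-count increase, mirroring forceResize() above.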
@@ -297,4 +337,47 @@ public String coreReportMessage() {
         + " - "
         + super.coreReportMessage();
   }
+
+  /////////////////////////// ReferenceTrackableEvent ///////////////////////////
+
+  @Override
+  protected void trackResource() {
+    PipeDataNodeResourceManager.ref().trackPipeEventResource(this, eventResourceBuilder());
+  }
+
+  @Override
+  public PipeEventResource eventResourceBuilder() {
+    return new PipeRawTabletInsertionEventResource(
+        this.isReleased, this.referenceCount, this.allocatedMemoryBlock);
+  }
+
+  private static class PipeRawTabletInsertionEventResource extends PipeEventResource {
+
+    private final PipeTabletMemoryBlock allocatedMemoryBlock;
+
+    private PipeRawTabletInsertionEventResource(
+        final AtomicBoolean isReleased,
+        final AtomicInteger referenceCount,
+        final PipeTabletMemoryBlock allocatedMemoryBlock) {
+      super(isReleased, referenceCount);
+      this.allocatedMemoryBlock = allocatedMemoryBlock;
+    }
+
+    @Override
+    protected void finalizeResource() {
+      allocatedMemoryBlock.close();
+    }
+  }
+
+  /////////////////////////// AutoCloseable ///////////////////////////
+
+  @Override
+  public void close() {
+    // The semantic of close is to release the memory occupied by parsing, this method does nothing
+    // to unify the external close semantic:
+    // 1. PipeRawTabletInsertionEvent: the tablet occupying memory upon construction, even when
+    // parsing is involved.
+    // 2. PipeInsertNodeTabletInsertionEvent: the tablet is only constructed when it's actually
+    // involved in parsing.
+  }
 }
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/TabletInsertionDataContainer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/TabletInsertionDataContainer.java
index 454854115544f..aa1ee8e6325fc 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/TabletInsertionDataContainer.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/TabletInsertionDataContainer.java
@@ -19,9 +19,9 @@
 
 package org.apache.iotdb.db.pipe.event.common.tablet;
 
+import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
+import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
 import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
-import org.apache.iotdb.commons.pipe.pattern.PipePattern;
-import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta;
 import org.apache.iotdb.commons.utils.TestOnly;
 import org.apache.iotdb.db.pipe.event.common.row.PipeRow;
 import org.apache.iotdb.db.pipe.event.common.row.PipeRowCollector;
@@ -59,6 +59,8 @@ public class TabletInsertionDataContainer {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(TabletInsertionDataContainer.class);
 
+  private static final LocalDate EMPTY_LOCALDATE = LocalDate.of(1000, 1, 1);
+
   private final PipeTaskMeta pipeTaskMeta; // used to report progress
   private final EnrichedEvent sourceEvent; // used to report progress and filter value columns by time range
@@ -174,8 +176,12 @@ private void parse(final InsertRowNode insertRowNode, final PipePattern pattern)
         this.valueColumnTypes[filteredColumnIndex] = originValueColumnTypes[i];
         final BitMap bitMap = new BitMap(this.timestampColumn.length);
         if (Objects.isNull(originValueColumns[i]) || Objects.isNull(originValueColumnTypes[i])) {
-          this.valueColumns[filteredColumnIndex] = null;
-          bitMap.markAll();
+          fillNullValue(
+              originValueColumnTypes[i],
+              this.valueColumns,
+              bitMap,
+              filteredColumnIndex,
+              rowIndexList.size());
         } else {
           this.valueColumns[filteredColumnIndex] =
               filterValueColumnsByRowIndexList(
@@ -257,8 +263,12 @@ private void parse(final InsertTabletNode insertTabletNode, final PipePattern pa
         this.valueColumnTypes[filteredColumnIndex] = originValueColumnTypes[i];
         final BitMap bitMap = new BitMap(this.timestampColumn.length);
         if (Objects.isNull(originValueColumns[i]) || Objects.isNull(originValueColumnTypes[i])) {
-          this.valueColumns[filteredColumnIndex] = null;
-          bitMap.markAll();
+          fillNullValue(
+              originValueColumnTypes[i],
+              this.valueColumns,
+              bitMap,
+              filteredColumnIndex,
+              rowIndexList.size());
         } else {
           this.valueColumns[filteredColumnIndex] =
               filterValueColumnsByRowIndexList(
@@ -348,8 +358,12 @@ private void parse(final Tablet tablet, final boolean isAligned, final PipePatte
         this.valueColumnTypes[filteredColumnIndex] = originValueColumnTypes[i];
         final BitMap bitMap = new BitMap(this.timestampColumn.length);
         if (Objects.isNull(originValueColumns[i]) || Objects.isNull(originValueColumnTypes[i])) {
-          this.valueColumns[filteredColumnIndex] = null;
-          bitMap.markAll();
+          fillNullValue(
+              originValueColumnTypes[i],
+              this.valueColumns,
+              bitMap,
+              filteredColumnIndex,
+              rowIndexList.size());
         } else {
           this.valueColumns[filteredColumnIndex] =
               filterValueColumnsByRowIndexList(
@@ -479,7 +493,7 @@ private static Object filterValueColumnsByRowIndexList(
 
         for (int i = 0; i < rowIndexList.size(); ++i) {
           if (originNullValueColumnBitmap.isMarked(rowIndexList.get(i))) {
-            valueColumns[i] = LocalDate.MIN;
+            valueColumns[i] = EMPTY_LOCALDATE;
             nullValueColumnBitmap.mark(i);
           } else {
             valueColumns[i] = dateValueColumns[rowIndexList.get(i)];
@@ -493,7 +507,7 @@ private static Object filterValueColumnsByRowIndexList(
             : (int[]) originValueColumn;
 
         for (int i = 0; i < rowIndexList.size(); ++i) {
          if (originNullValueColumnBitmap.isMarked(rowIndexList.get(i))) {
-            valueColumns[i] = LocalDate.MIN;
+            valueColumns[i] = EMPTY_LOCALDATE;
             nullValueColumnBitmap.mark(i);
           } else {
             valueColumns[i] =
@@ -599,6 +613,51 @@ private static Object filterValueColumnsByRowIndexList(
     }
   }
 
+  private void fillNullValue(
+      final TSDataType type,
+      final Object[] valueColumns,
+      final BitMap nullValueColumnBitmap,
+      final int columnIndex,
+      final int rowSize) {
+    nullValueColumnBitmap.markAll();
+    if (Objects.isNull(type)) {
+      return;
+    }
+    switch (type) {
+      case TIMESTAMP:
+      case INT64:
+        valueColumns[columnIndex] = new long[rowSize];
+        break;
+      case INT32:
+        valueColumns[columnIndex] = new int[rowSize];
+        break;
+      case DOUBLE:
+        valueColumns[columnIndex] = new double[rowSize];
+        break;
+      case FLOAT:
+        valueColumns[columnIndex] = new float[rowSize];
+        break;
+      case BOOLEAN:
+        valueColumns[columnIndex] = new boolean[rowSize];
+        break;
+      case DATE:
+        final LocalDate[] dates = new LocalDate[rowSize];
+        Arrays.fill(dates, EMPTY_LOCALDATE);
+        valueColumns[columnIndex] = dates;
+        break;
+      case TEXT:
+      case BLOB:
+      case STRING:
+        final Binary[] columns = new Binary[rowSize];
+        Arrays.fill(columns, Binary.EMPTY_VALUE);
+        valueColumns[columnIndex] = columns;
+        break;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Data type %s is not supported.", type));
+    }
+  }
+
   //////////////////////////// process ////////////////////////////
 
   public List<TabletInsertionEvent> processRowByRow(final BiConsumer<Row, RowCollector> consumer) {
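The fillNullValue change above replaces a null column reference with a typed, zero-filled backing array (plus the all-marked bitmap), so downstream serializers can index the column without null checks. A hedged sketch of the core idea, with a String standing in for TSDataType:

final class NullColumns {
  // Returns a typed array of default values for a fully-null column.
  static Object typedNullColumn(final String type, final int rowCount) {
    switch (type) {
      case "INT64":
      case "TIMESTAMP":
        return new long[rowCount];
      case "INT32":
        return new int[rowCount];
      case "DOUBLE":
        return new double[rowCount];
      case "FLOAT":
        return new float[rowCount];
      case "BOOLEAN":
        return new boolean[rowCount];
      default:
        throw new IllegalArgumentException("Unsupported data type: " + type);
    }
  }
}

Note also the companion change: LocalDate.MIN is replaced by a sentinel EMPTY_LOCALDATE (year 1000), presumably because LocalDate.MIN cannot round-trip through the date encoding.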
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/terminate/PipeTerminateEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/terminate/PipeTerminateEvent.java
index bf152dfbf1744..a8bf19ebf3b17 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/terminate/PipeTerminateEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/terminate/PipeTerminateEvent.java
@@ -19,14 +19,22 @@
 
 package org.apache.iotdb.db.pipe.event.common.terminate;
 
+import org.apache.iotdb.commons.concurrent.IoTThreadFactory;
+import org.apache.iotdb.commons.concurrent.ThreadName;
+import org.apache.iotdb.commons.concurrent.threadpool.WrappedThreadPoolExecutor;
 import org.apache.iotdb.commons.consensus.index.ProgressIndex;
 import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex;
+import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
+import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
 import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
-import org.apache.iotdb.commons.pipe.pattern.PipePattern;
-import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta;
+import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
+import org.apache.iotdb.db.pipe.agent.task.PipeDataNodeTask;
 import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent;
-import org.apache.iotdb.db.pipe.task.PipeDataNodeTask;
+
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
 
 /**
  * The {@link PipeTerminateEvent} is an {@link EnrichedEvent} that controls the termination of pipe,
@@ -35,15 +43,34 @@
  * be discarded.
  */
 public class PipeTerminateEvent extends EnrichedEvent {
+
   private final int dataRegionId;
+  private final boolean shouldMark;
+
+  // Do not use the caller-runs policy, to avoid deadlock
+  private static final ExecutorService terminateExecutor =
+      new WrappedThreadPoolExecutor(
+          0,
+          IoTDBDescriptor.getInstance().getConfig().getPipeTaskThreadCount(),
+          0L,
+          TimeUnit.SECONDS,
+          new ArrayBlockingQueue<>(
+              IoTDBDescriptor.getInstance().getConfig().getPipeTaskThreadCount()),
+          new IoTThreadFactory(ThreadName.PIPE_TERMINATE_EXECUTION_POOL.getName()),
+          ThreadName.PIPE_TERMINATE_EXECUTION_POOL.getName());
+
   public PipeTerminateEvent(
       final String pipeName,
       final long creationTime,
       final PipeTaskMeta pipeTaskMeta,
-      final int dataRegionId) {
+      final int dataRegionId,
+      final boolean shouldMark) {
     super(pipeName, creationTime, pipeTaskMeta, null, Long.MIN_VALUE, Long.MAX_VALUE);
     this.dataRegionId = dataRegionId;
+    this.shouldMark = shouldMark;
+
+    addOnCommittedHook(this::markCompleted);
   }
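The terminate executor above is deliberately bounded with no caller-runs fallback: if the submitting pipe thread were forced to run markCompleted() synchronously, it could re-enter a lock it already holds and deadlock. A hedged sketch of the same configuration using plain JDK types (WrappedThreadPoolExecutor and the IoTDB thread names are the real code's choices):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

final class TerminatePools {
  static ExecutorService newTerminatePool(final int threads) {
    return new ThreadPoolExecutor(
        0,
        threads,
        0L,
        TimeUnit.SECONDS,
        new ArrayBlockingQueue<>(threads), // bounded: overflow is rejected, never caller-run
        r -> new Thread(r, "pipe-terminate")); // default AbortPolicy handles rejection
  }
}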
- return new PipeTerminateEvent(pipeName, creationTime, pipeTaskMeta, dataRegionId); + return new PipeTerminateEvent(pipeName, creationTime, pipeTaskMeta, dataRegionId, shouldMark); } @Override @@ -89,14 +116,18 @@ public boolean mayEventPathsOverlappedWithPattern() { return true; } - @Override - public void reportProgress() { - PipeDataNodeAgent.task().markCompleted(pipeName, dataRegionId); + public void markCompleted() { + // To avoid deadlock + if (shouldMark) { + terminateExecutor.submit( + () -> PipeDataNodeAgent.task().markCompleted(pipeName, dataRegionId)); + } } @Override public String toString() { - return String.format("PipeTerminateEvent{dataRegionId=%s}", dataRegionId) + return String.format( + "PipeTerminateEvent{dataRegionId=%s, shouldMark=%s}", dataRegionId, shouldMark) + " - " + super.toString(); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeCompactedTsFileInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeCompactedTsFileInsertionEvent.java new file mode 100644 index 0000000000000..25beecfbea6e0 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeCompactedTsFileInsertionEvent.java @@ -0,0 +1,252 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.iotdb.db.pipe.event.common.tsfile;
+
+import org.apache.iotdb.commons.consensus.index.ProgressIndex;
+import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex;
+import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
+import org.apache.iotdb.commons.pipe.agent.task.progress.CommitterKey;
+import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
+import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
+import org.apache.iotdb.db.pipe.source.dataregion.realtime.assigner.PipeTsFileEpochProgressIndexKeeper;
+import org.apache.iotdb.db.storageengine.dataregion.memtable.TsFileProcessor;
+import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
+
+import java.io.File;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+public class PipeCompactedTsFileInsertionEvent extends PipeTsFileInsertionEvent {
+
+  private final String dataRegionId;
+  private final Set<String> originFilePaths;
+  private final List<Long> commitIds;
+
+  public PipeCompactedTsFileInsertionEvent(
+      final CommitterKey committerKey,
+      final Set<PipeTsFileInsertionEvent> originalEvents,
+      final PipeTsFileInsertionEvent anyOfOriginalEvents,
+      final TsFileResource tsFileResource,
+      final boolean shouldReportProgress) {
+    super(
+        tsFileResource,
+        null,
+        bindIsWithMod(originalEvents),
+        bindIsLoaded(originalEvents),
+        bindIsGeneratedByHistoricalExtractor(originalEvents),
+        committerKey.getPipeName(),
+        committerKey.getCreationTime(),
+        anyOfOriginalEvents.getPipeTaskMeta(),
+        anyOfOriginalEvents.getPipePattern(),
+        anyOfOriginalEvents.getStartTime(),
+        anyOfOriginalEvents.getEndTime());
+
+    this.dataRegionId = String.valueOf(committerKey.getRegionId());
+    this.originFilePaths =
+        originalEvents.stream()
+            .map(PipeTsFileInsertionEvent::getTsFile)
+            .map(File::getPath)
+            .collect(Collectors.toSet());
+    this.commitIds =
+        originalEvents.stream()
+            .map(PipeTsFileInsertionEvent::getCommitId)
+            .distinct()
+            .collect(Collectors.toList());
+
+    // init fields of EnrichedEvent
+    this.committerKey = committerKey;
+    isPatternParsed = bindIsPatternParsed(originalEvents);
+    isTimeParsed = bindIsTimeParsed(originalEvents);
+    this.shouldReportOnCommit = shouldReportProgress;
+
+    // init fields of PipeTsFileInsertionEvent
+    flushPointCount = bindFlushPointCount(originalEvents);
+    overridingProgressIndex = bindOverridingProgressIndex(originalEvents);
+  }
+
+  private static boolean bindIsWithMod(Set<PipeTsFileInsertionEvent> originalEvents) {
+    return originalEvents.stream().anyMatch(PipeTsFileInsertionEvent::isWithMod);
+  }
+
+  private static boolean bindIsLoaded(Set<PipeTsFileInsertionEvent> originalEvents) {
+    return originalEvents.stream().anyMatch(PipeTsFileInsertionEvent::isLoaded);
+  }
+
+  private static boolean bindIsGeneratedByHistoricalExtractor(
+      Set<PipeTsFileInsertionEvent> originalEvents) {
+    return originalEvents.stream()
+        .anyMatch(PipeTsFileInsertionEvent::isGeneratedByHistoricalExtractor);
+  }
+
+  private static boolean bindIsTimeParsed(Set<PipeTsFileInsertionEvent> originalEvents) {
+    return originalEvents.stream().noneMatch(EnrichedEvent::shouldParseTime);
+  }
+
+  private static boolean bindIsPatternParsed(Set<PipeTsFileInsertionEvent> originalEvents) {
+    return originalEvents.stream().noneMatch(EnrichedEvent::shouldParsePattern);
+  }
+
+  private static long bindFlushPointCount(Set<PipeTsFileInsertionEvent> originalEvents) {
+    return originalEvents.stream()
+        .mapToLong(
+            e ->
+                e.getFlushPointCount() == TsFileProcessor.FLUSH_POINT_COUNT_NOT_SET
+                    ? 0
+                    : e.getFlushPointCount())
+        .sum();
+  }
+
+  private ProgressIndex bindOverridingProgressIndex(
+      Set<PipeTsFileInsertionEvent> originalEvents) {
+    ProgressIndex overridingProgressIndex = MinimumProgressIndex.INSTANCE;
+    for (PipeTsFileInsertionEvent originalEvent : originalEvents) {
+      if (originalEvent.overridingProgressIndex != null) {
+        overridingProgressIndex =
+            overridingProgressIndex.updateToMinimumEqualOrIsAfterProgressIndex(
+                originalEvent.overridingProgressIndex);
+      }
+    }
+    return overridingProgressIndex != null
+            && !overridingProgressIndex.equals(MinimumProgressIndex.INSTANCE)
+        ? overridingProgressIndex
+        : null;
+  }
+
+  @Override
+  public int getRebootTimes() {
+    throw new UnsupportedOperationException(
+        "PipeCompactedTsFileInsertionEvent does not support getRebootTimes.");
+  }
+
+  @Override
+  public boolean hasMultipleCommitIds() {
+    return true;
+  }
+
+  @Override
+  public long getCommitId() {
+    // max of commitIds is used as the commit id for this event
+    return commitIds.stream()
+        .max(Long::compareTo)
+        .orElseThrow(
+            () ->
+                new IllegalStateException(
+                    "No commit IDs found in PipeCompactedTsFileInsertionEvent."));
+  }
+
+  // return dummy events for each commit ID (except the max one)
+  @Override
+  public List<EnrichedEvent> getDummyEventsForCommitIds() {
+    return commitIds.stream()
+        .filter(commitId -> commitId != getCommitId())
+        .map(PipeCompactedTsFileInsertionDummyEvent::new)
+        .collect(Collectors.toList());
+  }
+
+  @Override
+  public List<Long> getCommitIds() {
+    return commitIds;
+  }
+
+  @Override
+  public boolean equalsInPipeConsensus(final Object o) {
+    throw new UnsupportedOperationException(
+        "PipeCompactedTsFileInsertionEvent does not support equalsInPipeConsensus.");
+  }
+
+  @Override
+  public void eliminateProgressIndex() {
+    if (Objects.isNull(overridingProgressIndex)) {
+      for (final String originFilePath : originFilePaths) {
+        PipeTsFileEpochProgressIndexKeeper.getInstance()
+            .eliminateProgressIndex(dataRegionId, pipeName, originFilePath);
+      }
+    }
+  }
+
+  public class PipeCompactedTsFileInsertionDummyEvent extends EnrichedEvent {
+
+    private final long commitId;
+
+    public PipeCompactedTsFileInsertionDummyEvent(final long commitId) {
+      super(
+          PipeCompactedTsFileInsertionEvent.this.pipeName,
+          PipeCompactedTsFileInsertionEvent.this.creationTime,
+          PipeCompactedTsFileInsertionEvent.this.pipeTaskMeta,
+          null, // PipePattern is not needed for dummy event
+          Long.MIN_VALUE,
+          Long.MAX_VALUE);
+      this.commitId = commitId; // Use the commitId passed in
+      this.shouldReportOnCommit = false; // Dummy events do not report progress
+    }
+
+    @Override
+    public long getCommitId() {
+      return commitId;
+    }
+
+    @Override
+    public boolean internallyIncreaseResourceReferenceCount(String holderMessage) {
+      return true;
+    }
+
+    @Override
+    public boolean internallyDecreaseResourceReferenceCount(String holderMessage) {
+      return true;
+    }
+
+    @Override
+    public ProgressIndex getProgressIndex() {
+      return MinimumProgressIndex.INSTANCE;
+    }
+
+    @Override
+    public EnrichedEvent shallowCopySelfAndBindPipeTaskMetaForProgressReport(
+        String pipeName,
+        long creationTime,
+        PipeTaskMeta pipeTaskMeta,
+        PipePattern pattern,
+        long startTime,
+        long endTime) {
+      return null;
+    }
+
+    @Override
+    public boolean isGeneratedByPipe() {
+      return false;
+    }
+
+    @Override
+    public boolean mayEventTimeOverlappedWithTimeRange() {
+      return false;
+    }
+
+    @Override
+    public boolean mayEventPathsOverlappedWithPattern() {
+      return false;
+    }
+
+    @Override
+    public String coreReportMessage() {
+      return
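Note: two merge rules in PipeCompactedTsFileInsertionEvent above are worth calling out. Boolean properties are folded across the source events with opposite polarity: transfer-affecting flags (mod files, loaded, historical) use anyMatch, while "already parsed" flags use noneMatch of the corresponding shouldParse check, so the merged event only skips work that every source event had already finished. Commit ids are folded by committing once under the maximum id and emitting no-op dummy events for the rest. A condensed, Java-8-compatible sketch of both rules, with an illustrative Ev class in place of the real event type:

    import java.util.List;
    import java.util.Set;
    import java.util.stream.Collectors;

    class CompactionFoldSketch {
      static class Ev {
        final boolean isWithMod;
        final boolean shouldParseTime;
        Ev(boolean isWithMod, boolean shouldParseTime) {
          this.isWithMod = isWithMod;
          this.shouldParseTime = shouldParseTime;
        }
      }

      // Transfer mod files if ANY source event carried them ...
      static boolean bindIsWithMod(Set<Ev> events) {
        return events.stream().anyMatch(e -> e.isWithMod);
      }

      // ... but treat time as parsed only if NO source event still needs parsing.
      static boolean bindIsTimeParsed(Set<Ev> events) {
        return events.stream().noneMatch(e -> e.shouldParseTime);
      }

      // The real commit happens once, under the largest id ...
      static long commitId(List<Long> commitIds) {
        return commitIds.stream().max(Long::compareTo).orElseThrow(IllegalStateException::new);
      }

      // ... and every smaller id gets a no-op placeholder so the per-id
      // progress queues can advance without reporting progress twice.
      static List<Long> dummyIds(List<Long> commitIds) {
        final long max = commitId(commitIds);
        return commitIds.stream().filter(id -> id != max).collect(Collectors.toList());
      }
    }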
"PipeCompactedTsFileInsertionDummyEvent"; + } + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java index 9fab25ee38bd9..baa6ae9a3bae7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java @@ -21,14 +21,21 @@ import org.apache.iotdb.commons.consensus.index.ProgressIndex; import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex; +import org.apache.iotdb.commons.exception.pipe.PipeRuntimeOutOfMemoryCriticalException; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.config.PipeConfig; +import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.commons.pipe.pattern.PipePattern; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager.PipeEventResource; +import org.apache.iotdb.db.pipe.event.ReferenceTrackableEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tsfile.container.TsFileInsertionDataContainer; import org.apache.iotdb.db.pipe.event.common.tsfile.container.TsFileInsertionDataContainerProvider; +import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics; import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; +import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager; import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileResourceManager; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.assigner.PipeTsFileEpochProgressIndexKeeper; import org.apache.iotdb.db.storageengine.dataregion.memtable.TsFileProcessor; import org.apache.iotdb.db.storageengine.dataregion.modification.ModificationFile; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; @@ -44,59 +51,53 @@ import java.io.File; import java.io.IOException; import java.util.Collections; +import java.util.Iterator; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; -public class PipeTsFileInsertionEvent extends EnrichedEvent implements TsFileInsertionEvent { +public class PipeTsFileInsertionEvent extends EnrichedEvent + implements TsFileInsertionEvent, ReferenceTrackableEvent { private static final Logger LOGGER = LoggerFactory.getLogger(PipeTsFileInsertionEvent.class); - private final TsFileResource resource; - private File tsFile; + protected final TsFileResource resource; + protected File tsFile; + protected long extractTime = 0; // This is true iff the modFile exists and should be transferred - private boolean isWithMod; - private File modFile; + protected boolean isWithMod; + protected File modFile; - private final boolean isLoaded; - private final boolean isGeneratedByPipe; - private final boolean isGeneratedByPipeConsensus; - private final boolean isGeneratedByHistoricalExtractor; + protected final boolean isLoaded; + protected final boolean 
isGeneratedByPipe;
+  protected final boolean isGeneratedByPipeConsensus;
+  protected final boolean isGeneratedByHistoricalExtractor;
 
-  private final AtomicBoolean isClosed;
-  private TsFileInsertionDataContainer dataContainer;
+  protected final AtomicBoolean isClosed;
+  protected final AtomicReference<TsFileInsertionDataContainer> dataContainer;
 
   // The point count of the TsFile. Used for metrics on PipeConsensus' receiver side.
   // May be updated after it is flushed. Should be negative if not set.
-  private long flushPointCount = TsFileProcessor.FLUSH_POINT_COUNT_NOT_SET;
+  protected long flushPointCount = TsFileProcessor.FLUSH_POINT_COUNT_NOT_SET;
 
-  public PipeTsFileInsertionEvent(
-      final TsFileResource resource,
-      final boolean isLoaded,
-      final boolean isGeneratedByPipe,
-      final boolean isGeneratedByHistoricalExtractor) {
+  protected volatile ProgressIndex overridingProgressIndex;
+
+  public PipeTsFileInsertionEvent(final TsFileResource resource, final boolean isLoaded) {
     // The modFile must be copied before the event is assigned to the listening pipes
     this(
-        resource,
-        true,
-        isLoaded,
-        isGeneratedByPipe,
-        isGeneratedByHistoricalExtractor,
-        null,
-        0,
-        null,
-        null,
-        Long.MIN_VALUE,
-        Long.MAX_VALUE);
+        resource, null, true, isLoaded, false, null, 0, null, null, Long.MIN_VALUE, Long.MAX_VALUE);
   }
 
   public PipeTsFileInsertionEvent(
       final TsFileResource resource,
+      final File tsFile,
       final boolean isWithMod,
       final boolean isLoaded,
-      final boolean isGeneratedByPipe,
       final boolean isGeneratedByHistoricalExtractor,
       final String pipeName,
       final long creationTime,
@@ -105,19 +106,25 @@ public PipeTsFileInsertionEvent(
       final long startTime,
       final long endTime) {
     super(pipeName, creationTime, pipeTaskMeta, pattern, startTime, endTime);
     this.resource = resource;
-    tsFile = resource.getTsFile();
+
+    // For events created at the assigner or historical extractor, the tsFile is obtained from
+    // the resource. For events created by the source, the tsFile is inherited from the assigner,
+    // because the original tsFile may be gone, and we need the assigner's hard-linked tsFile to
+    // hard-link it to each pipe dir.
+    this.tsFile = Objects.isNull(tsFile) ? resource.getTsFile() : tsFile;
 
     final ModificationFile modFile = resource.getModFile();
     this.isWithMod = isWithMod && modFile.exists();
     this.modFile = this.isWithMod ? new File(modFile.getFilePath()) : null;
 
     this.isLoaded = isLoaded;
-    this.isGeneratedByPipe = isGeneratedByPipe;
+    this.isGeneratedByPipe = resource.isGeneratedByPipe();
     this.isGeneratedByPipeConsensus = resource.isGeneratedByPipeConsensus();
     this.isGeneratedByHistoricalExtractor = isGeneratedByHistoricalExtractor;
+
+    this.dataContainer = new AtomicReference<>(null);
+
     isClosed = new AtomicBoolean(resource.isClosed());
     // Register close listener if TsFile is not closed
     if (!isClosed.get()) {
@@ -153,6 +160,13 @@ public PipeTsFileInsertionEvent(
     // If the status is "closed", then the resource status is "closed", the tsFile won't be altered
     // and can be sent.
     isClosed.set(resource.isClosed());
+
+    addOnCommittedHook(
+        () -> {
+          if (shouldReportOnCommit) {
+            eliminateProgressIndex();
+          }
+        });
   }
 
   /**
@@ -160,6 +174,10 @@ public PipeTsFileInsertionEvent(
    * otherwise.
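Note: the constructor comment above encodes an ordering constraint: per-pipe events must hard-link from the assigner's already hard-linked copy, not from the original path, because compaction may have deleted the original by the time the pipe-specific link is made. A minimal java.nio sketch of that chain (paths and names are illustrative, not the pipe resource manager's API):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    class HardLinkChainSketch {
      // Link FROM the assigner's copy: a hard link keeps the underlying data alive
      // even after the original file path has been removed by compaction.
      static Path linkForPipe(Path assignerLink, Path pipeDir, String fileName)
          throws IOException {
        Files.createDirectories(pipeDir);
        return Files.createLink(pipeDir.resolve(fileName), assignerLink);
      }
    }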
*/ public boolean waitForTsFileClose() throws InterruptedException { + if (Objects.isNull(resource)) { + return true; + } + if (!isClosed.get()) { isClosed.set(resource.isClosed()); @@ -190,6 +208,7 @@ public boolean waitForTsFileClose() throws InterruptedException { return !resource.isEmpty(); } + @Override public File getTsFile() { return tsFile; } @@ -212,10 +231,6 @@ public boolean isLoaded() { return isLoaded; } - public long getFileStartTime() { - return resource.getFileStartTime(); - } - /** * Only used for metrics on PipeConsensus' receiver side. If the event is recovered after data * node's restart, the flushPointCount can be not set. It's totally fine for the PipeConsensus' @@ -228,14 +243,20 @@ public long getFlushPointCount() { return flushPointCount; } + public long getTimePartitionId() { + return resource.getTimePartition(); + } + /////////////////////////// EnrichedEvent /////////////////////////// @Override public boolean internallyIncreaseResourceReferenceCount(final String holderMessage) { + extractTime = System.nanoTime(); try { - tsFile = PipeDataNodeResourceManager.tsfile().increaseFileReference(tsFile, true, resource); + tsFile = PipeDataNodeResourceManager.tsfile().increaseFileReference(tsFile, true, pipeName); if (isWithMod) { - modFile = PipeDataNodeResourceManager.tsfile().increaseFileReference(modFile, false, null); + modFile = + PipeDataNodeResourceManager.tsfile().increaseFileReference(modFile, false, pipeName); } return true; } catch (final Exception e) { @@ -245,16 +266,22 @@ public boolean internallyIncreaseResourceReferenceCount(final String holderMessa tsFile, modFile, holderMessage), e); return false; + } finally { + if (Objects.nonNull(pipeName)) { + PipeDataNodeSinglePipeMetrics.getInstance() + .increaseTsFileEventCount(pipeName, creationTime); + } } } @Override public boolean internallyDecreaseResourceReferenceCount(final String holderMessage) { try { - PipeDataNodeResourceManager.tsfile().decreaseFileReference(tsFile); + PipeDataNodeResourceManager.tsfile().decreaseFileReference(tsFile, pipeName); if (isWithMod) { - PipeDataNodeResourceManager.tsfile().decreaseFileReference(modFile); + PipeDataNodeResourceManager.tsfile().decreaseFileReference(modFile, pipeName); } + close(); return true; } catch (final Exception e) { LOGGER.warn( @@ -263,26 +290,48 @@ public boolean internallyDecreaseResourceReferenceCount(final String holderMessa tsFile.getPath(), holderMessage), e); return false; + } finally { + if (Objects.nonNull(pipeName)) { + PipeDataNodeSinglePipeMetrics.getInstance() + .decreaseTsFileEventCount( + pipeName, + creationTime, + shouldReportOnCommit ? System.nanoTime() - extractTime : -1); + } } } + @Override + public void bindProgressIndex(final ProgressIndex overridingProgressIndex) { + this.overridingProgressIndex = overridingProgressIndex; + } + @Override public ProgressIndex getProgressIndex() { - try { - if (!waitForTsFileClose()) { - LOGGER.warn( - "Skipping temporary TsFile {}'s progressIndex, will report MinimumProgressIndex", - tsFile); - return MinimumProgressIndex.INSTANCE; - } - return resource.getMaxProgressIndexAfterClose(); - } catch (final InterruptedException e) { + return resource.getMaxProgressIndex(); + } + + /** + * Get ProgressIndex without waiting for tsfile close. Can be used in getting progressIndex when + * memTable becomes immutable. 
+ */ + public ProgressIndex forceGetProgressIndex() { + if (resource.isEmpty()) { LOGGER.warn( - String.format( - "Interrupted when waiting for closing TsFile %s.", resource.getTsFilePath())); - Thread.currentThread().interrupt(); + "Skipping temporary TsFile {}'s progressIndex, will report MinimumProgressIndex", tsFile); return MinimumProgressIndex.INSTANCE; } + if (Objects.nonNull(overridingProgressIndex)) { + return overridingProgressIndex; + } + return resource.getMaxProgressIndex(); + } + + public void eliminateProgressIndex() { + if (Objects.isNull(overridingProgressIndex) && Objects.nonNull(resource)) { + PipeTsFileEpochProgressIndexKeeper.getInstance() + .eliminateProgressIndex(resource.getDataRegionId(), pipeName, resource.getTsFilePath()); + } } @Override @@ -295,9 +344,9 @@ public PipeTsFileInsertionEvent shallowCopySelfAndBindPipeTaskMetaForProgressRep final long endTime) { return new PipeTsFileInsertionEvent( resource, + tsFile, isWithMod, isLoaded, - isGeneratedByPipe, isGeneratedByHistoricalExtractor, pipeName, creationTime, @@ -314,16 +363,15 @@ public boolean isGeneratedByPipe() { @Override public boolean mayEventTimeOverlappedWithTimeRange() { - // If the tsFile is not closed the resource.getFileEndTime() will be Long.MIN_VALUE - // In that case we only judge the resource.getFileStartTime() to avoid losing data - return isClosed.get() - ? startTime <= resource.getFileEndTime() && resource.getFileStartTime() <= endTime - : resource.getFileStartTime() <= endTime; + // Notice that this is only called at realtime extraction, and the tsFile is always closed + // Thus we can use the end time to judge the overlap + return Objects.isNull(resource) + || startTime <= resource.getFileEndTime() && resource.getFileStartTime() <= endTime; } @Override public boolean mayEventPathsOverlappedWithPattern() { - if (!resource.isClosed()) { + if (Objects.isNull(resource) || !resource.isClosed()) { return true; } @@ -331,7 +379,8 @@ public boolean mayEventPathsOverlappedWithPattern() { final Map deviceIsAlignedMap = PipeDataNodeResourceManager.tsfile() .getDeviceIsAlignedMapFromCache( - PipeTsFileResourceManager.getHardlinkOrCopiedFileInPipeDir(resource.getTsFile()), + PipeTsFileResourceManager.getHardlinkOrCopiedFileInPipeDir( + resource.getTsFile(), pipeName), false); final Set deviceSet = Objects.nonNull(deviceIsAlignedMap) ? 
deviceIsAlignedMap.keySet() : resource.getDevices();
@@ -340,7 +389,7 @@ public boolean mayEventPathsOverlappedWithPattern() {
           // TODO: use IDeviceID
           deviceID ->
               pipePattern.mayOverlapWithDevice(((PlainDeviceID) deviceID).toStringID()));
-    } catch (final IOException e) {
+    } catch (final Exception e) {
       LOGGER.warn(
           "Pipe {}: failed to get devices from TsFile {}, extract it anyway",
           pipeName,
@@ -352,27 +401,130 @@
 
   /////////////////////////// TsFileInsertionEvent ///////////////////////////
 
+  @FunctionalInterface
+  public interface TabletInsertionEventConsumer {
+    void consume(final PipeRawTabletInsertionEvent event);
+  }
+
+  public void consumeTabletInsertionEventsWithRetry(
+      final TabletInsertionEventConsumer consumer, final String callerName) throws PipeException {
+    final Iterable<TabletInsertionEvent> iterable = toTabletInsertionEvents();
+    final Iterator<TabletInsertionEvent> iterator = iterable.iterator();
+    int tabletEventCount = 0;
+    while (iterator.hasNext()) {
+      final TabletInsertionEvent parsedEvent = iterator.next();
+      tabletEventCount++;
+      int retryCount = 0;
+      while (true) {
+        // If it failed due to insufficient memory, retry until success to avoid a race among
+        // multiple processor threads
+        try {
+          consumer.consume((PipeRawTabletInsertionEvent) parsedEvent);
+          break;
+        } catch (final PipeRuntimeOutOfMemoryCriticalException e) {
+          if (retryCount++ % 100 == 0) {
+            LOGGER.warn(
+                "{}: failed to allocate memory for parsing TsFile {}, tablet event no. {}, retry count is {}, will keep retrying.",
+                callerName,
+                getTsFile(),
+                tabletEventCount,
+                retryCount,
+                e);
+          } else if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(
+                "{}: failed to allocate memory for parsing TsFile {}, tablet event no. {}, retry count is {}, will keep retrying.",
+                callerName,
+                getTsFile(),
+                tabletEventCount,
+                retryCount,
+                e);
+          }
+        }
+      }
+    }
+  }
+
   @Override
-  public Iterable<TabletInsertionEvent> toTabletInsertionEvents() {
+  public Iterable<TabletInsertionEvent> toTabletInsertionEvents() throws PipeException {
+    // 20 - 40 seconds for waiting
+    // Cannot be unlimited, or it will cause deadlock
+    return toTabletInsertionEvents((long) ((1 + Math.random()) * 20 * 1000));
+  }
+
+  public Iterable<TabletInsertionEvent> toTabletInsertionEvents(final long timeoutMs)
+      throws PipeException {
     try {
       if (!waitForTsFileClose()) {
         LOGGER.warn(
             "Pipe skipping temporary TsFile's parsing which shouldn't be transferred: {}",
             tsFile);
         return Collections.emptyList();
       }
+      waitForResourceEnough4Parsing(timeoutMs);
       return initDataContainer().toTabletInsertionEvents();
-    } catch (final InterruptedException e) {
-      Thread.currentThread().interrupt();
+    } catch (final Exception e) {
       close();
+      // close() should be called before re-interrupting the thread
+      if (e instanceof InterruptedException) {
+        Thread.currentThread().interrupt();
+      }
+
       final String errorMsg =
-          String.format(
-              "Interrupted when waiting for closing TsFile %s.", resource.getTsFilePath());
+          e instanceof InterruptedException
+              ? String.format(
+                  "Interrupted when waiting for closing TsFile %s.", resource.getTsFilePath())
+              : String.format(
+                  "Parse TsFile %s error. 
Because: %s", resource.getTsFilePath(), e.getMessage()); LOGGER.warn(errorMsg, e); throw new PipeException(errorMsg); } } + private void waitForResourceEnough4Parsing(final long timeoutMs) throws InterruptedException { + final PipeMemoryManager memoryManager = PipeDataNodeResourceManager.memory(); + if (memoryManager.isEnough4TabletParsing()) { + return; + } + + final long startTime = System.currentTimeMillis(); + long lastRecordTime = startTime; + + final long memoryCheckIntervalMs = + PipeConfig.getInstance().getPipeCheckMemoryEnoughIntervalMs(); + while (!memoryManager.isEnough4TabletParsing()) { + Thread.sleep(memoryCheckIntervalMs); + + final long currentTime = System.currentTimeMillis(); + final double elapsedRecordTimeSeconds = (currentTime - lastRecordTime) / 1000.0; + final double waitTimeSeconds = (currentTime - startTime) / 1000.0; + if (elapsedRecordTimeSeconds > 10.0) { + LOGGER.info( + "Wait for resource enough for parsing {} for {} seconds.", + resource != null ? resource.getTsFilePath() : "tsfile", + waitTimeSeconds); + lastRecordTime = currentTime; + } else if (LOGGER.isDebugEnabled()) { + LOGGER.debug( + "Wait for resource enough for parsing {} for {} seconds.", + resource != null ? resource.getTsFilePath() : "tsfile", + waitTimeSeconds); + } + + if (waitTimeSeconds * 1000 > timeoutMs) { + // should contain 'TimeoutException' in exception message + throw new PipeException( + String.format("TimeoutException: Waited %s seconds", waitTimeSeconds)); + } + } + + final long currentTime = System.currentTimeMillis(); + final double waitTimeSeconds = (currentTime - startTime) / 1000.0; + LOGGER.info( + "Wait for resource enough for parsing {} for {} seconds.", + resource != null ? resource.getTsFilePath() : "tsfile", + waitTimeSeconds); + } + /** The method is used to prevent circular replication in PipeConsensus */ public boolean isGeneratedByPipeConsensus() { return isGeneratedByPipeConsensus; @@ -384,35 +536,42 @@ public boolean isGeneratedByHistoricalExtractor() { private TsFileInsertionDataContainer initDataContainer() { try { - if (dataContainer == null) { - dataContainer = - new TsFileInsertionDataContainerProvider( - tsFile, pipePattern, startTime, endTime, pipeTaskMeta, this) - .provide(); - } - return dataContainer; + dataContainer.compareAndSet( + null, + new TsFileInsertionDataContainerProvider( + pipeName, + creationTime, + tsFile, + pipePattern, + startTime, + endTime, + pipeTaskMeta, + this) + .provide()); + return dataContainer.get(); } catch (final IOException e) { close(); - final String errorMsg = String.format("Read TsFile %s error.", resource.getTsFilePath()); + final String errorMsg = String.format("Read TsFile %s error.", tsFile.getPath()); LOGGER.warn(errorMsg, e); throw new PipeException(errorMsg); } } public long count(final boolean skipReportOnCommit) throws IOException { - long count = 0; + AtomicLong count = new AtomicLong(); if (shouldParseTime()) { try { - for (final TabletInsertionEvent event : toTabletInsertionEvents()) { - final PipeRawTabletInsertionEvent rawEvent = ((PipeRawTabletInsertionEvent) event); - count += rawEvent.count(); - if (skipReportOnCommit) { - rawEvent.skipReportOnCommit(); - } - } - return count; + consumeTabletInsertionEventsWithRetry( + event -> { + count.addAndGet(event.count()); + if (skipReportOnCommit) { + event.skipReportOnCommit(); + } + }, + "PipeTsFileInsertionEvent::count"); + return count.get(); } finally { close(); } @@ -427,10 +586,13 @@ public long count(final boolean skipReportOnCommit) throws IOException { /** 
Release the resource of {@link TsFileInsertionDataContainer}. */ @Override public void close() { - if (dataContainer != null) { - dataContainer.close(); - dataContainer = null; - } + dataContainer.getAndUpdate( + container -> { + if (Objects.nonNull(container)) { + container.close(); + } + return null; + }); } /////////////////////////// Object /////////////////////////// @@ -438,8 +600,8 @@ public void close() { @Override public String toString() { return String.format( - "PipeTsFileInsertionEvent{resource=%s, tsFile=%s, isLoaded=%s, isGeneratedByPipe=%s, isClosed=%s, dataContainer=%s}", - resource, tsFile, isLoaded, isGeneratedByPipe, isClosed.get(), dataContainer) + "PipeTsFileInsertionEvent{resource=%s, tsFile=%s, isLoaded=%s, isGeneratedByPipe=%s, dataContainer=%s}", + resource, tsFile, isLoaded, isGeneratedByPipe, dataContainer) + " - " + super.toString(); } @@ -447,9 +609,75 @@ public String toString() { @Override public String coreReportMessage() { return String.format( - "PipeTsFileInsertionEvent{resource=%s, tsFile=%s, isLoaded=%s, isGeneratedByPipe=%s, isClosed=%s}", - resource, tsFile, isLoaded, isGeneratedByPipe, isClosed.get()) + "PipeTsFileInsertionEvent{resource=%s, tsFile=%s, isLoaded=%s, isGeneratedByPipe=%s}", + resource, tsFile, isLoaded, isGeneratedByPipe) + " - " + super.coreReportMessage(); } + + /////////////////////////// ReferenceTrackableEvent /////////////////////////// + + @Override + public void trackResource() { + PipeDataNodeResourceManager.ref().trackPipeEventResource(this, eventResourceBuilder()); + } + + @Override + public PipeEventResource eventResourceBuilder() { + return new PipeTsFileInsertionEventResource( + this.isReleased, + this.referenceCount, + this.pipeName, + this.tsFile, + this.isWithMod, + this.modFile, + this.dataContainer); + } + + private static class PipeTsFileInsertionEventResource extends PipeEventResource { + + private final File tsFile; + private final boolean isWithMod; + private final File modFile; + private final AtomicReference dataContainer; + private final String pipeName; + + private PipeTsFileInsertionEventResource( + final AtomicBoolean isReleased, + final AtomicInteger referenceCount, + final String pipeName, + final File tsFile, + final boolean isWithMod, + final File modFile, + final AtomicReference dataContainer) { + super(isReleased, referenceCount); + this.pipeName = pipeName; + this.tsFile = tsFile; + this.isWithMod = isWithMod; + this.modFile = modFile; + this.dataContainer = dataContainer; + } + + @Override + protected void finalizeResource() { + try { + // decrease reference count + PipeDataNodeResourceManager.tsfile().decreaseFileReference(tsFile, pipeName); + if (isWithMod) { + PipeDataNodeResourceManager.tsfile().decreaseFileReference(modFile, pipeName); + } + + // close data container + dataContainer.getAndUpdate( + container -> { + if (Objects.nonNull(container)) { + container.close(); + } + return null; + }); + } catch (final Exception e) { + LOGGER.warn("Decrease reference count for TsFile {} error.", tsFile.getPath(), e); + } + } + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/TsFileInsertionPointCounter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/TsFileInsertionPointCounter.java index 219a8933a73c6..991d05e8660f9 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/TsFileInsertionPointCounter.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/TsFileInsertionPointCounter.java
@@ -19,7 +19,7 @@
 
 package org.apache.iotdb.db.pipe.event.common.tsfile;
 
-import org.apache.iotdb.commons.pipe.pattern.PipePattern;
+import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
 
 import org.apache.tsfile.file.metadata.IDeviceID;
 import org.apache.tsfile.file.metadata.PlainDeviceID;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/TsFileInsertionDataContainer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/TsFileInsertionDataContainer.java
index 2e8a7ec6efa99..77ef2c871cb72 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/TsFileInsertionDataContainer.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/TsFileInsertionDataContainer.java
@@ -19,9 +19,13 @@
 
 package org.apache.iotdb.db.pipe.event.common.tsfile.container;
 
+import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
+import org.apache.iotdb.commons.pipe.config.PipeConfig;
+import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
 import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
-import org.apache.iotdb.commons.pipe.pattern.PipePattern;
-import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta;
+import org.apache.iotdb.db.pipe.metric.overview.PipeTsFileToTabletsMetrics;
+import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
+import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock;
 import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
 
 import org.apache.tsfile.read.TsFileSequenceReader;
@@ -36,20 +40,35 @@ public abstract class TsFileInsertionDataContainer implements AutoCloseable {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(TsFileInsertionDataContainer.class);
 
+  protected final String pipeName;
+  protected final long creationTime;
+
   protected final PipePattern pattern; // used to filter data
   protected final GlobalTimeExpression timeFilterExpression; // used to filter data
 
   protected final PipeTaskMeta pipeTaskMeta; // used to report progress
   protected final EnrichedEvent sourceEvent; // used to report progress
 
+  protected final long initialTimeNano = System.nanoTime();
+  protected boolean timeUsageReported = false;
+
+  protected final PipeMemoryBlock allocatedMemoryBlockForTablet;
+
   protected TsFileSequenceReader tsFileSequenceReader;
 
+  protected Iterable<TabletInsertionEvent> tabletInsertionIterable;
+
   protected TsFileInsertionDataContainer(
+      final String pipeName,
+      final long creationTime,
       final PipePattern pattern,
       final long startTime,
       final long endTime,
       final PipeTaskMeta pipeTaskMeta,
       final EnrichedEvent sourceEvent) {
+    this.pipeName = pipeName;
+    this.creationTime = creationTime;
+
     this.pattern = pattern;
     timeFilterExpression =
         (startTime == Long.MIN_VALUE && endTime == Long.MAX_VALUE)
@@ -58,6 +77,12 @@ protected TsFileInsertionDataContainer(
 
     this.pipeTaskMeta = pipeTaskMeta;
     this.sourceEvent = sourceEvent;
+
+    // Allocate empty memory block, will be resized later.
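Note: the base container reserves an empty, fixed-size memory block up front and only grows it later (the scan/query subclasses resize it once the first row reveals the actual tablet footprint). A sketch of that reserve-then-grow accounting against a shared budget; the counter and method names are stand-ins, not the PipeMemoryManager API:

    import java.util.concurrent.atomic.AtomicLong;

    class MemoryBlockSketch {
      // Shared budget across all parsers (stand-in for the pipe memory manager).
      static final AtomicLong usedBytes = new AtomicLong();

      private long reservedBytes;

      // Reserve a placeholder before any row is read, so the block always exists
      // and close() can release it unconditionally ...
      MemoryBlockSketch(long initialBytes) {
        usedBytes.addAndGet(initialBytes);
        reservedBytes = initialBytes;
      }

      // ... then grow (or shrink) once the real footprint is known.
      void resize(long newBytes) {
        usedBytes.addAndGet(newBytes - reservedBytes);
        reservedBytes = newBytes;
      }

      void close() {
        resize(0);
      }
    }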
+ this.allocatedMemoryBlockForTablet = + PipeDataNodeResourceManager.memory() + .forceAllocateForTabletWithRetry( + PipeConfig.getInstance().getPipeDataStructureTabletSizeInBytes()); } /** @@ -67,6 +92,20 @@ protected TsFileInsertionDataContainer( @Override public void close() { + + tabletInsertionIterable = null; + + try { + if (pipeName != null && !timeUsageReported) { + PipeTsFileToTabletsMetrics.getInstance() + .recordTsFileToTabletTime( + pipeName + "_" + creationTime, System.nanoTime() - initialTimeNano); + timeUsageReported = true; + } + } catch (final Exception e) { + LOGGER.warn("Failed to report time usage for parsing tsfile for pipe {}", pipeName, e); + } + try { if (tsFileSequenceReader != null) { tsFileSequenceReader.close(); @@ -74,5 +113,9 @@ public void close() { } catch (final IOException e) { LOGGER.warn("Failed to close TsFileSequenceReader", e); } + + if (allocatedMemoryBlockForTablet != null) { + allocatedMemoryBlockForTablet.close(); + } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/TsFileInsertionDataContainerProvider.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/TsFileInsertionDataContainerProvider.java index 9ce61d70eab60..1050950d17680 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/TsFileInsertionDataContainerProvider.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/TsFileInsertionDataContainerProvider.java @@ -19,14 +19,17 @@ package org.apache.iotdb.db.pipe.event.common.tsfile.container; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.pattern.IoTDBPipePattern; -import org.apache.iotdb.commons.pipe.pattern.PipePattern; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBPipePattern; +import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern; import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tsfile.container.query.TsFileInsertionQueryDataContainer; import org.apache.iotdb.db.pipe.event.common.tsfile.container.scan.TsFileInsertionScanDataContainer; +import org.apache.iotdb.db.pipe.metric.overview.PipeTsFileToTabletsMetrics; import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; +import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager; +import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFilePublicResource; import org.apache.tsfile.file.metadata.IDeviceID; import org.apache.tsfile.file.metadata.PlainDeviceID; @@ -39,6 +42,9 @@ public class TsFileInsertionDataContainerProvider { + private final String pipeName; + private final long creationTime; + private final File tsFile; private final PipePattern pattern; private final long startTime; @@ -48,12 +54,16 @@ public class TsFileInsertionDataContainerProvider { protected final PipeTsFileInsertionEvent sourceEvent; public TsFileInsertionDataContainerProvider( + final String pipeName, + final long creationTime, final File tsFile, final PipePattern pipePattern, final long startTime, final long endTime, final PipeTaskMeta pipeTaskMeta, final PipeTsFileInsertionEvent sourceEvent) { + this.pipeName = pipeName; + this.creationTime = creationTime; this.tsFile = tsFile; this.pattern = pipePattern; this.startTime = 
startTime; @@ -63,20 +73,29 @@ public TsFileInsertionDataContainerProvider( } public TsFileInsertionDataContainer provide() throws IOException { - if (startTime != Long.MIN_VALUE - || endTime != Long.MAX_VALUE - || pattern instanceof IoTDBPipePattern - && !((IoTDBPipePattern) pattern).mayMatchMultipleTimeSeriesInOneDevice()) { - // 1. If time filter exists, use query here because the scan container may filter it - // row by row in single page chunk. - // 2. If the pattern matches only one time series in one device, use query container here + if (pipeName != null) { + PipeTsFileToTabletsMetrics.getInstance() + .markTsFileToTabletInvocation(pipeName + "_" + creationTime); + } + + // Use scan container to save memory + if ((double) PipeDataNodeResourceManager.memory().getUsedMemorySizeInBytes() + / PipeMemoryManager.getTotalNonFloatingMemorySizeInBytes() + > PipeTsFilePublicResource.MEMORY_SUFFICIENT_THRESHOLD) { + return new TsFileInsertionScanDataContainer( + pipeName, creationTime, tsFile, pattern, startTime, endTime, pipeTaskMeta, sourceEvent); + } + + if (pattern instanceof IoTDBPipePattern + && !((IoTDBPipePattern) pattern).mayMatchMultipleTimeSeriesInOneDevice()) { + // If the pattern matches only one time series in one device, use query container here // because there is no timestamps merge overhead. // // Note: We judge prefix pattern as matching multiple timeseries in one device because it's // hard to know whether it only matches one timeseries, while matching multiple is often the // case. return new TsFileInsertionQueryDataContainer( - tsFile, pattern, startTime, endTime, pipeTaskMeta, sourceEvent); + pipeName, creationTime, tsFile, pattern, startTime, endTime, pipeTaskMeta, sourceEvent); } final Map deviceIsAlignedMap = @@ -85,7 +104,7 @@ public TsFileInsertionDataContainer provide() throws IOException { // If we failed to get from cache, it indicates that the memory usage is high. // We use scan data container because it requires less memory. return new TsFileInsertionScanDataContainer( - tsFile, pattern, startTime, endTime, pipeTaskMeta, sourceEvent); + pipeName, creationTime, tsFile, pattern, startTime, endTime, pipeTaskMeta, sourceEvent); } final int originalSize = deviceIsAlignedMap.size(); @@ -95,8 +114,10 @@ public TsFileInsertionDataContainer provide() throws IOException { return (double) filteredDeviceIsAlignedMap.size() / originalSize > PipeConfig.getInstance().getPipeTsFileScanParsingThreshold() ? 
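Note: provide() above now picks the parser in three steps: under memory pressure it always takes the scan container (it streams chunk by chunk); a pattern that can match at most one series per device takes the query container (no timestamp-merge overhead); otherwise the matched-device ratio is compared against the configured scan-parsing threshold. A compact restatement of that decision order, where the parameters are illustrative stand-ins for the PipeConfig / PipeTsFilePublicResource values used in the diff:

    class ContainerChoiceSketch {
      enum Kind { SCAN, QUERY }

      static Kind choose(
          double usedMemoryRatio,        // used / total non-floating pipe memory
          double memorySufficientThreshold,
          boolean singleSeriesPerDevice, // pattern matches at most one series per device
          double matchedDeviceRatio,     // filtered devices / all devices in the file
          double scanParsingThreshold) {
        if (usedMemoryRatio > memorySufficientThreshold) {
          return Kind.SCAN; // low memory: stream instead of materializing query state
        }
        if (singleSeriesPerDevice) {
          return Kind.QUERY; // nothing to merge, the query path is cheap
        }
        // Scanning the whole file only pays off when most devices are selected anyway.
        return matchedDeviceRatio > scanParsingThreshold ? Kind.SCAN : Kind.QUERY;
      }
    }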
new TsFileInsertionScanDataContainer( - tsFile, pattern, startTime, endTime, pipeTaskMeta, sourceEvent) + pipeName, creationTime, tsFile, pattern, startTime, endTime, pipeTaskMeta, sourceEvent) : new TsFileInsertionQueryDataContainer( + pipeName, + creationTime, tsFile, pattern, startTime, diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/query/TsFileInsertionQueryDataContainer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/query/TsFileInsertionQueryDataContainer.java index 2a0c7c410f733..897d820df90f2 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/query/TsFileInsertionQueryDataContainer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/query/TsFileInsertionQueryDataContainer.java @@ -19,10 +19,10 @@ package org.apache.iotdb.db.pipe.event.common.tsfile.container.query; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; import org.apache.iotdb.commons.pipe.config.PipeConfig; +import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.commons.pipe.pattern.PipePattern; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tsfile.container.TsFileInsertionDataContainer; @@ -72,10 +72,12 @@ public class TsFileInsertionQueryDataContainer extends TsFileInsertionDataContai public TsFileInsertionQueryDataContainer( final File tsFile, final PipePattern pattern, final long startTime, final long endTime) throws IOException { - this(tsFile, pattern, startTime, endTime, null, null); + this(null, 0, tsFile, pattern, startTime, endTime, null, null); } public TsFileInsertionQueryDataContainer( + final String pipeName, + final long creationTime, final File tsFile, final PipePattern pattern, final long startTime, @@ -83,10 +85,21 @@ public TsFileInsertionQueryDataContainer( final PipeTaskMeta pipeTaskMeta, final EnrichedEvent sourceEvent) throws IOException { - this(tsFile, pattern, startTime, endTime, pipeTaskMeta, sourceEvent, null); + this( + pipeName, + creationTime, + tsFile, + pattern, + startTime, + endTime, + pipeTaskMeta, + sourceEvent, + null); } public TsFileInsertionQueryDataContainer( + final String pipeName, + final long creationTime, final File tsFile, final PipePattern pattern, final long startTime, @@ -95,7 +108,7 @@ public TsFileInsertionQueryDataContainer( final EnrichedEvent sourceEvent, final Map deviceIsAlignedMap) throws IOException { - super(pattern, startTime, endTime, pipeTaskMeta, sourceEvent); + super(pipeName, creationTime, pattern, startTime, endTime, pipeTaskMeta, sourceEvent); try { final PipeTsFileResourceManager tsFileResourceManager = PipeDataNodeResourceManager.tsfile(); @@ -263,75 +276,83 @@ private Map> readFilteredDeviceMeasurementsMap( @Override public Iterable toTabletInsertionEvents() { - return () -> - new Iterator() { - - private TsFileInsertionQueryDataTabletIterator tabletIterator = null; - - @Override - public boolean hasNext() { - while (tabletIterator == null || !tabletIterator.hasNext()) { - if (!deviceMeasurementsMapIterator.hasNext()) { - close(); - return false; - } - - final Map.Entry> entry = deviceMeasurementsMapIterator.next(); - - try { - tabletIterator = - new 
TsFileInsertionQueryDataTabletIterator( - tsFileReader, - measurementDataTypeMap, - ((PlainDeviceID) entry.getKey()).toStringID(), - entry.getValue(), - timeFilterExpression); - } catch (final Exception e) { - close(); - throw new PipeException("failed to create TsFileInsertionDataTabletIterator", e); - } - } - - return true; - } + if (tabletInsertionIterable == null) { + tabletInsertionIterable = + () -> + new Iterator() { + + private TsFileInsertionQueryDataTabletIterator tabletIterator = null; + + @Override + public boolean hasNext() { + while (tabletIterator == null || !tabletIterator.hasNext()) { + if (!deviceMeasurementsMapIterator.hasNext()) { + close(); + return false; + } + + final Map.Entry> entry = + deviceMeasurementsMapIterator.next(); + + try { + tabletIterator = + new TsFileInsertionQueryDataTabletIterator( + tsFileReader, + measurementDataTypeMap, + ((PlainDeviceID) entry.getKey()).toStringID(), + entry.getValue(), + timeFilterExpression, + allocatedMemoryBlockForTablet); + } catch (final Exception e) { + close(); + throw new PipeException( + "failed to create TsFileInsertionDataTabletIterator", e); + } + } + + return true; + } + + @Override + public TabletInsertionEvent next() { + if (!hasNext()) { + close(); + throw new NoSuchElementException(); + } + + final Tablet tablet = tabletIterator.next(); + final boolean isAligned = + deviceIsAlignedMap.getOrDefault(new PlainDeviceID(tablet.deviceId), false); + + final TabletInsertionEvent next; + if (!hasNext()) { + next = + new PipeRawTabletInsertionEvent( + tablet, + isAligned, + sourceEvent != null ? sourceEvent.getPipeName() : null, + sourceEvent != null ? sourceEvent.getCreationTime() : 0, + pipeTaskMeta, + sourceEvent, + true); + close(); + } else { + next = + new PipeRawTabletInsertionEvent( + tablet, + isAligned, + sourceEvent != null ? sourceEvent.getPipeName() : null, + sourceEvent != null ? sourceEvent.getCreationTime() : 0, + pipeTaskMeta, + sourceEvent, + false); + } + return next; + } + }; + } - @Override - public TabletInsertionEvent next() { - if (!hasNext()) { - close(); - throw new NoSuchElementException(); - } - - final Tablet tablet = tabletIterator.next(); - final boolean isAligned = - deviceIsAlignedMap.getOrDefault(new PlainDeviceID(tablet.deviceId), false); - - final TabletInsertionEvent next; - if (!hasNext()) { - next = - new PipeRawTabletInsertionEvent( - tablet, - isAligned, - sourceEvent != null ? sourceEvent.getPipeName() : null, - sourceEvent != null ? sourceEvent.getCreationTime() : 0, - pipeTaskMeta, - sourceEvent, - true); - close(); - } else { - next = - new PipeRawTabletInsertionEvent( - tablet, - isAligned, - sourceEvent != null ? sourceEvent.getPipeName() : null, - sourceEvent != null ? 
sourceEvent.getCreationTime() : 0, - pipeTaskMeta, - sourceEvent, - false); - } - return next; - } - }; + return tabletInsertionIterable; } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/query/TsFileInsertionQueryDataTabletIterator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/query/TsFileInsertionQueryDataTabletIterator.java index efe58f5ff60e7..5fa252412d4fc 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/query/TsFileInsertionQueryDataTabletIterator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/query/TsFileInsertionQueryDataTabletIterator.java @@ -19,7 +19,9 @@ package org.apache.iotdb.db.pipe.event.common.tsfile.container.query; -import org.apache.iotdb.commons.pipe.config.PipeConfig; +import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; +import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock; +import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil; import org.apache.iotdb.pipe.api.exception.PipeException; import org.apache.tsfile.common.constant.TsFileConstant; @@ -31,6 +33,7 @@ import org.apache.tsfile.read.expression.IExpression; import org.apache.tsfile.read.expression.QueryExpression; import org.apache.tsfile.read.query.dataset.QueryDataSet; +import org.apache.tsfile.utils.Pair; import org.apache.tsfile.write.record.Tablet; import org.apache.tsfile.write.schema.MeasurementSchema; @@ -40,6 +43,7 @@ import java.util.List; import java.util.Map; import java.util.NoSuchElementException; +import java.util.Objects; import java.util.stream.Collectors; public class TsFileInsertionQueryDataTabletIterator implements Iterator { @@ -54,12 +58,17 @@ public class TsFileInsertionQueryDataTabletIterator implements Iterator private final QueryDataSet queryDataSet; + private final PipeMemoryBlock allocatedBlockForTablet; + + private RowRecord rowRecord; + TsFileInsertionQueryDataTabletIterator( final TsFileReader tsFileReader, final Map measurementDataTypeMap, final String deviceId, final List measurements, - final IExpression timeFilterExpression) + final IExpression timeFilterExpression, + final PipeMemoryBlock allocatedBlockForTablet) throws IOException { this.tsFileReader = tsFileReader; this.measurementDataTypeMap = measurementDataTypeMap; @@ -77,6 +86,8 @@ public class TsFileInsertionQueryDataTabletIterator implements Iterator this.timeFilterExpression = timeFilterExpression; this.queryDataSet = buildQueryDataSet(); + + this.allocatedBlockForTablet = Objects.requireNonNull(allocatedBlockForTablet); } private QueryDataSet buildQueryDataSet() throws IOException { @@ -116,12 +127,31 @@ private Tablet buildNextTablet() throws IOException { measurementDataTypeMap.get(deviceId + TsFileConstant.PATH_SEPARATOR + measurement); schemas.add(new MeasurementSchema(measurement, dataType)); } - final Tablet tablet = - new Tablet(deviceId, schemas, PipeConfig.getInstance().getPipeDataStructureTabletRowSize()); - tablet.initBitMaps(); + Tablet tablet = null; + if (!queryDataSet.hasNext()) { + tablet = new Tablet(deviceId, schemas, 1); + tablet.initBitMaps(); + return tablet; + } + + boolean isFirstRow = true; while (queryDataSet.hasNext()) { - final RowRecord rowRecord = queryDataSet.next(); + final RowRecord rowRecord = this.rowRecord != null ? 
this.rowRecord : queryDataSet.next(); + if (isFirstRow) { + // Calculate row count and memory size of the tablet based on the first row + this.rowRecord = rowRecord; // Save the first row for later use + Pair rowCountAndMemorySize = + PipeMemoryWeightUtil.calculateTabletRowCountAndMemory(rowRecord); + tablet = new Tablet(deviceId, schemas, rowCountAndMemorySize.getLeft()); + tablet.initBitMaps(); + if (allocatedBlockForTablet.getMemoryUsageInBytes() < rowCountAndMemorySize.getRight()) { + PipeDataNodeResourceManager.memory() + .forceResize(allocatedBlockForTablet, rowCountAndMemorySize.getRight()); + } + this.rowRecord = null; // Clear the saved first row + isFirstRow = false; + } final int rowIndex = tablet.rowSize; @@ -134,7 +164,7 @@ private Tablet buildNextTablet() throws IOException { tablet.addValue( measurements.get(i), rowIndex, - field == null ? null : field.getObjectValue(field.getDataType())); + field == null ? null : field.getObjectValue(schemas.get(i).getType())); } tablet.rowSize++; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/scan/TsFileInsertionScanDataContainer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/scan/TsFileInsertionScanDataContainer.java index a841986064e35..30006b358080a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/scan/TsFileInsertionScanDataContainer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/scan/TsFileInsertionScanDataContainer.java @@ -19,12 +19,15 @@ package org.apache.iotdb.db.pipe.event.common.tsfile.container.scan; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; import org.apache.iotdb.commons.pipe.config.PipeConfig; +import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.commons.pipe.pattern.PipePattern; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tsfile.container.TsFileInsertionDataContainer; +import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; +import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock; +import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil; import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; import org.apache.iotdb.pipe.api.exception.PipeException; @@ -43,6 +46,7 @@ import org.apache.tsfile.read.reader.chunk.ChunkReader; import org.apache.tsfile.utils.Binary; import org.apache.tsfile.utils.DateUtils; +import org.apache.tsfile.utils.Pair; import org.apache.tsfile.utils.TsPrimitiveType; import org.apache.tsfile.write.UnSupportedDataTypeException; import org.apache.tsfile.write.record.Tablet; @@ -52,28 +56,46 @@ import java.io.IOException; import java.time.LocalDate; import java.util.ArrayList; +import java.util.HashMap; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.NoSuchElementException; import java.util.Objects; public class TsFileInsertionScanDataContainer extends TsFileInsertionDataContainer { + private static final LocalDate EMPTY_DATE = LocalDate.of(1000, 1, 1); + + private final int pipeMaxAlignedSeriesNumInOneBatch = + PipeConfig.getInstance().getPipeMaxAlignedSeriesNumInOneBatch(); + private final long startTime; private final long endTime; 
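Note: this scan parser bounds its memory per aligned chunk group: pipeMaxAlignedSeriesNumInOneBatch (declared above) caps how many sequential value chunks are emitted in one batch, while the time-chunk caches declared just below (timeChunkList / isMultiPageList) let each batch replay a rewound copy of its time chunk, so a wide aligned device never pulls all of its value chunks into memory at once. The batching rule in isolation, with a generic element type and an illustrative cap:

    import java.util.ArrayList;
    import java.util.List;

    class AlignedBatchSketch {
      // Stand-in for pipeMaxAlignedSeriesNumInOneBatch.
      static final int MAX_SERIES_PER_BATCH = 8;

      // Split one chunk group's value chunks into bounded batches; the caller
      // pairs each batch with a rewound copy of the cached time chunk.
      static <C> List<List<C>> toBatches(List<C> valueChunks) {
        final List<List<C>> batches = new ArrayList<>();
        for (int from = 0; from < valueChunks.size(); from += MAX_SERIES_PER_BATCH) {
          final int to = Math.min(from + MAX_SERIES_PER_BATCH, valueChunks.size());
          batches.add(new ArrayList<>(valueChunks.subList(from, to)));
        }
        return batches;
      }
    }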
private final Filter filter; private IChunkReader chunkReader; private BatchData data; + private final PipeMemoryBlock allocatedMemoryBlockForBatchData; - private boolean isMultiPage; + private boolean currentIsMultiPage; private String currentDevice; private boolean currentIsAligned; private final List currentMeasurements = new ArrayList<>(); + // Cached time chunk + private final List timeChunkList = new ArrayList<>(); + private final List isMultiPageList = new ArrayList<>(); + + private final Map measurementIndexMap = new HashMap<>(); + private int lastIndex = -1; + private ChunkHeader firstChunkHeader4NextSequentialValueChunks; + private byte lastMarker = Byte.MIN_VALUE; public TsFileInsertionScanDataContainer( + final String pipeName, + final long creationTime, final File tsFile, final PipePattern pattern, final long startTime, @@ -81,12 +103,18 @@ public TsFileInsertionScanDataContainer( final PipeTaskMeta pipeTaskMeta, final EnrichedEvent sourceEvent) throws IOException { - super(pattern, startTime, endTime, pipeTaskMeta, sourceEvent); + super(pipeName, creationTime, pattern, startTime, endTime, pipeTaskMeta, sourceEvent); this.startTime = startTime; this.endTime = endTime; filter = Objects.nonNull(timeFilterExpression) ? timeFilterExpression.getFilter() : null; + // Allocate empty memory block, will be resized later. + this.allocatedMemoryBlockForBatchData = + PipeDataNodeResourceManager.memory() + .forceAllocateForTabletWithRetry( + PipeConfig.getInstance().getPipeDataStructureTabletSizeInBytes()); + try { tsFileSequenceReader = new TsFileSequenceReader(tsFile.getAbsolutePath(), false, false); tsFileSequenceReader.position((long) TSFileConfig.MAGIC_STRING.getBytes().length + 1); @@ -98,34 +126,89 @@ public TsFileInsertionScanDataContainer( } } + public TsFileInsertionScanDataContainer( + final File tsFile, + final PipePattern pattern, + final long startTime, + final long endTime, + final PipeTaskMeta pipeTaskMeta, + final EnrichedEvent sourceEvent) + throws IOException { + this(null, 0, tsFile, pattern, startTime, endTime, pipeTaskMeta, sourceEvent); + } + @Override public Iterable toTabletInsertionEvents() { - return () -> - new Iterator() { + if (tabletInsertionIterable == null) { + tabletInsertionIterable = + () -> + new Iterator() { + + @Override + public boolean hasNext() { + return Objects.nonNull(chunkReader); + } + + @Override + public TabletInsertionEvent next() { + if (!hasNext()) { + close(); + throw new NoSuchElementException(); + } + + // currentIsAligned is initialized when TsFileInsertionEventScanParser is + // constructed. + // When the getNextTablet function is called, currentIsAligned may be updated, + // causing + // the currentIsAligned information to be inconsistent with the current Tablet + // information. + final boolean isAligned = currentIsAligned; + final Tablet tablet = getNextTablet(); + final boolean hasNext = hasNext(); + try { + return new PipeRawTabletInsertionEvent( + tablet, + isAligned, + sourceEvent != null ? sourceEvent.getPipeName() : null, + sourceEvent != null ? 
sourceEvent.getCreationTime() : 0, + pipeTaskMeta, + sourceEvent, + !hasNext); + } finally { + if (!hasNext) { + close(); + } + } + } + }; + } + return tabletInsertionIterable; + } + public Iterable> toTabletWithIsAligneds() { + return () -> + new Iterator>() { @Override public boolean hasNext() { return Objects.nonNull(chunkReader); } @Override - public TabletInsertionEvent next() { + public Pair next() { if (!hasNext()) { close(); throw new NoSuchElementException(); } + // currentIsAligned is initialized when TsFileInsertionEventScanParser is constructed. + // When the getNextTablet function is called, currentIsAligned may be updated, causing + // the currentIsAligned information to be inconsistent with the current Tablet + // information. + final boolean isAligned = currentIsAligned; final Tablet tablet = getNextTablet(); final boolean hasNext = hasNext(); try { - return new PipeRawTabletInsertionEvent( - tablet, - currentIsAligned, - sourceEvent != null ? sourceEvent.getPipeName() : null, - sourceEvent != null ? sourceEvent.getCreationTime() : 0, - pipeTaskMeta, - sourceEvent, - !hasNext); + return new Pair<>(tablet, isAligned); } finally { if (!hasNext) { close(); @@ -137,19 +220,39 @@ public TabletInsertionEvent next() { private Tablet getNextTablet() { try { - final Tablet tablet = - new Tablet( - currentDevice, - currentMeasurements, - PipeConfig.getInstance().getPipeDataStructureTabletRowSize()); - tablet.initBitMaps(); + Tablet tablet = null; + + if (!data.hasCurrent()) { + tablet = new Tablet(currentDevice, currentMeasurements, 1); + tablet.initBitMaps(); + // Ignore the memory cost of tablet + PipeDataNodeResourceManager.memory().forceResize(allocatedMemoryBlockForTablet, 0); + return tablet; + } + boolean isFirstRow = true; while (data.hasCurrent()) { - if (isMultiPage || data.currentTime() >= startTime && data.currentTime() <= endTime) { + if (currentIsMultiPage + || data.currentTime() >= startTime && data.currentTime() <= endTime) { + if (isFirstRow) { + // Calculate row count and memory size of the tablet based on the first row + Pair rowCountAndMemorySize = + PipeMemoryWeightUtil.calculateTabletRowCountAndMemory(data); + tablet = + new Tablet(currentDevice, currentMeasurements, rowCountAndMemorySize.getLeft()); + tablet.initBitMaps(); + if (allocatedMemoryBlockForTablet.getMemoryUsageInBytes() + < rowCountAndMemorySize.getRight()) { + PipeDataNodeResourceManager.memory() + .forceResize(allocatedMemoryBlockForTablet, rowCountAndMemorySize.getRight()); + } + isFirstRow = false; + } + final int rowIndex = tablet.rowSize; tablet.addTimestamp(rowIndex, data.currentTime()); - putValueToColumns(data, tablet.values, rowIndex); + putValueToColumns(data, tablet, rowIndex); tablet.rowSize++; } @@ -159,16 +262,20 @@ private Tablet getNextTablet() { data = chunkReader.nextPageData(); } - if (tablet.rowSize == tablet.getMaxRowNumber()) { + if (tablet != null && tablet.rowSize == tablet.getMaxRowNumber()) { break; } } + if (tablet == null) { + tablet = new Tablet(currentDevice, currentMeasurements, 1); + tablet.initBitMaps(); + } + // Switch chunk reader iff current chunk is all consumed if (!data.hasCurrent()) { prepareData(); } - return tablet; } catch (final Exception e) { close(); @@ -189,16 +296,32 @@ private void prepareData() throws IOException { do { data = chunkReader.nextPageData(); + long size = PipeMemoryWeightUtil.calculateBatchDataRamBytesUsed(data); + if (allocatedMemoryBlockForBatchData.getMemoryUsageInBytes() < size) { + 
PipeDataNodeResourceManager.memory().forceResize(allocatedMemoryBlockForBatchData, size); + } } while (!data.hasCurrent() && chunkReader.hasNextSatisfiedPage()); } while (!data.hasCurrent()); } - private void putValueToColumns(final BatchData data, final Object[] columns, final int rowIndex) { - final TSDataType type = data.getDataType(); - if (type == TSDataType.VECTOR) { + private void putValueToColumns(final BatchData data, final Tablet tablet, final int rowIndex) { + final Object[] columns = tablet.values; + + if (data.getDataType() == TSDataType.VECTOR) { for (int i = 0; i < columns.length; ++i) { final TsPrimitiveType primitiveType = data.getVector()[i]; - switch (primitiveType.getDataType()) { + if (Objects.isNull(primitiveType)) { + tablet.bitMaps[i].mark(rowIndex); + final TSDataType type = tablet.getSchemas().get(i).getType(); + if (type == TSDataType.TEXT || type == TSDataType.BLOB || type == TSDataType.STRING) { + ((Binary[]) columns[i])[rowIndex] = Binary.EMPTY_VALUE; + } + if (type == TSDataType.DATE) { + ((LocalDate[]) columns[i])[rowIndex] = EMPTY_DATE; + } + continue; + } + switch (tablet.getSchemas().get(i).getType()) { case BOOLEAN: ((boolean[]) columns[i])[rowIndex] = primitiveType.getBoolean(); break; @@ -229,7 +352,7 @@ private void putValueToColumns(final BatchData data, final Object[] columns, fin } } } else { - switch (type) { + switch (tablet.getSchemas().get(0).getType()) { case BOOLEAN: ((boolean[]) columns[0])[rowIndex] = data.getBoolean(); break; @@ -262,7 +385,6 @@ private void putValueToColumns(final BatchData data, final Object[] columns, fin private void moveToNextChunkReader() throws IOException, IllegalStateException { ChunkHeader chunkHeader; - Chunk timeChunk = null; final List valueChunkList = new ArrayList<>(); currentMeasurements.clear(); @@ -280,17 +402,9 @@ private void moveToNextChunkReader() throws IOException, IllegalStateException { case MetaMarker.TIME_CHUNK_HEADER: case MetaMarker.ONLY_ONE_PAGE_CHUNK_HEADER: case MetaMarker.ONLY_ONE_PAGE_TIME_CHUNK_HEADER: - if (Objects.nonNull(timeChunk) && !currentMeasurements.isEmpty()) { - chunkReader = - isMultiPage - ? new AlignedChunkReader(timeChunk, valueChunkList, filter) - : new AlignedSinglePageWholeChunkReader(timeChunk, valueChunkList); - currentIsAligned = true; - lastMarker = marker; - return; - } - - isMultiPage = marker == MetaMarker.CHUNK_HEADER || marker == MetaMarker.TIME_CHUNK_HEADER; + // Notice that the data in one chunk group is either aligned or non-aligned + // There is no need to consider non-aligned chunks when there are value chunks + currentIsMultiPage = marker == MetaMarker.CHUNK_HEADER; chunkHeader = tsFileSequenceReader.readChunkHeader(marker); @@ -302,9 +416,10 @@ private void moveToNextChunkReader() throws IOException, IllegalStateException { if ((chunkHeader.getChunkType() & TsFileConstant.TIME_COLUMN_MASK) == TsFileConstant.TIME_COLUMN_MASK) { - timeChunk = + timeChunkList.add( new Chunk( - chunkHeader, tsFileSequenceReader.readChunk(-1, chunkHeader.getDataSize())); + chunkHeader, tsFileSequenceReader.readChunk(-1, chunkHeader.getDataSize()))); + isMultiPageList.add(marker == MetaMarker.TIME_CHUNK_HEADER); break; } @@ -315,7 +430,7 @@ private void moveToNextChunkReader() throws IOException, IllegalStateException { } chunkReader = - isMultiPage + currentIsMultiPage ? 
new ChunkReader( new Chunk( chunkHeader, @@ -331,39 +446,64 @@ private void moveToNextChunkReader() throws IOException, IllegalStateException { return; case MetaMarker.VALUE_CHUNK_HEADER: case MetaMarker.ONLY_ONE_PAGE_VALUE_CHUNK_HEADER: - chunkHeader = tsFileSequenceReader.readChunkHeader(marker); + if (Objects.isNull(firstChunkHeader4NextSequentialValueChunks)) { + chunkHeader = tsFileSequenceReader.readChunkHeader(marker); + + if (Objects.isNull(currentDevice) + || !pattern.matchesMeasurement(currentDevice, chunkHeader.getMeasurementID())) { + tsFileSequenceReader.position( + tsFileSequenceReader.position() + chunkHeader.getDataSize()); + break; + } - if (Objects.isNull(currentDevice) - || !pattern.matchesMeasurement(currentDevice, chunkHeader.getMeasurementID())) { - tsFileSequenceReader.position( - tsFileSequenceReader.position() + chunkHeader.getDataSize()); - break; + // Increase value index + final int valueIndex = + measurementIndexMap.compute( + chunkHeader.getMeasurementID(), + (measurement, index) -> Objects.nonNull(index) ? index + 1 : 0); + + // Emit when encountered non-sequential value chunk, or the chunk list size exceeds + // certain value to avoid OOM + // Do not record or end current value chunks when there are empty chunks + if (chunkHeader.getDataSize() == 0) { + break; + } + boolean needReturn = false; + if (lastIndex >= 0 + && (valueIndex != lastIndex + || valueChunkList.size() >= pipeMaxAlignedSeriesNumInOneBatch)) { + needReturn = recordAlignedChunk(valueChunkList, marker); + } + lastIndex = valueIndex; + if (needReturn) { + firstChunkHeader4NextSequentialValueChunks = chunkHeader; + return; + } + } else { + chunkHeader = firstChunkHeader4NextSequentialValueChunks; + firstChunkHeader4NextSequentialValueChunks = null; } - // Do not record empty chunk - if (chunkHeader.getDataSize() > 0) { - valueChunkList.add( - new Chunk( - chunkHeader, tsFileSequenceReader.readChunk(-1, chunkHeader.getDataSize()))); - currentMeasurements.add( - new MeasurementSchema(chunkHeader.getMeasurementID(), chunkHeader.getDataType())); - } + valueChunkList.add( + new Chunk( + chunkHeader, tsFileSequenceReader.readChunk(-1, chunkHeader.getDataSize()))); + currentMeasurements.add( + new MeasurementSchema(chunkHeader.getMeasurementID(), chunkHeader.getDataType())); break; case MetaMarker.CHUNK_GROUP_HEADER: // Return before "currentDevice" changes - if (Objects.nonNull(timeChunk) && !currentMeasurements.isEmpty()) { - chunkReader = - isMultiPage - ? new AlignedChunkReader(timeChunk, valueChunkList, filter) - : new AlignedSinglePageWholeChunkReader(timeChunk, valueChunkList); - currentIsAligned = true; - lastMarker = marker; + if (recordAlignedChunk(valueChunkList, marker)) { return; } - // TODO: Replace it by IDeviceID final String deviceID = ((PlainDeviceID) tsFileSequenceReader.readChunkGroupHeader().getDeviceID()) .toStringID(); + // Clear because the cached data will never be used in the next chunk group + lastIndex = -1; + timeChunkList.clear(); + isMultiPageList.clear(); + measurementIndexMap.clear(); + currentDevice = pattern.mayOverlapWithDevice(deviceID) ? 
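The VALUE_CHUNK_HEADER branch above groups value chunks while their per-measurement occurrence index stays constant, and emits the accumulated batch when the index changes or a size cap is hit. A compact illustration of that grouping rule over a stream of measurement ids; MAX_SERIES_PER_BATCH stands in for pipeMaxAlignedSeriesNumInOneBatch and the emit step stands in for recordAlignedChunk.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch: batch value chunks by occurrence index; a changed index means a new
// "column round" started, so the previous round must be handed off first.
final class AlignedChunkBatcherSketch {
  static final int MAX_SERIES_PER_BATCH = 8; // illustrative cap

  public static void main(String[] args) {
    String[] incoming = {"s1", "s2", "s3", "s1", "s2"}; // ids in file order
    Map<String, Integer> occurrenceIndex = new HashMap<>();
    List<String> batch = new ArrayList<>();
    int lastIndex = -1;

    for (String measurement : incoming) {
      // First occurrence -> 0, each repeat increments, as in the diff's compute()
      int index = occurrenceIndex.merge(measurement, 0, (old, zero) -> old + 1);
      if (lastIndex >= 0 && (index != lastIndex || batch.size() >= MAX_SERIES_PER_BATCH)) {
        System.out.println("emit batch " + batch); // hand off to a chunk reader
        batch.clear();
      }
      lastIndex = index;
      batch.add(measurement);
    }
    System.out.println("emit final batch " + batch);
  }
}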
deviceID : null; break; case MetaMarker.OPERATION_INDEX_RANGE: @@ -375,14 +515,34 @@ private void moveToNextChunkReader() throws IOException, IllegalStateException { } lastMarker = marker; - if (Objects.nonNull(timeChunk) && !currentMeasurements.isEmpty()) { + if (!recordAlignedChunk(valueChunkList, marker)) { + chunkReader = null; + } + } + + private boolean recordAlignedChunk(final List valueChunkList, final byte marker) + throws IOException { + if (!valueChunkList.isEmpty()) { + final Chunk timeChunk = timeChunkList.get(lastIndex); + timeChunk.getData().rewind(); + currentIsMultiPage = isMultiPageList.get(lastIndex); chunkReader = - isMultiPage + currentIsMultiPage ? new AlignedChunkReader(timeChunk, valueChunkList, filter) : new AlignedSinglePageWholeChunkReader(timeChunk, valueChunkList); currentIsAligned = true; - } else { - chunkReader = null; + lastMarker = marker; + return true; + } + return false; + } + + @Override + public void close() { + super.close(); + + if (allocatedMemoryBlockForBatchData != null) { + allocatedMemoryBlockForBatchData.close(); } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEvent.java index 7e12a909b1f19..07658d83cf45e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEvent.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEvent.java @@ -20,10 +20,11 @@ package org.apache.iotdb.db.pipe.event.realtime; import org.apache.iotdb.commons.consensus.index.ProgressIndex; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.commons.pipe.pattern.PipePattern; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.epoch.TsFileEpoch; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.PipeRealtimeDataRegionSource; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.epoch.TsFileEpoch; import java.util.Map; @@ -87,6 +88,11 @@ public void gcSchemaInfo() { device2Measurements = null; } + public boolean mayExtractorUseTablets(final PipeRealtimeDataRegionSource extractor) { + final TsFileEpoch.State state = tsFileEpoch.getState(extractor); + return state.equals(TsFileEpoch.State.EMPTY) || state.equals(TsFileEpoch.State.USING_TABLET); + } + @Override public boolean increaseReferenceCount(final String holderMessage) { // This method must be overridden, otherwise during the real-time data extraction stage, the @@ -124,6 +130,11 @@ public boolean internallyDecreaseResourceReferenceCount(final String holderMessa return event.internallyDecreaseResourceReferenceCount(holderMessage); } + @Override + public void bindProgressIndex(final ProgressIndex progressIndex) { + event.bindProgressIndex(progressIndex); + } + @Override public ProgressIndex getProgressIndex() { return event.getProgressIndex(); @@ -171,7 +182,9 @@ public PipeRealtimeEvent shallowCopySelfAndBindPipeTaskMetaForProgressReport( event.shallowCopySelfAndBindPipeTaskMetaForProgressReport( pipeName, creationTime, pipeTaskMeta, pattern, startTime, endTime), this.tsFileEpoch, - this.device2Measurements, + // device2Measurements is not used anymore, so it is not copied. 
+ // If null is not passed, the field will not be GCed and may cause OOM. + null, pipeTaskMeta, pattern, startTime, diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEventFactory.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEventFactory.java index 1dd86e1e5d880..cf52a5c0b2e6a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEventFactory.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEventFactory.java @@ -24,35 +24,25 @@ import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.epoch.TsFileEpochManager; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.epoch.TsFileEpochManager; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; public class PipeRealtimeEventFactory { private static final TsFileEpochManager TS_FILE_EPOCH_MANAGER = new TsFileEpochManager(); public static PipeRealtimeEvent createRealtimeEvent( - final TsFileResource resource, final boolean isLoaded, final boolean isGeneratedByPipe) { + final TsFileResource resource, final boolean isLoaded) { return TS_FILE_EPOCH_MANAGER.bindPipeTsFileInsertionEvent( - new PipeTsFileInsertionEvent(resource, isLoaded, isGeneratedByPipe, false), resource); + new PipeTsFileInsertionEvent(resource, isLoaded), resource); } public static PipeRealtimeEvent createRealtimeEvent( - final WALEntryHandler walEntryHandler, - final InsertNode insertNode, - final TsFileResource resource) { + final InsertNode insertNode, final TsFileResource resource) { return TS_FILE_EPOCH_MANAGER.bindPipeInsertNodeTabletInsertionEvent( - new PipeInsertNodeTabletInsertionEvent( - walEntryHandler, - insertNode.getDevicePath(), - insertNode.getProgressIndex(), - insertNode.isAligned(), - insertNode.isGeneratedByPipe()), - insertNode, - resource); + new PipeInsertNodeTabletInsertionEvent(insertNode), insertNode, resource); } public static PipeRealtimeEvent createRealtimeEvent( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHybridExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHybridExtractor.java deleted file mode 100644 index ced920f378c66..0000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHybridExtractor.java +++ /dev/null @@ -1,371 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.db.pipe.extractor.dataregion.realtime; - -import org.apache.iotdb.commons.exception.pipe.PipeRuntimeNonCriticalException; -import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.event.ProgressReportEvent; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; -import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; -import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent; -import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; -import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent; -import org.apache.iotdb.db.pipe.extractor.dataregion.IoTDBDataRegionExtractor; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.epoch.TsFileEpoch; -import org.apache.iotdb.db.pipe.metric.PipeDataRegionExtractorMetrics; -import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; -import org.apache.iotdb.db.storageengine.dataregion.wal.WALManager; -import org.apache.iotdb.pipe.api.event.Event; -import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; -import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Objects; - -public class PipeRealtimeDataRegionHybridExtractor extends PipeRealtimeDataRegionExtractor { - - private static final Logger LOGGER = - LoggerFactory.getLogger(PipeRealtimeDataRegionHybridExtractor.class); - - @Override - protected void doExtract(final PipeRealtimeEvent event) { - final Event eventToExtract = event.getEvent(); - - if (eventToExtract instanceof TabletInsertionEvent) { - extractTabletInsertion(event); - } else if (eventToExtract instanceof TsFileInsertionEvent) { - extractTsFileInsertion(event); - } else if (eventToExtract instanceof PipeHeartbeatEvent) { - extractHeartbeat(event); - } else if (eventToExtract instanceof PipeSchemaRegionWritePlanEvent) { - extractDirectly(event); - } else { - throw new UnsupportedOperationException( - String.format( - "Unsupported event type %s for hybrid realtime extractor %s", - eventToExtract.getClass(), this)); - } - } - - @Override - public boolean isNeedListenToTsFile() { - return shouldExtractInsertion; - } - - @Override - public boolean isNeedListenToInsertNode() { - return shouldExtractInsertion; - } - - private void extractTabletInsertion(final PipeRealtimeEvent event) { - if (canNotUseTabletAnyMore()) { - event - .getTsFileEpoch() - .migrateState( - this, - state -> { - switch (state) { - case EMPTY: - case USING_TSFILE: - return TsFileEpoch.State.USING_TSFILE; - case USING_TABLET: - case USING_BOTH: - default: - return TsFileEpoch.State.USING_BOTH; - } - }); - } - - final TsFileEpoch.State state = event.getTsFileEpoch().getState(this); - switch (state) { - case USING_TSFILE: - // Ignore the tablet event. 
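The hybrid extractor being removed here drives every decision through an atomically migrated TsFileEpoch state. A stripped-down sketch of that state machine, assuming only the four states visible in the removed code; migrateState here is a plain AtomicReference update, not the real TsFileEpoch API.

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.UnaryOperator;

// Sketch: per-epoch mode selection with USING_BOTH as the absorbing state.
final class EpochStateSketch {
  enum State { EMPTY, USING_TABLET, USING_TSFILE, USING_BOTH }

  private final AtomicReference<State> state = new AtomicReference<>(State.EMPTY);

  void migrateState(UnaryOperator<State> migration) {
    state.updateAndGet(migration); // atomic; may be retried under contention
  }

  State current() { return state.get(); }

  public static void main(String[] args) {
    EpochStateSketch epoch = new EpochStateSketch();
    // A tablet event arrives while tablets are still allowed:
    epoch.migrateState(s -> s == State.EMPTY ? State.USING_TABLET : s);
    // Backpressure forces tsfile mode on top of an epoch already using tablets:
    epoch.migrateState(
        s -> s == State.EMPTY || s == State.USING_TSFILE ? State.USING_TSFILE : State.USING_BOTH);
    System.out.println(epoch.current()); // USING_BOTH
  }
}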
- event.decreaseReferenceCount(PipeRealtimeDataRegionHybridExtractor.class.getName(), false); - break; - case EMPTY: - case USING_TABLET: - case USING_BOTH: - if (!pendingQueue.waitedOffer(event)) { - // This would not happen, but just in case. - // pendingQueue is unbounded, so it should never reach capacity. - final String errorMessage = - String.format( - "extractTabletInsertion: pending queue of PipeRealtimeDataRegionHybridExtractor %s " - + "has reached capacity, discard tablet event %s, current state %s", - this, event, event.getTsFileEpoch().getState(this)); - LOGGER.error(errorMessage); - PipeDataNodeAgent.runtime() - .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage)); - - // Ignore the tablet event. - event.decreaseReferenceCount( - PipeRealtimeDataRegionHybridExtractor.class.getName(), false); - } - break; - default: - throw new UnsupportedOperationException( - String.format( - "Unsupported state %s for hybrid realtime extractor %s", - state, PipeRealtimeDataRegionHybridExtractor.class.getName())); - } - } - - private void extractTsFileInsertion(final PipeRealtimeEvent event) { - event - .getTsFileEpoch() - .migrateState( - this, - state -> { - switch (state) { - case EMPTY: - case USING_TSFILE: - return TsFileEpoch.State.USING_TSFILE; - case USING_TABLET: - if (((PipeTsFileInsertionEvent) event.getEvent()).getFileStartTime() - < event.getTsFileEpoch().getInsertNodeMinTime()) { - // Some insert nodes in the tsfile epoch are not captured by pipe, so we should - // capture the tsfile event to make sure all data in the tsfile epoch can be - // extracted. - // - // The situation can be caused by the following operations: - // 1. PipeA: start historical data extraction with flush - // 2. Data insertion - // 3. PipeB: start realtime data extraction - // 4. PipeB: start historical data extraction without flush - // 5. Data inserted in the step2 is not captured by PipeB, and if its tsfile - // epoch's state is USING_TABLET, the tsfile event will be ignored, which - // will cause the data loss in the tsfile epoch. - return TsFileEpoch.State.USING_BOTH; - } else { - // All data in the tsfile epoch has been extracted in tablet mode, so we should - // simply keep the state of the tsfile epoch and discard the tsfile event. - return TsFileEpoch.State.USING_TABLET; - } - case USING_BOTH: - default: - return TsFileEpoch.State.USING_BOTH; - } - }); - - final TsFileEpoch.State state = event.getTsFileEpoch().getState(this); - switch (state) { - case USING_TABLET: - // Though the data in tsfile event has been extracted in tablet mode, we still need to - // extract the tsfile event to help to determine isTsFileEventCountInQueueExceededLimit(). - // The extracted tsfile event will be discarded in supplyTsFileInsertion. - case EMPTY: - case USING_TSFILE: - case USING_BOTH: - if (!pendingQueue.waitedOffer(event)) { - // This would not happen, but just in case. - // pendingQueue is unbounded, so it should never reach capacity. - final String errorMessage = - String.format( - "extractTsFileInsertion: pending queue of PipeRealtimeDataRegionHybridExtractor %s " - + "has reached capacity, discard TsFile event %s, current state %s", - this, event, event.getTsFileEpoch().getState(this)); - LOGGER.error(errorMessage); - PipeDataNodeAgent.runtime() - .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage)); - - // Ignore the tsfile event. 
- event.decreaseReferenceCount( - PipeRealtimeDataRegionHybridExtractor.class.getName(), false); - } - break; - default: - throw new UnsupportedOperationException( - String.format( - "Unsupported state %s for hybrid realtime extractor %s", - state, PipeRealtimeDataRegionHybridExtractor.class.getName())); - } - } - - private boolean canNotUseTabletAnyMore() { - // In the following 5 cases, we should not extract any more tablet events. all the data - // represented by the tablet events should be carried by the following tsfile event: - // 1. If Wal size > maximum size of wal buffer, - // the write operation will be throttled, so we should not extract any more tablet events. - // 2. The number of pinned memtables has reached the dangerous threshold. - // 3. The number of historical tsFile events to transfer has exceeded the limit. - // 4. The number of realtime tsfile events to transfer has exceeded the limit. - // 5. The number of linked tsfiles has reached the dangerous threshold. - return mayWalSizeReachThrottleThreshold() - || mayMemTablePinnedCountReachDangerousThreshold() - || isHistoricalTsFileEventCountExceededLimit() - || isRealtimeTsFileEventCountExceededLimit() - || mayTsFileLinkedCountReachDangerousThreshold(); - } - - private boolean mayWalSizeReachThrottleThreshold() { - return 3 * WALManager.getInstance().getTotalDiskUsage() - > IoTDBDescriptor.getInstance().getConfig().getThrottleThreshold(); - } - - private boolean mayMemTablePinnedCountReachDangerousThreshold() { - return PipeDataNodeResourceManager.wal().getPinnedWalCount() - >= PipeConfig.getInstance().getPipeMaxAllowedPinnedMemTableCount(); - } - - private boolean isHistoricalTsFileEventCountExceededLimit() { - final IoTDBDataRegionExtractor extractor = - PipeDataRegionExtractorMetrics.getInstance().getExtractorMap().get(getTaskID()); - return Objects.nonNull(extractor) - && extractor.getHistoricalTsFileInsertionEventCount() - >= PipeConfig.getInstance().getPipeMaxAllowedHistoricalTsFilePerDataRegion(); - } - - private boolean isRealtimeTsFileEventCountExceededLimit() { - return pendingQueue.getTsFileInsertionEventCount() - >= PipeConfig.getInstance().getPipeMaxAllowedPendingTsFileEpochPerDataRegion(); - } - - private boolean mayTsFileLinkedCountReachDangerousThreshold() { - return PipeDataNodeResourceManager.tsfile().getLinkedTsfileCount() - >= PipeConfig.getInstance().getPipeMaxAllowedLinkedTsFileCount(); - } - - @Override - public Event supply() { - PipeRealtimeEvent realtimeEvent = (PipeRealtimeEvent) pendingQueue.directPoll(); - - while (realtimeEvent != null) { - final Event suppliedEvent; - - // Used to judge the type of the event, not directly for supplying. 
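canNotUseTabletAnyMore() above is a plain disjunction of independent backpressure signals, including the 3x WAL-usage margin against the write throttle threshold. A sketch of that composition with purely illustrative thresholds; none of the numbers are real defaults.

import java.util.function.BooleanSupplier;

// Sketch: any single pressure signal is enough to stop extracting tablets and
// let the following tsfile event carry the data instead.
final class TabletBackpressureSketch {
  static final long WAL_THROTTLE_THRESHOLD_BYTES = 50L * 1024 * 1024 * 1024;
  static long walDiskUsageBytes = 20L * 1024 * 1024 * 1024; // sampled elsewhere
  static int pinnedMemTables = 0, pendingTsFileEvents = 0, linkedTsFiles = 0;

  // Mirrors the "3 * usage > threshold" early-warning margin in the diff:
  // stop tablets well before the write path itself starts throttling.
  static boolean walNearThrottle() {
    return 3 * walDiskUsageBytes > WAL_THROTTLE_THRESHOLD_BYTES;
  }

  static boolean canNotUseTabletAnyMore(BooleanSupplier... signals) {
    for (BooleanSupplier signal : signals) {
      if (signal.getAsBoolean()) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    boolean fallBack =
        canNotUseTabletAnyMore(
            TabletBackpressureSketch::walNearThrottle,
            () -> pinnedMemTables >= 10,
            () -> pendingTsFileEvents >= 5,
            () -> linkedTsFiles >= 100);
    System.out.println("fall back to tsfile events: " + fallBack);
  }
}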
- final Event eventToSupply = realtimeEvent.getEvent(); - if (eventToSupply instanceof TabletInsertionEvent) { - suppliedEvent = supplyTabletInsertion(realtimeEvent); - } else if (eventToSupply instanceof TsFileInsertionEvent) { - suppliedEvent = supplyTsFileInsertion(realtimeEvent); - } else if (eventToSupply instanceof PipeHeartbeatEvent) { - suppliedEvent = supplyHeartbeat(realtimeEvent); - } else if (eventToSupply instanceof PipeSchemaRegionWritePlanEvent - || eventToSupply instanceof ProgressReportEvent) { - suppliedEvent = supplyDirectly(realtimeEvent); - } else { - throw new UnsupportedOperationException( - String.format( - "Unsupported event type %s for hybrid realtime extractor %s to supply.", - eventToSupply.getClass(), this)); - } - - realtimeEvent.decreaseReferenceCount( - PipeRealtimeDataRegionHybridExtractor.class.getName(), false); - - if (suppliedEvent != null) { - return suppliedEvent; - } - - realtimeEvent = (PipeRealtimeEvent) pendingQueue.directPoll(); - } - - // Means the pending queue is empty. - return null; - } - - private Event supplyTabletInsertion(final PipeRealtimeEvent event) { - event - .getTsFileEpoch() - .migrateState( - this, - state -> { - if (!state.equals(TsFileEpoch.State.EMPTY)) { - return state; - } - - return canNotUseTabletAnyMore() - ? TsFileEpoch.State.USING_TSFILE - : TsFileEpoch.State.USING_TABLET; - }); - - final TsFileEpoch.State state = event.getTsFileEpoch().getState(this); - switch (state) { - case USING_TSFILE: - // If the state is USING_TSFILE, discard the event and poll the next one. - return null; - case EMPTY: - case USING_TABLET: - case USING_BOTH: - default: - if (event.increaseReferenceCount(PipeRealtimeDataRegionHybridExtractor.class.getName())) { - return event.getEvent(); - } else { - // If the event's reference count can not be increased, it means the data represented by - // this event is not reliable anymore. but the data represented by this event - // has been carried by the following tsfile event, so we can just discard this event. - event.getTsFileEpoch().migrateState(this, s -> TsFileEpoch.State.USING_BOTH); - LOGGER.warn( - "Discard tablet event {} because it is not reliable anymore. " - + "Change the state of TsFileEpoch to USING_TSFILE.", - event); - return null; - } - } - } - - private Event supplyTsFileInsertion(final PipeRealtimeEvent event) { - event - .getTsFileEpoch() - .migrateState( - this, - state -> { - // This would not happen, but just in case. - if (state.equals(TsFileEpoch.State.EMPTY)) { - LOGGER.error( - String.format("EMPTY TsFileEpoch when supplying TsFile Event %s", event)); - return TsFileEpoch.State.USING_TSFILE; - } - return state; - }); - - final TsFileEpoch.State state = event.getTsFileEpoch().getState(this); - switch (state) { - case USING_TABLET: - // If the state is USING_TABLET, discard the event and poll the next one. - return null; - case EMPTY: - case USING_TSFILE: - case USING_BOTH: - default: - if (event.increaseReferenceCount(PipeRealtimeDataRegionHybridExtractor.class.getName())) { - return event.getEvent(); - } else { - // If the event's reference count can not be increased, it means the data represented by - // this event is not reliable anymore. the data has been lost. we simply discard this - // event - // and report the exception to PipeRuntimeAgent. 
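Both supply paths above hand an event downstream only if its reference count can still be raised; a failed increase means the backing resource was already released and the event must be dropped (and, for tsfile events, reported). A minimal sketch of that retain-or-drop contract.

import java.util.concurrent.atomic.AtomicInteger;

// Sketch: CAS-based retain that refuses to resurrect a fully released event.
final class RefCountedSupplySketch {
  static final class Event {
    private final AtomicInteger refCount = new AtomicInteger(1);

    boolean tryRetain() {
      int current;
      do {
        current = refCount.get();
        if (current <= 0) {
          return false; // resource already released; event no longer reliable
        }
      } while (!refCount.compareAndSet(current, current + 1));
      return true;
    }

    void release() { refCount.decrementAndGet(); }
  }

  static Event supply(Event polled) {
    return polled.tryRetain() ? polled : null; // null: caller polls the next one
  }

  public static void main(String[] args) {
    Event event = new Event();
    System.out.println(supply(event) != null); // true: handed downstream
    event.release(); // queue's own reference
    event.release(); // downstream reference
    System.out.println(supply(event) != null); // false: already fully released
  }
}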
- final String errorMessage = - String.format( - "TsFile Event %s can not be supplied because " - + "the reference count can not be increased, " - + "the data represented by this event is lost", - event.getEvent()); - LOGGER.error(errorMessage); - PipeDataNodeAgent.runtime() - .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage)); - return null; - } - } - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/PipeDataRegionAssigner.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/PipeDataRegionAssigner.java deleted file mode 100644 index 762577d2f841c..0000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/PipeDataRegionAssigner.java +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.db.pipe.extractor.dataregion.realtime.assigner; - -import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.commons.pipe.event.ProgressReportEvent; -import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; -import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; -import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent; -import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEventFactory; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionExtractor; -import org.apache.iotdb.db.pipe.metric.PipeAssignerMetrics; -import org.apache.iotdb.db.pipe.pattern.CachedSchemaPatternMatcher; -import org.apache.iotdb.db.pipe.pattern.PipeDataRegionMatcher; -import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent; - -import java.io.Closeable; - -public class PipeDataRegionAssigner implements Closeable { - - private static final int nonForwardingEventsProgressReportInterval = - PipeConfig.getInstance().getPipeNonForwardingEventsProgressReportInterval(); - - /** - * The {@link PipeDataRegionMatcher} is used to match the event with the extractor based on the - * pattern. - */ - private final PipeDataRegionMatcher matcher; - - /** The {@link DisruptorQueue} is used to assign the event to the extractor. 
*/ - private final DisruptorQueue disruptor; - - private final String dataRegionId; - - private int counter = 0; - - public String getDataRegionId() { - return dataRegionId; - } - - public PipeDataRegionAssigner(final String dataRegionId) { - this.matcher = new CachedSchemaPatternMatcher(); - this.disruptor = new DisruptorQueue(this::assignToExtractor); - this.dataRegionId = dataRegionId; - PipeAssignerMetrics.getInstance().register(this); - } - - public void publishToAssign(final PipeRealtimeEvent event) { - event.increaseReferenceCount(PipeDataRegionAssigner.class.getName()); - - disruptor.publish(event); - - if (event.getEvent() instanceof PipeHeartbeatEvent) { - ((PipeHeartbeatEvent) event.getEvent()).onPublished(); - } - } - - public void assignToExtractor( - final PipeRealtimeEvent event, final long sequence, final boolean endOfBatch) { - matcher - .match(event) - .forEach( - extractor -> { - if (event.getEvent().isGeneratedByPipe() && !extractor.isForwardingPipeRequests()) { - // The frequency of progress reports is limited by the counter, while progress - // reports to TsFileInsertionEvent are not limited. - if (!(event.getEvent() instanceof TsFileInsertionEvent)) { - if (counter < nonForwardingEventsProgressReportInterval) { - counter++; - return; - } - counter = 0; - } - - final ProgressReportEvent reportEvent = - new ProgressReportEvent( - extractor.getPipeName(), - extractor.getCreationTime(), - extractor.getPipeTaskMeta(), - extractor.getPipePattern(), - extractor.getRealtimeDataExtractionStartTime(), - extractor.getRealtimeDataExtractionEndTime()); - reportEvent.bindProgressIndex(event.getProgressIndex()); - reportEvent.increaseReferenceCount(PipeDataRegionAssigner.class.getName()); - extractor.extract(PipeRealtimeEventFactory.createRealtimeEvent(reportEvent)); - return; - } - - final PipeRealtimeEvent copiedEvent = - event.shallowCopySelfAndBindPipeTaskMetaForProgressReport( - extractor.getPipeName(), - extractor.getCreationTime(), - extractor.getPipeTaskMeta(), - extractor.getPipePattern(), - extractor.getRealtimeDataExtractionStartTime(), - extractor.getRealtimeDataExtractionEndTime()); - final EnrichedEvent innerEvent = copiedEvent.getEvent(); - if (innerEvent instanceof PipeTsFileInsertionEvent) { - ((PipeTsFileInsertionEvent) innerEvent) - .disableMod4NonTransferPipes(extractor.isShouldTransferModFile()); - } - - copiedEvent.increaseReferenceCount(PipeDataRegionAssigner.class.getName()); - extractor.extract(copiedEvent); - - if (innerEvent instanceof PipeHeartbeatEvent) { - ((PipeHeartbeatEvent) innerEvent).onAssigned(); - } - }); - event.gcSchemaInfo(); - event.decreaseReferenceCount(PipeDataRegionAssigner.class.getName(), false); - } - - public void startAssignTo(final PipeRealtimeDataRegionExtractor extractor) { - matcher.register(extractor); - } - - public void stopAssignTo(final PipeRealtimeDataRegionExtractor extractor) { - matcher.deregister(extractor); - } - - public boolean notMoreExtractorNeededToBeAssigned() { - return matcher.getRegisterCount() == 0; - } - - /** - * Clear the matcher and disruptor. The method {@link PipeDataRegionAssigner#publishToAssign} - * should not be used after calling this method. 
- */ - @Override - public void close() { - PipeAssignerMetrics.getInstance().deregister(dataRegionId); - matcher.clear(); - disruptor.clear(); - } - - public int getTabletInsertionEventCount() { - return disruptor.getTabletInsertionEventCount(); - } - - public int getTsFileInsertionEventCount() { - return disruptor.getTsFileInsertionEventCount(); - } - - public int getPipeHeartbeatEventCount() { - return disruptor.getPipeHeartbeatEventCount(); - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeMetrics.java index 172653f3b0127..e54e515afd231 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeMetrics.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeMetrics.java @@ -20,6 +20,18 @@ package org.apache.iotdb.db.pipe.metric; import org.apache.iotdb.commons.pipe.metric.PipeEventCommitMetrics; +import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics; +import org.apache.iotdb.db.pipe.metric.overview.PipeHeartbeatEventMetrics; +import org.apache.iotdb.db.pipe.metric.overview.PipeResourceMetrics; +import org.apache.iotdb.db.pipe.metric.overview.PipeTsFileToTabletsMetrics; +import org.apache.iotdb.db.pipe.metric.processor.PipeProcessorMetrics; +import org.apache.iotdb.db.pipe.metric.receiver.PipeDataNodeReceiverMetrics; +import org.apache.iotdb.db.pipe.metric.schema.PipeSchemaRegionListenerMetrics; +import org.apache.iotdb.db.pipe.metric.schema.PipeSchemaRegionSinkMetrics; +import org.apache.iotdb.db.pipe.metric.schema.PipeSchemaRegionSourceMetrics; +import org.apache.iotdb.db.pipe.metric.sink.PipeDataRegionSinkMetrics; +import org.apache.iotdb.db.pipe.metric.source.PipeAssignerMetrics; +import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionSourceMetrics; import org.apache.iotdb.metrics.AbstractMetricService; import org.apache.iotdb.metrics.metricsets.IMetricSet; @@ -30,35 +42,35 @@ public class PipeDataNodeMetrics implements IMetricSet { @Override public void bindTo(final AbstractMetricService metricService) { PipeAssignerMetrics.getInstance().bindTo(metricService); - PipeDataRegionExtractorMetrics.getInstance().bindTo(metricService); + PipeDataRegionSourceMetrics.getInstance().bindTo(metricService); PipeProcessorMetrics.getInstance().bindTo(metricService); - PipeDataRegionConnectorMetrics.getInstance().bindTo(metricService); + PipeDataRegionSinkMetrics.getInstance().bindTo(metricService); PipeHeartbeatEventMetrics.getInstance().bindTo(metricService); - PipeWALInsertNodeCacheMetrics.getInstance().bindTo(metricService); PipeResourceMetrics.getInstance().bindTo(metricService); PipeEventCommitMetrics.getInstance().bindTo(metricService); PipeSchemaRegionListenerMetrics.getInstance().bindTo(metricService); - PipeSchemaRegionExtractorMetrics.getInstance().bindTo(metricService); - PipeSchemaRegionConnectorMetrics.getInstance().bindTo(metricService); - PipeDataNodeRemainingEventAndTimeMetrics.getInstance().bindTo(metricService); + PipeSchemaRegionSourceMetrics.getInstance().bindTo(metricService); + PipeSchemaRegionSinkMetrics.getInstance().bindTo(metricService); + PipeDataNodeSinglePipeMetrics.getInstance().bindTo(metricService); PipeDataNodeReceiverMetrics.getInstance().bindTo(metricService); + PipeTsFileToTabletsMetrics.getInstance().bindTo(metricService); } @Override public void unbindFrom(final AbstractMetricService metricService) { 
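The metric-set changes above rename members in bindTo and unbindFrom in lockstep; missing one side would leak gauges on unbind. A small sketch of keeping the two directions symmetric by construction, with simplified stand-ins for IMetricSet and AbstractMetricService.

import java.util.Arrays;
import java.util.List;

// Sketch: one shared member list drives both bind and unbind, so a renamed
// member (e.g. extractor -> source metrics) cannot drift out of sync.
final class CompositeMetricSetSketch {
  interface Service { void register(String name); void remove(String name); }
  interface MetricSet { void bindTo(Service s); void unbindFrom(Service s); }

  static MetricSet named(String name) {
    return new MetricSet() {
      @Override public void bindTo(Service s) { s.register(name); }
      @Override public void unbindFrom(Service s) { s.remove(name); }
    };
  }

  static final List<MetricSet> MEMBERS =
      Arrays.asList(named("assigner"), named("source"), named("processor"), named("sink"));

  static void bindAll(Service s) { MEMBERS.forEach(m -> m.bindTo(s)); }
  static void unbindAll(Service s) { MEMBERS.forEach(m -> m.unbindFrom(s)); }

  public static void main(String[] args) {
    Service console = new Service() {
      @Override public void register(String name) { System.out.println("bind " + name); }
      @Override public void remove(String name) { System.out.println("unbind " + name); }
    };
    bindAll(console);
    unbindAll(console);
  }
}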
PipeAssignerMetrics.getInstance().unbindFrom(metricService); - PipeDataRegionExtractorMetrics.getInstance().unbindFrom(metricService); + PipeDataRegionSourceMetrics.getInstance().unbindFrom(metricService); PipeProcessorMetrics.getInstance().unbindFrom(metricService); - PipeDataRegionConnectorMetrics.getInstance().unbindFrom(metricService); + PipeDataRegionSinkMetrics.getInstance().unbindFrom(metricService); PipeHeartbeatEventMetrics.getInstance().unbindFrom(metricService); - PipeWALInsertNodeCacheMetrics.getInstance().unbindFrom(metricService); PipeResourceMetrics.getInstance().unbindFrom(metricService); PipeEventCommitMetrics.getInstance().unbindFrom(metricService); PipeSchemaRegionListenerMetrics.getInstance().unbindFrom(metricService); - PipeSchemaRegionExtractorMetrics.getInstance().unbindFrom(metricService); - PipeSchemaRegionConnectorMetrics.getInstance().unbindFrom(metricService); - PipeDataNodeRemainingEventAndTimeMetrics.getInstance().unbindFrom(metricService); + PipeSchemaRegionSourceMetrics.getInstance().unbindFrom(metricService); + PipeSchemaRegionSinkMetrics.getInstance().unbindFrom(metricService); + PipeDataNodeSinglePipeMetrics.getInstance().unbindFrom(metricService); PipeDataNodeReceiverMetrics.getInstance().unbindFrom(metricService); + PipeTsFileToTabletsMetrics.getInstance().unbindFrom(metricService); } //////////////////////////// singleton //////////////////////////// diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeRemainingEventAndTimeMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeRemainingEventAndTimeMetrics.java deleted file mode 100644 index 83426a500c3a7..0000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeRemainingEventAndTimeMetrics.java +++ /dev/null @@ -1,263 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.db.pipe.metric; - -import org.apache.iotdb.commons.pipe.progress.PipeEventCommitManager; -import org.apache.iotdb.commons.service.metric.enums.Metric; -import org.apache.iotdb.commons.service.metric.enums.Tag; -import org.apache.iotdb.db.pipe.extractor.dataregion.IoTDBDataRegionExtractor; -import org.apache.iotdb.db.pipe.extractor.schemaregion.IoTDBSchemaRegionExtractor; -import org.apache.iotdb.db.pipe.task.subtask.connector.PipeConnectorSubtask; -import org.apache.iotdb.db.pipe.task.subtask.processor.PipeProcessorSubtask; -import org.apache.iotdb.metrics.AbstractMetricService; -import org.apache.iotdb.metrics.metricsets.IMetricSet; -import org.apache.iotdb.metrics.utils.MetricLevel; -import org.apache.iotdb.metrics.utils.MetricType; - -import com.google.common.collect.ImmutableSet; -import org.apache.tsfile.utils.Pair; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.ConcurrentHashMap; - -public class PipeDataNodeRemainingEventAndTimeMetrics implements IMetricSet { - - private static final Logger LOGGER = - LoggerFactory.getLogger(PipeDataNodeRemainingEventAndTimeMetrics.class); - - @SuppressWarnings("java:S3077") - private volatile AbstractMetricService metricService; - - private final Map - remainingEventAndTimeOperatorMap = new ConcurrentHashMap<>(); - - //////////////////////////// bindTo & unbindFrom (metric framework) //////////////////////////// - - @Override - public void bindTo(final AbstractMetricService metricService) { - this.metricService = metricService; - ImmutableSet.copyOf(remainingEventAndTimeOperatorMap.keySet()).forEach(this::createMetrics); - } - - private void createMetrics(final String pipeID) { - createAutoGauge(pipeID); - } - - private void createAutoGauge(final String pipeID) { - final PipeDataNodeRemainingEventAndTimeOperator operator = - remainingEventAndTimeOperatorMap.get(pipeID); - metricService.createAutoGauge( - Metric.PIPE_DATANODE_REMAINING_EVENT_COUNT.toString(), - MetricLevel.IMPORTANT, - operator, - PipeDataNodeRemainingEventAndTimeOperator::getRemainingEvents, - Tag.NAME.toString(), - operator.getPipeName(), - Tag.CREATION_TIME.toString(), - String.valueOf(operator.getCreationTime())); - metricService.createAutoGauge( - Metric.PIPE_DATANODE_REMAINING_TIME.toString(), - MetricLevel.IMPORTANT, - operator, - PipeDataNodeRemainingEventAndTimeOperator::getRemainingTime, - Tag.NAME.toString(), - operator.getPipeName(), - Tag.CREATION_TIME.toString(), - String.valueOf(operator.getCreationTime())); - } - - @Override - public void unbindFrom(final AbstractMetricService metricService) { - ImmutableSet.copyOf(remainingEventAndTimeOperatorMap.keySet()).forEach(this::deregister); - if (!remainingEventAndTimeOperatorMap.isEmpty()) { - LOGGER.warn( - "Failed to unbind from pipe remaining event and time metrics, RemainingEventAndTimeOperator map not empty"); - } - } - - private void removeMetrics(final String pipeID) { - removeAutoGauge(pipeID); - } - - private void removeAutoGauge(final String pipeID) { - final PipeDataNodeRemainingEventAndTimeOperator operator = - remainingEventAndTimeOperatorMap.get(pipeID); - metricService.remove( - MetricType.AUTO_GAUGE, - Metric.PIPE_DATANODE_REMAINING_EVENT_COUNT.toString(), - Tag.NAME.toString(), - operator.getPipeName(), - Tag.CREATION_TIME.toString(), - String.valueOf(operator.getCreationTime())); - metricService.remove( - MetricType.AUTO_GAUGE, - Metric.PIPE_DATANODE_REMAINING_TIME.toString(), - 
Tag.NAME.toString(), - operator.getPipeName(), - Tag.CREATION_TIME.toString(), - String.valueOf(operator.getCreationTime())); - remainingEventAndTimeOperatorMap.remove(pipeID); - } - - //////////////////////////// register & deregister (pipe integration) //////////////////////////// - - public void register(final IoTDBDataRegionExtractor extractor) { - // The metric is global thus the regionId is omitted - final String pipeID = extractor.getPipeName() + "_" + extractor.getCreationTime(); - remainingEventAndTimeOperatorMap - .computeIfAbsent(pipeID, k -> new PipeDataNodeRemainingEventAndTimeOperator()) - .register(extractor); - if (Objects.nonNull(metricService)) { - createMetrics(pipeID); - } - } - - public void register(final PipeProcessorSubtask processorSubtask) { - // The metric is global thus the regionId is omitted - final String pipeID = processorSubtask.getPipeName() + "_" + processorSubtask.getCreationTime(); - remainingEventAndTimeOperatorMap - .computeIfAbsent(pipeID, k -> new PipeDataNodeRemainingEventAndTimeOperator()) - .register(processorSubtask); - if (Objects.nonNull(metricService)) { - createMetrics(pipeID); - } - } - - public void register( - final PipeConnectorSubtask connectorSubtask, final String pipeName, final long creationTime) { - // The metric is global thus the regionId is omitted - final String pipeID = pipeName + "_" + creationTime; - remainingEventAndTimeOperatorMap - .computeIfAbsent(pipeID, k -> new PipeDataNodeRemainingEventAndTimeOperator()) - .register(connectorSubtask, pipeName, creationTime); - if (Objects.nonNull(metricService)) { - createMetrics(pipeID); - } - } - - public void register(final IoTDBSchemaRegionExtractor extractor) { - // The metric is global thus the regionId is omitted - final String pipeID = extractor.getPipeName() + "_" + extractor.getCreationTime(); - remainingEventAndTimeOperatorMap - .computeIfAbsent(pipeID, k -> new PipeDataNodeRemainingEventAndTimeOperator()) - .register(extractor); - if (Objects.nonNull(metricService)) { - createMetrics(pipeID); - } - } - - public void thawRate(final String pipeID) { - if (!remainingEventAndTimeOperatorMap.containsKey(pipeID)) { - // In dataNode, the "thawRate" may be called when there are no subtasks, and we call - // "startPipe". - // We thaw it later in "startPipeTask". - return; - } - remainingEventAndTimeOperatorMap.get(pipeID).thawRate(true); - } - - public void freezeRate(final String pipeID) { - if (!remainingEventAndTimeOperatorMap.containsKey(pipeID)) { - // In dataNode, the "freezeRate" may be called when there are no subtasks, and we call - // "stopPipe" after calling "startPipe". 
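thawRate and freezeRate above deliberately no-op when the per-pipe operator is absent, because startPipe/stopPipe can run before any subtask has registered the operator. A sketch of that guard using computeIfPresent on a concurrent map; Operator and the field names are illustrative.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Sketch: rate switches silently skip pipes whose operator does not exist yet.
final class RateSwitchSketch {
  static final class Operator { volatile boolean frozen; }
  static final Map<String, Operator> operators = new ConcurrentHashMap<>();

  static void thawRate(String pipeId) {
    // computeIfPresent keeps the check-then-act atomic on the map entry
    operators.computeIfPresent(pipeId, (id, op) -> { op.frozen = false; return op; });
  }

  static void freezeRate(String pipeId) {
    operators.computeIfPresent(pipeId, (id, op) -> { op.frozen = true; return op; });
  }

  public static void main(String[] args) {
    freezeRate("p1"); // no operator yet: silently ignored
    operators.put("p1", new Operator());
    freezeRate("p1");
    System.out.println(operators.get("p1").frozen); // true
  }
}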
- // We do nothing because in that case the rate is not thawed initially - return; - } - remainingEventAndTimeOperatorMap.get(pipeID).freezeRate(true); - } - - public void deregister(final String pipeID) { - if (!remainingEventAndTimeOperatorMap.containsKey(pipeID)) { - LOGGER.warn( - "Failed to deregister pipe remaining event and time metrics, RemainingEventAndTimeOperator({}) does not exist", - pipeID); - return; - } - if (Objects.nonNull(metricService)) { - removeMetrics(pipeID); - } - } - - public void markRegionCommit(final String pipeID, final boolean isDataRegion) { - if (Objects.isNull(metricService)) { - return; - } - final PipeDataNodeRemainingEventAndTimeOperator operator = - remainingEventAndTimeOperatorMap.get(pipeID); - if (Objects.isNull(operator)) { - LOGGER.warn( - "Failed to mark pipe region commit, RemainingEventAndTimeOperator({}) does not exist", - pipeID); - return; - } - - if (isDataRegion) { - operator.markDataRegionCommit(); - } else { - operator.markSchemaRegionCommit(); - } - } - - public void markCollectInvocationCount(final String pipeID, final long collectInvocationCount) { - if (Objects.isNull(metricService)) { - return; - } - final PipeDataNodeRemainingEventAndTimeOperator operator = - remainingEventAndTimeOperatorMap.get(pipeID); - if (Objects.isNull(operator)) { - return; - } - - operator.markCollectInvocationCount(collectInvocationCount); - } - - //////////////////////////// Show pipes //////////////////////////// - - public Pair getRemainingEventAndTime( - final String pipeName, final long creationTime) { - final PipeDataNodeRemainingEventAndTimeOperator operator = - remainingEventAndTimeOperatorMap.computeIfAbsent( - pipeName + "_" + creationTime, k -> new PipeDataNodeRemainingEventAndTimeOperator()); - return new Pair<>(operator.getRemainingEvents(), operator.getRemainingTime()); - } - - //////////////////////////// singleton //////////////////////////// - - private static class PipeDataNodeRemainingEventAndTimeMetricsHolder { - - private static final PipeDataNodeRemainingEventAndTimeMetrics INSTANCE = - new PipeDataNodeRemainingEventAndTimeMetrics(); - - private PipeDataNodeRemainingEventAndTimeMetricsHolder() { - // Empty constructor - } - } - - public static PipeDataNodeRemainingEventAndTimeMetrics getInstance() { - return PipeDataNodeRemainingEventAndTimeMetricsHolder.INSTANCE; - } - - private PipeDataNodeRemainingEventAndTimeMetrics() { - PipeEventCommitManager.getInstance().setCommitRateMarker(this::markRegionCommit); - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeWALInsertNodeCacheMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeWALInsertNodeCacheMetrics.java deleted file mode 100644 index ccf556e5e09ae..0000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeWALInsertNodeCacheMetrics.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.db.pipe.metric; - -import org.apache.iotdb.commons.service.metric.enums.Metric; -import org.apache.iotdb.commons.service.metric.enums.Tag; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALInsertNodeCache; -import org.apache.iotdb.metrics.AbstractMetricService; -import org.apache.iotdb.metrics.metricsets.IMetricSet; -import org.apache.iotdb.metrics.utils.MetricLevel; -import org.apache.iotdb.metrics.utils.MetricType; - -import com.google.common.collect.ImmutableSet; -import org.checkerframework.checker.nullness.qual.NonNull; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.ConcurrentHashMap; - -public class PipeWALInsertNodeCacheMetrics implements IMetricSet { - - private static final Logger LOGGER = LoggerFactory.getLogger(PipeWALInsertNodeCacheMetrics.class); - - @SuppressWarnings("java:S3077") - private volatile AbstractMetricService metricService; - - private final Map cacheMap = new ConcurrentHashMap<>(); - - //////////////////////////// bindTo & unbindFrom (metric framework) //////////////////////////// - - @Override - public void bindTo(AbstractMetricService metricService) { - this.metricService = metricService; - ImmutableSet dataRegionIds = ImmutableSet.copyOf(cacheMap.keySet()); - for (Integer dataRegionId : dataRegionIds) { - createMetrics(dataRegionId); - } - } - - private void createMetrics(Integer dataRegionId) { - createAutoGauge(dataRegionId); - } - - private void createAutoGauge(Integer dataRegionId) { - metricService.createAutoGauge( - Metric.PIPE_WAL_INSERT_NODE_CACHE_HIT_RATE.toString(), - MetricLevel.IMPORTANT, - cacheMap.get(dataRegionId), - WALInsertNodeCache::getCacheHitRate, - Tag.REGION.toString(), - String.valueOf(dataRegionId)); - metricService.createAutoGauge( - Metric.PIPE_WAL_INSERT_NODE_CACHE_HIT_COUNT.toString(), - MetricLevel.IMPORTANT, - cacheMap.get(dataRegionId), - WALInsertNodeCache::getCacheHitCount, - Tag.REGION.toString(), - String.valueOf(dataRegionId)); - metricService.createAutoGauge( - Metric.PIPE_WAL_INSERT_NODE_CACHE_REQUEST_COUNT.toString(), - MetricLevel.IMPORTANT, - cacheMap.get(dataRegionId), - WALInsertNodeCache::getCacheRequestCount, - Tag.REGION.toString(), - String.valueOf(dataRegionId)); - } - - @Override - public void unbindFrom(AbstractMetricService metricService) { - ImmutableSet dataRegionIds = ImmutableSet.copyOf(cacheMap.keySet()); - for (Integer dataRegionId : dataRegionIds) { - deregister(dataRegionId); - } - if (!cacheMap.isEmpty()) { - LOGGER.warn("Failed to unbind from wal insert node cache metrics, cache map not empty"); - } - } - - private void removeMetrics(Integer dataRegionId) { - removeAutoGauge(dataRegionId); - } - - private void removeAutoGauge(Integer dataRegionId) { - metricService.remove( - MetricType.AUTO_GAUGE, - Metric.PIPE_WAL_INSERT_NODE_CACHE_HIT_RATE.toString(), - Tag.REGION.toString(), - String.valueOf(dataRegionId)); - metricService.remove( - MetricType.AUTO_GAUGE, - Metric.PIPE_WAL_INSERT_NODE_CACHE_HIT_COUNT.toString(), - Tag.REGION.toString(), - 
String.valueOf(dataRegionId)); - metricService.remove( - MetricType.AUTO_GAUGE, - Metric.PIPE_WAL_INSERT_NODE_CACHE_REQUEST_COUNT.toString(), - Tag.REGION.toString(), - String.valueOf(dataRegionId)); - } - - //////////////////////////// register & deregister (pipe integration) //////////////////////////// - - public void register(@NonNull WALInsertNodeCache walInsertNodeCache, Integer dataRegionId) { - cacheMap.putIfAbsent(dataRegionId, walInsertNodeCache); - if (Objects.nonNull(metricService)) { - createMetrics(dataRegionId); - } - } - - public void deregister(Integer dataRegionId) { - // TODO: waiting called by WALInsertNodeCache - if (!cacheMap.containsKey(dataRegionId)) { - LOGGER.warn( - "Failed to deregister wal insert node cache metrics, WALInsertNodeCache({}) does not exist", - dataRegionId); - return; - } - if (Objects.nonNull(metricService)) { - removeMetrics(dataRegionId); - } - cacheMap.remove(dataRegionId); - } - - //////////////////////////// singleton //////////////////////////// - - private static class PipeWALInsertNodeCacheMetricsHolder { - - private static final PipeWALInsertNodeCacheMetrics INSTANCE = - new PipeWALInsertNodeCacheMetrics(); - - private PipeWALInsertNodeCacheMetricsHolder() { - // empty constructor - } - } - - public static PipeWALInsertNodeCacheMetrics getInstance() { - return PipeWALInsertNodeCacheMetricsHolder.INSTANCE; - } - - private PipeWALInsertNodeCacheMetrics() { - // empty constructor - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeRemainingEventAndTimeOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeRemainingEventAndTimeOperator.java similarity index 61% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeRemainingEventAndTimeOperator.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeRemainingEventAndTimeOperator.java index e85a212b96d6d..f54e8bcfccb2c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeRemainingEventAndTimeOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeRemainingEventAndTimeOperator.java @@ -17,17 +17,16 @@ * under the License. 
 */

-package org.apache.iotdb.db.pipe.metric;
+package org.apache.iotdb.db.pipe.metric.overview;

-import org.apache.iotdb.commons.enums.PipeRemainingTimeRateAverageTime;
+import org.apache.iotdb.commons.enums.PipeRateAverage;
 import org.apache.iotdb.commons.pipe.config.PipeConfig;
 import org.apache.iotdb.commons.pipe.metric.PipeRemainingOperator;
-import org.apache.iotdb.db.pipe.extractor.dataregion.IoTDBDataRegionExtractor;
-import org.apache.iotdb.db.pipe.extractor.schemaregion.IoTDBSchemaRegionExtractor;
-import org.apache.iotdb.db.pipe.task.subtask.connector.PipeConnectorSubtask;
-import org.apache.iotdb.db.pipe.task.subtask.processor.PipeProcessorSubtask;
+import org.apache.iotdb.db.pipe.source.schemaregion.IoTDBSchemaRegionSource;
 import org.apache.iotdb.metrics.core.IoTDBMetricManager;
 import org.apache.iotdb.metrics.core.type.IoTDBHistogram;
+import org.apache.iotdb.metrics.impl.DoNothingMetricManager;
+import org.apache.iotdb.metrics.type.Timer;
 import org.apache.iotdb.pipe.api.event.Event;

 import com.codahale.metrics.Clock;
@@ -38,44 +37,87 @@ import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;

-class PipeDataNodeRemainingEventAndTimeOperator extends PipeRemainingOperator {
-  private final Set<IoTDBDataRegionExtractor> dataRegionExtractors =
-      Collections.newSetFromMap(new ConcurrentHashMap<>());
-  private final Set<PipeProcessorSubtask> dataRegionProcessors =
-      Collections.newSetFromMap(new ConcurrentHashMap<>());
-  private final Set<PipeConnectorSubtask> dataRegionConnectors =
-      Collections.newSetFromMap(new ConcurrentHashMap<>());
-  private final Set<IoTDBSchemaRegionExtractor> schemaRegionExtractors =
+public class PipeDataNodeRemainingEventAndTimeOperator extends PipeRemainingOperator {
+
+  // Calculate from schema region extractors directly because it requires less computation
+  private final Set<IoTDBSchemaRegionSource> schemaRegionExtractors =
       Collections.newSetFromMap(new ConcurrentHashMap<>());
+
+  private final AtomicInteger insertNodeEventCount = new AtomicInteger(0);
+  private final AtomicInteger rawTabletEventCount = new AtomicInteger(0);
+  private final AtomicInteger tsfileEventCount = new AtomicInteger(0);
+  private final AtomicInteger heartbeatEventCount = new AtomicInteger(0);
+
   private final AtomicReference<Meter> dataRegionCommitMeter = new AtomicReference<>(null);
   private final AtomicReference<Meter> schemaRegionCommitMeter = new AtomicReference<>(null);

   private final IoTDBHistogram collectInvocationHistogram =
-      (IoTDBHistogram) IoTDBMetricManager.getInstance().createHistogram(null);
+      (IoTDBHistogram) IoTDBMetricManager.getInstance().createHistogram();
+
+  private Timer insertNodeTransferTimer = DoNothingMetricManager.DO_NOTHING_TIMER;
+  private Timer tsfileTransferTimer = DoNothingMetricManager.DO_NOTHING_TIMER;

   private double lastDataRegionCommitSmoothingValue = Long.MAX_VALUE;
   private double lastSchemaRegionCommitSmoothingValue = Long.MAX_VALUE;

+  PipeDataNodeRemainingEventAndTimeOperator(final String pipeName, final long creationTime) {
+    super(pipeName, creationTime);
+  }
+
   //////////////////////////// Remaining event & time calculation ////////////////////////////

-  long getRemainingEvents() {
-    return dataRegionExtractors.stream()
-            .map(IoTDBDataRegionExtractor::getEventCount)
-            .reduce(Integer::sum)
-            .orElse(0)
-        + dataRegionProcessors.stream()
-            .map(processorSubtask -> processorSubtask.getEventCount(false))
-            .reduce(Integer::sum)
-            .orElse(0)
-        + dataRegionConnectors.stream()
-            .map(connectorSubtask ->
connectorSubtask.getEventCount(pipeName)) - .reduce(Integer::sum) - .orElse(0) - + schemaRegionExtractors.stream() - .map(IoTDBSchemaRegionExtractor::getUnTransferredEventCount) - .reduce(Long::sum) - .orElse(0L); + void increaseInsertNodeEventCount() { + insertNodeEventCount.incrementAndGet(); + } + + void decreaseInsertNodeEventCount() { + insertNodeEventCount.decrementAndGet(); + } + + void increaseRawTabletEventCount() { + rawTabletEventCount.incrementAndGet(); + } + + void decreaseRawTabletEventCount() { + rawTabletEventCount.decrementAndGet(); + } + + void increaseTsFileEventCount() { + tsfileEventCount.incrementAndGet(); + } + + void decreaseTsFileEventCount() { + tsfileEventCount.decrementAndGet(); + } + + void increaseHeartbeatEventCount() { + heartbeatEventCount.incrementAndGet(); + } + + void decreaseHeartbeatEventCount() { + heartbeatEventCount.decrementAndGet(); + } + + public long getRemainingNonHeartbeatEvents() { + final long remainingEvents = + tsfileEventCount.get() + + rawTabletEventCount.get() + + insertNodeEventCount.get() + + schemaRegionExtractors.stream() + .map(IoTDBSchemaRegionSource::getUnTransferredEventCount) + .reduce(Long::sum) + .orElse(0L); + + // There are cases where the indicator is negative. For example, after the Pipe is restarted, + // the Processor SubTask is still collecting Events, resulting in a negative count. This + // situation cannot be avoided because the Pipe may be restarted internally. + return remainingEvents >= 0 ? remainingEvents : 0; + } + + public int getInsertNodeEventCount() { + return insertNodeEventCount.get(); } /** @@ -85,34 +127,16 @@ long getRemainingEvents() { * * @return The estimated remaining time */ - double getRemainingTime() { - final PipeRemainingTimeRateAverageTime pipeRemainingTimeCommitRateAverageTime = + public double getRemainingTime() { + final PipeRateAverage pipeRemainingTimeCommitRateAverageTime = PipeConfig.getInstance().getPipeRemainingTimeCommitRateAverageTime(); final double invocationValue = collectInvocationHistogram.getMean(); // Do not take heartbeat event into account final double totalDataRegionWriteEventCount = - (dataRegionExtractors.stream() - .map(IoTDBDataRegionExtractor::getEventCount) - .reduce(Integer::sum) - .orElse(0) - - dataRegionExtractors.stream() - .map(IoTDBDataRegionExtractor::getPipeHeartbeatEventCount) - .reduce(Integer::sum) - .orElse(0)) - * Math.max(invocationValue, 1) - + dataRegionProcessors.stream() - .map(processorSubtask -> processorSubtask.getEventCount(true)) - .reduce(Integer::sum) - .orElse(0) - + dataRegionConnectors.stream() - .map(connectorSubtask -> connectorSubtask.getEventCount(pipeName)) - .reduce(Integer::sum) - .orElse(0) - - dataRegionConnectors.stream() - .map(PipeConnectorSubtask::getPipeHeartbeatEventCount) - .reduce(Integer::sum) - .orElse(0); + tsfileEventCount.get() * Math.max(invocationValue, 1) + + rawTabletEventCount.get() + + insertNodeEventCount.get(); dataRegionCommitMeter.updateAndGet( meter -> { @@ -134,7 +158,7 @@ long getRemainingEvents() { final long totalSchemaRegionWriteEventCount = schemaRegionExtractors.stream() - .map(IoTDBSchemaRegionExtractor::getUnTransferredEventCount) + .map(IoTDBSchemaRegionSource::getUnTransferredEventCount) .reduce(Long::sum) .orElse(0L); @@ -168,24 +192,7 @@ long getRemainingEvents() { //////////////////////////// Register & deregister (pipe integration) //////////////////////////// - void register(final IoTDBDataRegionExtractor extractor) { - setNameAndCreationTime(extractor.getPipeName(), 
extractor.getCreationTime()); - dataRegionExtractors.add(extractor); - } - - void register(final PipeProcessorSubtask processorSubtask) { - setNameAndCreationTime(processorSubtask.getPipeName(), processorSubtask.getCreationTime()); - dataRegionProcessors.add(processorSubtask); - } - - void register( - final PipeConnectorSubtask connectorSubtask, final String pipeName, final long creationTime) { - setNameAndCreationTime(pipeName, creationTime); - dataRegionConnectors.add(connectorSubtask); - } - - void register(final IoTDBSchemaRegionExtractor extractor) { - setNameAndCreationTime(extractor.getPipeName(), extractor.getCreationTime()); + void register(final IoTDBSchemaRegionSource extractor) { schemaRegionExtractors.add(extractor); } @@ -211,11 +218,27 @@ void markSchemaRegionCommit() { }); } - void markCollectInvocationCount(final long collectInvocationCount) { + void markTsFileCollectInvocationCount(final long collectInvocationCount) { // If collectInvocationCount == 0, the event will still be committed once collectInvocationHistogram.update(Math.max(collectInvocationCount, 1)); } + public void setInsertNodeTransferTimer(Timer insertNodeTransferTimer) { + this.insertNodeTransferTimer = insertNodeTransferTimer; + } + + public Timer getInsertNodeTransferTimer() { + return insertNodeTransferTimer; + } + + public void setTsFileTransferTimer(Timer tsFileTransferTimer) { + this.tsfileTransferTimer = tsFileTransferTimer; + } + + public Timer getTsFileTransferTimer() { + return tsfileTransferTimer; + } + //////////////////////////// Switch //////////////////////////// // Thread-safe & Idempotent diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeSinglePipeMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeSinglePipeMetrics.java new file mode 100644 index 0000000000000..54705fc84cf87 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeSinglePipeMetrics.java @@ -0,0 +1,388 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
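For the remaining-time gauge defined in the operator above, each TsFile event is weighted by the mean number of collect invocations it expands into, floored at 1 just as in markTsFileCollectInvocationCount, and the weighted backlog is divided by the smoothed commit rate. A hedged sketch of that arithmetic, written as a hypothetical helper rather than the actual method:

// Sketch: backlog-over-throughput estimate behind the remaining-time gauge.
static double estimateRemainingSeconds(
    final long tsFileEvents,
    final long rawTabletEvents,
    final long insertNodeEvents,
    final double meanInvocationsPerTsFile, // collectInvocationHistogram.getMean()
    final double smoothedCommitsPerSecond) {
  final double weightedBacklog =
      tsFileEvents * Math.max(meanInvocationsPerTsFile, 1)
          + rawTabletEvents
          + insertNodeEvents;
  // No observed commits yet: the estimate is effectively unbounded.
  return smoothedCommitsPerSecond <= 0
      ? Double.MAX_VALUE
      : weightedBacklog / smoothedCommitsPerSecond;
}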
+ */ + +package org.apache.iotdb.db.pipe.metric.overview; + +import org.apache.iotdb.commons.pipe.agent.task.progress.PipeEventCommitManager; +import org.apache.iotdb.commons.service.metric.enums.Metric; +import org.apache.iotdb.commons.service.metric.enums.Tag; +import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; +import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; +import org.apache.iotdb.db.pipe.source.dataregion.IoTDBDataRegionSource; +import org.apache.iotdb.db.pipe.source.schemaregion.IoTDBSchemaRegionSource; +import org.apache.iotdb.metrics.AbstractMetricService; +import org.apache.iotdb.metrics.metricsets.IMetricSet; +import org.apache.iotdb.metrics.utils.MetricLevel; +import org.apache.iotdb.metrics.utils.MetricType; + +import com.google.common.collect.ImmutableSet; +import org.apache.tsfile.utils.Pair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; + +public class PipeDataNodeSinglePipeMetrics implements IMetricSet { + + private static final Logger LOGGER = LoggerFactory.getLogger(PipeDataNodeSinglePipeMetrics.class); + + @SuppressWarnings("java:S3077") + private volatile AbstractMetricService metricService; + + public final Map + remainingEventAndTimeOperatorMap = new ConcurrentHashMap<>(); + + //////////////////////////// bindTo & unbindFrom (metric framework) //////////////////////////// + + @Override + public void bindTo(final AbstractMetricService metricService) { + this.metricService = metricService; + ImmutableSet.copyOf(remainingEventAndTimeOperatorMap.keySet()).forEach(this::createMetrics); + } + + private void createMetrics(final String pipeID) { + createAutoGauge(pipeID); + } + + private void createAutoGauge(final String pipeID) { + final PipeDataNodeRemainingEventAndTimeOperator operator = + remainingEventAndTimeOperatorMap.get(pipeID); + metricService.createAutoGauge( + Metric.PIPE_DATANODE_REMAINING_EVENT_COUNT.toString(), + MetricLevel.IMPORTANT, + operator, + PipeDataNodeRemainingEventAndTimeOperator::getRemainingNonHeartbeatEvents, + Tag.NAME.toString(), + operator.getPipeName(), + Tag.CREATION_TIME.toString(), + String.valueOf(operator.getCreationTime())); + metricService.createAutoGauge( + Metric.PIPE_DATANODE_REMAINING_TIME.toString(), + MetricLevel.IMPORTANT, + operator, + PipeDataNodeRemainingEventAndTimeOperator::getRemainingTime, + Tag.NAME.toString(), + operator.getPipeName(), + Tag.CREATION_TIME.toString(), + String.valueOf(operator.getCreationTime())); + + // Resources + metricService.createAutoGauge( + Metric.PIPE_FLOATING_MEMORY_USAGE.toString(), + MetricLevel.IMPORTANT, + PipeDataNodeAgent.task(), + a -> a.getFloatingMemoryUsageInByte(operator.getPipeName()), + Tag.NAME.toString(), + operator.getPipeName(), + Tag.CREATION_TIME.toString(), + String.valueOf(operator.getCreationTime())); + metricService.createAutoGauge( + Metric.PIPE_LINKED_TSFILE_COUNT.toString(), + MetricLevel.IMPORTANT, + PipeDataNodeResourceManager.tsfile(), + a -> a.getLinkedTsFileCount(operator.getPipeName()), + Tag.NAME.toString(), + operator.getPipeName(), + Tag.CREATION_TIME.toString(), + String.valueOf(operator.getCreationTime())); + metricService.createAutoGauge( + Metric.PIPE_LINKED_TSFILE_SIZE.toString(), + MetricLevel.IMPORTANT, + PipeDataNodeResourceManager.tsfile(), + a -> a.getTotalLinkedTsFileSize(operator.getPipeName()), + Tag.NAME.toString(), + operator.getPipeName(), + Tag.CREATION_TIME.toString(), + 
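As in the other metric sets touched by this patch, the class holds a volatile AbstractMetricService reference that starts out null: register(...) records the pipe unconditionally and only materializes gauges once the service is bound, while bindTo(...) replays the earlier registrations. A compact sketch of that gating, using hypothetical names and a plain Object in place of the service type:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Sketch: registration works whether it happens before or after bindTo(...).
class BindGatedMetricSetSketch {

  private volatile Object metricService; // null until bindTo(...) runs

  private final Set<String> registered = ConcurrentHashMap.newKeySet();

  void bindTo(final Object service) {
    this.metricService = service;
    registered.forEach(this::createMetrics); // replay early registrations
  }

  void register(final String pipeID) {
    registered.add(pipeID);
    if (metricService != null) { // Objects.nonNull(metricService) in the patch
      createMetrics(pipeID);
    }
  }

  private void createMetrics(final String pipeID) {
    // Would create the gauges and timers for this pipe.
  }
}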
String.valueOf(operator.getCreationTime())); + + operator.setInsertNodeTransferTimer( + metricService.getOrCreateTimer( + Metric.PIPE_INSERT_NODE_EVENT_TRANSFER_TIME.toString(), + MetricLevel.IMPORTANT, + Tag.NAME.toString(), + operator.getPipeName())); + + operator.setTsFileTransferTimer( + metricService.getOrCreateTimer( + Metric.PIPE_TSFILE_EVENT_TRANSFER_TIME.toString(), + MetricLevel.IMPORTANT, + Tag.NAME.toString(), + operator.getPipeName())); + } + + @Override + public void unbindFrom(final AbstractMetricService metricService) { + ImmutableSet.copyOf(remainingEventAndTimeOperatorMap.keySet()).forEach(this::deregister); + if (!remainingEventAndTimeOperatorMap.isEmpty()) { + LOGGER.warn( + "Failed to unbind from pipe remaining event and time metrics, RemainingEventAndTimeOperator map not empty"); + } + } + + private void removeMetrics(final String pipeID) { + removeAutoGauge(pipeID); + } + + private void removeAutoGauge(final String pipeID) { + final PipeDataNodeRemainingEventAndTimeOperator operator = + remainingEventAndTimeOperatorMap.get(pipeID); + metricService.remove( + MetricType.AUTO_GAUGE, + Metric.PIPE_DATANODE_REMAINING_EVENT_COUNT.toString(), + Tag.NAME.toString(), + operator.getPipeName(), + Tag.CREATION_TIME.toString(), + String.valueOf(operator.getCreationTime())); + metricService.remove( + MetricType.AUTO_GAUGE, + Metric.PIPE_DATANODE_REMAINING_TIME.toString(), + Tag.NAME.toString(), + operator.getPipeName(), + Tag.CREATION_TIME.toString(), + String.valueOf(operator.getCreationTime())); + metricService.remove( + MetricType.AUTO_GAUGE, + Metric.PIPE_FLOATING_MEMORY_USAGE.toString(), + Tag.NAME.toString(), + operator.getPipeName(), + Tag.CREATION_TIME.toString(), + String.valueOf(operator.getCreationTime())); + metricService.remove( + MetricType.AUTO_GAUGE, + Metric.PIPE_LINKED_TSFILE_COUNT.toString(), + Tag.NAME.toString(), + operator.getPipeName(), + Tag.CREATION_TIME.toString(), + String.valueOf(operator.getCreationTime())); + metricService.remove( + MetricType.AUTO_GAUGE, + Metric.PIPE_LINKED_TSFILE_SIZE.toString(), + Tag.NAME.toString(), + operator.getPipeName(), + Tag.CREATION_TIME.toString(), + String.valueOf(operator.getCreationTime())); + metricService.remove( + MetricType.TIMER, + Metric.PIPE_INSERT_NODE_EVENT_TRANSFER_TIME.toString(), + Tag.NAME.toString(), + operator.getPipeName()); + metricService.remove( + MetricType.TIMER, + Metric.PIPE_TSFILE_EVENT_TRANSFER_TIME.toString(), + Tag.NAME.toString(), + operator.getPipeName()); + remainingEventAndTimeOperatorMap.remove(pipeID); + } + + //////////////////////////// register & deregister (pipe integration) //////////////////////////// + + public void register(final IoTDBDataRegionSource extractor) { + // The metric is global thus the regionId is omitted + final String pipeID = extractor.getPipeName() + "_" + extractor.getCreationTime(); + remainingEventAndTimeOperatorMap.computeIfAbsent( + pipeID, + k -> + new PipeDataNodeRemainingEventAndTimeOperator( + extractor.getPipeName(), extractor.getCreationTime())); + if (Objects.nonNull(metricService)) { + createMetrics(pipeID); + } + } + + public void register(final IoTDBSchemaRegionSource extractor) { + // The metric is global thus the regionId is omitted + final String pipeID = extractor.getPipeName() + "_" + extractor.getCreationTime(); + remainingEventAndTimeOperatorMap + .computeIfAbsent( + pipeID, + k -> + new PipeDataNodeRemainingEventAndTimeOperator( + extractor.getPipeName(), extractor.getCreationTime())) + .register(extractor); + if 
(Objects.nonNull(metricService)) { + createMetrics(pipeID); + } + } + + public void increaseInsertNodeEventCount(final String pipeName, final long creationTime) { + remainingEventAndTimeOperatorMap + .computeIfAbsent( + pipeName + "_" + creationTime, + k -> new PipeDataNodeRemainingEventAndTimeOperator(pipeName, creationTime)) + .increaseInsertNodeEventCount(); + } + + public void decreaseInsertNodeEventCount( + final String pipeName, final long creationTime, final long transferTime) { + final PipeDataNodeRemainingEventAndTimeOperator operator = + remainingEventAndTimeOperatorMap.computeIfAbsent( + pipeName + "_" + creationTime, + k -> new PipeDataNodeRemainingEventAndTimeOperator(pipeName, creationTime)); + operator.decreaseInsertNodeEventCount(); + } + + public void increaseRawTabletEventCount(final String pipeName, final long creationTime) { + remainingEventAndTimeOperatorMap + .computeIfAbsent( + pipeName + "_" + creationTime, + k -> new PipeDataNodeRemainingEventAndTimeOperator(pipeName, creationTime)) + .increaseRawTabletEventCount(); + } + + public void decreaseRawTabletEventCount(final String pipeName, final long creationTime) { + remainingEventAndTimeOperatorMap + .computeIfAbsent( + pipeName + "_" + creationTime, + k -> new PipeDataNodeRemainingEventAndTimeOperator(pipeName, creationTime)) + .decreaseRawTabletEventCount(); + } + + public void increaseTsFileEventCount(final String pipeName, final long creationTime) { + remainingEventAndTimeOperatorMap + .computeIfAbsent( + pipeName + "_" + creationTime, + k -> new PipeDataNodeRemainingEventAndTimeOperator(pipeName, creationTime)) + .increaseTsFileEventCount(); + } + + public void decreaseTsFileEventCount( + final String pipeName, final long creationTime, final long transferTime) { + final PipeDataNodeRemainingEventAndTimeOperator operator = + remainingEventAndTimeOperatorMap.computeIfAbsent( + pipeName + "_" + creationTime, + k -> new PipeDataNodeRemainingEventAndTimeOperator(pipeName, creationTime)); + + operator.decreaseTsFileEventCount(); + } + + public void increaseHeartbeatEventCount(final String pipeName, final long creationTime) { + remainingEventAndTimeOperatorMap + .computeIfAbsent( + pipeName + "_" + creationTime, + k -> new PipeDataNodeRemainingEventAndTimeOperator(pipeName, creationTime)) + .increaseHeartbeatEventCount(); + } + + public void decreaseHeartbeatEventCount(final String pipeName, final long creationTime) { + remainingEventAndTimeOperatorMap + .computeIfAbsent( + pipeName + "_" + creationTime, + k -> new PipeDataNodeRemainingEventAndTimeOperator(pipeName, creationTime)) + .decreaseHeartbeatEventCount(); + } + + public void thawRate(final String pipeID) { + if (!remainingEventAndTimeOperatorMap.containsKey(pipeID)) { + // On the dataNode, "thawRate" may be called via "startPipe" while there are no subtasks yet; + // in that case the rate is thawed later, in "startPipeTask". + return; + } + remainingEventAndTimeOperatorMap.get(pipeID).thawRate(true); + } + + public void freezeRate(final String pipeID) { + if (!remainingEventAndTimeOperatorMap.containsKey(pipeID)) { + // On the dataNode, "freezeRate" may be called via "stopPipe" (after "startPipe") while there + // are still no subtasks. 
+ // We do nothing because in that case the rate is not thawed initially + return; + } + remainingEventAndTimeOperatorMap.get(pipeID).freezeRate(true); + } + + public void deregister(final String pipeID) { + if (!remainingEventAndTimeOperatorMap.containsKey(pipeID)) { + LOGGER.warn( + "Failed to deregister pipe remaining event and time metrics, RemainingEventAndTimeOperator({}) does not exist", + pipeID); + return; + } + if (Objects.nonNull(metricService)) { + removeMetrics(pipeID); + } + } + + public void markRegionCommit(final String pipeID, final boolean isDataRegion) { + if (Objects.isNull(metricService)) { + return; + } + final PipeDataNodeRemainingEventAndTimeOperator operator = + remainingEventAndTimeOperatorMap.get(pipeID); + if (Objects.isNull(operator)) { + LOGGER.warn( + "Failed to mark pipe region commit, RemainingEventAndTimeOperator({}) does not exist", + pipeID); + return; + } + + if (isDataRegion) { + operator.markDataRegionCommit(); + } else { + operator.markSchemaRegionCommit(); + } + } + + public void markTsFileCollectInvocationCount( + final String pipeID, final long collectInvocationCount) { + if (Objects.isNull(metricService)) { + return; + } + final PipeDataNodeRemainingEventAndTimeOperator operator = + remainingEventAndTimeOperatorMap.get(pipeID); + if (Objects.isNull(operator)) { + return; + } + + operator.markTsFileCollectInvocationCount(collectInvocationCount); + } + + //////////////////////////// Show pipes //////////////////////////// + + public Pair getRemainingEventAndTime( + final String pipeName, final long creationTime) { + final PipeDataNodeRemainingEventAndTimeOperator operator = + remainingEventAndTimeOperatorMap.computeIfAbsent( + pipeName + "_" + creationTime, + k -> new PipeDataNodeRemainingEventAndTimeOperator(pipeName, creationTime)); + return new Pair<>(operator.getRemainingNonHeartbeatEvents(), operator.getRemainingTime()); + } + + //////////////////////////// singleton //////////////////////////// + + private static class PipeDataNodeSinglePipeMetricsHolder { + + private static final PipeDataNodeSinglePipeMetrics INSTANCE = + new PipeDataNodeSinglePipeMetrics(); + + private PipeDataNodeSinglePipeMetricsHolder() { + // Empty constructor + } + } + + public static PipeDataNodeSinglePipeMetrics getInstance() { + return PipeDataNodeSinglePipeMetricsHolder.INSTANCE; + } + + private PipeDataNodeSinglePipeMetrics() { + PipeEventCommitManager.getInstance().setCommitRateMarker(this::markRegionCommit); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeHeartbeatEventMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeHeartbeatEventMetrics.java similarity index 98% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeHeartbeatEventMetrics.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeHeartbeatEventMetrics.java index 882fc51aaa5ab..e9d8842f2f11e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeHeartbeatEventMetrics.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeHeartbeatEventMetrics.java @@ -17,7 +17,7 @@ * under the License. 
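The increase/decrease entry points above all share one idiom: operators are addressed by the composite key pipeName + "_" + creationTime, and computeIfAbsent creates the operator on first touch, so a count update that races ahead of the explicit register(...) call still lands on a live operator. A minimal sketch of the idiom, with a plain counter standing in for the operator:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

// Sketch: lazy creation keyed by "pipeName_creationTime".
class LazyOperatorMapSketch {

  private final Map<String, AtomicLong> operators = new ConcurrentHashMap<>();

  void increase(final String pipeName, final long creationTime) {
    operators
        .computeIfAbsent(pipeName + "_" + creationTime, k -> new AtomicLong())
        .incrementAndGet(); // safe even before the pipe is formally registered
  }
}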
*/ -package org.apache.iotdb.db.pipe.metric; +package org.apache.iotdb.db.pipe.metric.overview; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeResourceMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeResourceMetrics.java similarity index 50% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeResourceMetrics.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeResourceMetrics.java index 04fb647a8259e..19a8060f89c2f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeResourceMetrics.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeResourceMetrics.java @@ -17,24 +17,33 @@ * under the License. */ -package org.apache.iotdb.db.pipe.metric; +package org.apache.iotdb.db.pipe.metric.overview; +import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; +import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager; -import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileResourceManager; -import org.apache.iotdb.db.pipe.resource.wal.PipeWALResourceManager; import org.apache.iotdb.metrics.AbstractMetricService; +import org.apache.iotdb.metrics.impl.DoNothingMetricManager; import org.apache.iotdb.metrics.metricsets.IMetricSet; +import org.apache.iotdb.metrics.type.Counter; import org.apache.iotdb.metrics.utils.MetricLevel; import org.apache.iotdb.metrics.utils.MetricType; public class PipeResourceMetrics implements IMetricSet { private static final String PIPE_USED_MEMORY = "PipeUsedMemory"; + private static final String PIPE_USED_FLOATING_MEMORY = "PipeUsedFloatingMemory"; + + private static final String PIPE_TABLET_USED_MEMORY = "PipeTabletUsedMemory"; + + private static final String PIPE_TS_FILE_USED_MEMORY = "PipeTsFileUsedMemory"; private static final String PIPE_TOTAL_MEMORY = "PipeTotalMemory"; + private static final String PIPE_FLOATING_MEMORY = "PipeFloatingMemory"; + private Counter diskIOCounter = DoNothingMetricManager.DO_NOTHING_COUNTER; //////////////////////////// bindTo & unbindFrom (metric framework) //////////////////////////// @@ -52,20 +61,47 @@ public void bindTo(final AbstractMetricService metricService) { Metric.PIPE_MEM.toString(), MetricLevel.IMPORTANT, PipeDataNodeResourceManager.memory(), - PipeMemoryManager::getTotalMemorySizeInBytes, + PipeMemoryManager::getUsedMemorySizeInBytesOfTablets, + Tag.NAME.toString(), + PIPE_TABLET_USED_MEMORY); + metricService.createAutoGauge( + Metric.PIPE_MEM.toString(), + MetricLevel.IMPORTANT, + PipeDataNodeResourceManager.memory(), + PipeMemoryManager::getUsedMemorySizeInBytesOfTsFiles, + Tag.NAME.toString(), + PIPE_TS_FILE_USED_MEMORY); + metricService.createAutoGauge( + Metric.PIPE_MEM.toString(), + MetricLevel.IMPORTANT, + PipeDataNodeResourceManager.memory(), + o -> PipeMemoryManager.getTotalNonFloatingMemorySizeInBytes(), Tag.NAME.toString(), PIPE_TOTAL_MEMORY); - // resource reference count metricService.createAutoGauge( - Metric.PIPE_PINNED_MEMTABLE_COUNT.toString(), + Metric.PIPE_MEM.toString(), + MetricLevel.IMPORTANT, + 
PipeDataNodeResourceManager.memory(), + o -> PipeMemoryManager.getTotalFloatingMemorySizeInBytes(), + Tag.NAME.toString(), + PIPE_FLOATING_MEMORY); + metricService.createAutoGauge( + Metric.PIPE_MEM.toString(), MetricLevel.IMPORTANT, - PipeDataNodeResourceManager.wal(), - PipeWALResourceManager::getPinnedWalCount); + PipeDataNodeResourceManager.memory(), + o -> PipeDataNodeAgent.task().getAllFloatingMemoryUsageInByte(), + Tag.NAME.toString(), + PIPE_USED_FLOATING_MEMORY); + // phantom reference count metricService.createAutoGauge( - Metric.PIPE_LINKED_TSFILE_COUNT.toString(), + Metric.PIPE_PHANTOM_REFERENCE_COUNT.toString(), MetricLevel.IMPORTANT, - PipeDataNodeResourceManager.tsfile(), - PipeTsFileResourceManager::getLinkedTsfileCount); + PipeDataNodeResourceManager.ref(), + PipePhantomReferenceManager::getPhantomReferenceCount); + // tsFile send rate + diskIOCounter = + metricService.getOrCreateCounter( + Metric.PIPE_TSFILE_SEND_DISK_IO.toString(), MetricLevel.IMPORTANT); } @Override @@ -73,11 +109,39 @@ public void unbindFrom(final AbstractMetricService metricService) { // pipe memory related metricService.remove( MetricType.AUTO_GAUGE, Metric.PIPE_MEM.toString(), Tag.NAME.toString(), PIPE_USED_MEMORY); + metricService.remove( + MetricType.AUTO_GAUGE, + Metric.PIPE_MEM.toString(), + Tag.NAME.toString(), + PIPE_TABLET_USED_MEMORY); + metricService.remove( + MetricType.AUTO_GAUGE, + Metric.PIPE_MEM.toString(), + Tag.NAME.toString(), + PIPE_TS_FILE_USED_MEMORY); metricService.remove( MetricType.AUTO_GAUGE, Metric.PIPE_MEM.toString(), Tag.NAME.toString(), PIPE_TOTAL_MEMORY); + metricService.remove( + MetricType.AUTO_GAUGE, + Metric.PIPE_MEM.toString(), + Tag.NAME.toString(), + PIPE_FLOATING_MEMORY); + metricService.remove( + MetricType.AUTO_GAUGE, + Metric.PIPE_MEM.toString(), + Tag.NAME.toString(), + PIPE_USED_FLOATING_MEMORY); // resource reference count - metricService.remove(MetricType.AUTO_GAUGE, Metric.PIPE_PINNED_MEMTABLE_COUNT.toString()); metricService.remove(MetricType.AUTO_GAUGE, Metric.PIPE_LINKED_TSFILE_COUNT.toString()); + metricService.remove(MetricType.AUTO_GAUGE, Metric.PIPE_LINKED_TSFILE_SIZE.toString()); + // phantom reference count + metricService.remove(MetricType.AUTO_GAUGE, Metric.PIPE_PHANTOM_REFERENCE_COUNT.toString()); + + metricService.remove(MetricType.RATE, Metric.PIPE_TSFILE_SEND_DISK_IO.toString()); + } + + public void recordDiskIO(final long bytes) { + diskIOCounter.inc(bytes); } //////////////////////////// singleton //////////////////////////// diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeTsFileToTabletsMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeTsFileToTabletsMetrics.java new file mode 100644 index 0000000000000..4f2159f356c35 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeTsFileToTabletsMetrics.java @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
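The new recordDiskIO(bytes) hook above gives the TsFile send path a single place to report I/O volume to the PIPE_TSFILE_SEND_DISK_IO counter. A hypothetical call site; the singleton accessor is assumed here, since this hunk does not show it:

// Hypothetical caller on the TsFile transfer path.
void afterTsFileChunkSent(final long bytesSent) {
  // Counter.inc(n) accumulates the total bytes sent from disk.
  PipeResourceMetrics.getInstance().recordDiskIO(bytesSent);
}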
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.pipe.metric.overview; + +import org.apache.iotdb.commons.service.metric.enums.Metric; +import org.apache.iotdb.commons.service.metric.enums.Tag; +import org.apache.iotdb.db.pipe.source.dataregion.IoTDBDataRegionSource; +import org.apache.iotdb.metrics.AbstractMetricService; +import org.apache.iotdb.metrics.metricsets.IMetricSet; +import org.apache.iotdb.metrics.type.Rate; +import org.apache.iotdb.metrics.type.Timer; +import org.apache.iotdb.metrics.utils.MetricLevel; +import org.apache.iotdb.metrics.utils.MetricType; + +import com.google.common.collect.ImmutableSet; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentSkipListSet; + +public class PipeTsFileToTabletsMetrics implements IMetricSet { + + private static final Logger LOGGER = LoggerFactory.getLogger(PipeTsFileToTabletsMetrics.class); + + @SuppressWarnings("java:S3077") + private volatile AbstractMetricService metricService; + + private final ConcurrentSkipListSet pipe = new ConcurrentSkipListSet<>(); + private final Map pipeTimerMap = new ConcurrentHashMap<>(); + private final Map pipeRateMap = new ConcurrentHashMap<>(); + + //////////////////////////// bindTo & unbindFrom (metric framework) //////////////////////////// + + @Override + public void bindTo(final AbstractMetricService metricService) { + this.metricService = metricService; + ImmutableSet.copyOf(pipe).forEach(this::createMetrics); + } + + private void createMetrics(final String pipeID) { + pipeTimerMap.putIfAbsent( + pipeID, + metricService.getOrCreateTimer( + Metric.PIPE_TSFILE_TO_TABLETS_TIME.toString(), + MetricLevel.IMPORTANT, + Tag.NAME.toString(), + pipeID)); + pipeRateMap.putIfAbsent( + pipeID, + metricService.getOrCreateRate( + Metric.PIPE_TSFILE_TO_TABLETS_RATE.toString(), + MetricLevel.IMPORTANT, + Tag.NAME.toString(), + pipeID)); + } + + @Override + public void unbindFrom(final AbstractMetricService metricService) { + ImmutableSet.copyOf(pipe).forEach(this::deregister); + if (!pipe.isEmpty()) { + LOGGER.warn( + "Failed to unbind from pipe tsfile to tablets metrics, pipe map is not empty, pipe: {}", + pipe); + } + } + + private void removeMetrics(final String pipeID) { + metricService.remove( + MetricType.TIMER, + Metric.PIPE_TSFILE_TO_TABLETS_TIME.toString(), + Tag.NAME.toString(), + pipeID); + pipeTimerMap.remove(pipeID); + + metricService.remove( + MetricType.RATE, + Metric.PIPE_TSFILE_TO_TABLETS_RATE.toString(), + Tag.NAME.toString(), + pipeID); + pipeRateMap.remove(pipeID); + } + + //////////////////////////// register & deregister //////////////////////////// + + public void register(final IoTDBDataRegionSource extractor) { + final String pipeID = extractor.getPipeName() + "_" + extractor.getCreationTime(); + pipe.add(pipeID); + if (Objects.nonNull(metricService)) { + createMetrics(pipeID); + } + } + + public void deregister(final String pipeID) { + if (!pipe.contains(pipeID)) { + LOGGER.warn( + "Failed to deregister pipe tsfile to tablets metrics, 
pipeID({}) does not exist", pipeID); + return; + } + try { + if (Objects.nonNull(metricService)) { + removeMetrics(pipeID); + } + } finally { + pipe.remove(pipeID); + } + } + + //////////////////////////// pipe integration //////////////////////////// + + public void markTsFileToTabletInvocation(final String taskID) { + if (Objects.isNull(metricService)) { + return; + } + final Rate rate = pipeRateMap.get(taskID); + if (rate == null) { + LOGGER.info( + "Failed to mark pipe tsfile to tablets invocation, pipeID({}) does not exist", taskID); + return; + } + rate.mark(); + } + + public void recordTsFileToTabletTime(final String taskID, long costTimeInNanos) { + if (Objects.isNull(metricService)) { + return; + } + final Timer timer = pipeTimerMap.get(taskID); + if (timer == null) { + LOGGER.info( + "Failed to record pipe tsfile to tablets time, pipeID({}) does not exist", taskID); + return; + } + timer.updateNanos(costTimeInNanos); + } + + //////////////////////////// singleton //////////////////////////// + + private static class Holder { + + private static final PipeTsFileToTabletsMetrics INSTANCE = new PipeTsFileToTabletsMetrics(); + + private Holder() { + // Empty constructor + } + } + + public static PipeTsFileToTabletsMetrics getInstance() { + return Holder.INSTANCE; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeProcessorMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/processor/PipeProcessorMetrics.java similarity index 98% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeProcessorMetrics.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/processor/PipeProcessorMetrics.java index d29defdc1fb1b..dd7adc6d9c045 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeProcessorMetrics.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/processor/PipeProcessorMetrics.java @@ -17,11 +17,11 @@ * under the License. */ -package org.apache.iotdb.db.pipe.metric; +package org.apache.iotdb.db.pipe.metric.processor; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; -import org.apache.iotdb.db.pipe.task.subtask.processor.PipeProcessorSubtask; +import org.apache.iotdb.db.pipe.agent.task.subtask.processor.PipeProcessorSubtask; import org.apache.iotdb.metrics.AbstractMetricService; import org.apache.iotdb.metrics.metricsets.IMetricSet; import org.apache.iotdb.metrics.type.Rate; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeReceiverMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/receiver/PipeDataNodeReceiverMetrics.java similarity index 84% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeReceiverMetrics.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/receiver/PipeDataNodeReceiverMetrics.java index eddad98804130..741fa302aec61 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeReceiverMetrics.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/receiver/PipeDataNodeReceiverMetrics.java @@ -17,7 +17,7 @@ * under the License. 
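The Holder classes used by these metric sets (PipeTsFileToTabletsMetrics above, and the others in this patch) are the standard initialization-on-demand holder idiom; reduced to its essentials, with a placeholder class name, it looks like this:

// Sketch: the JVM initializes Holder, and thus INSTANCE, lazily and
// thread-safely on the first getInstance() call; no volatile or locking needed.
public final class LazySingletonSketch {

  private LazySingletonSketch() {
    // Instantiated only through the holder below.
  }

  private static class Holder {
    private static final LazySingletonSketch INSTANCE = new LazySingletonSketch();
  }

  public static LazySingletonSketch getInstance() {
    return Holder.INSTANCE;
  }
}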
*/ -package org.apache.iotdb.db.pipe.metric; +package org.apache.iotdb.db.pipe.metric.receiver; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; @@ -45,6 +45,9 @@ public class PipeDataNodeReceiverMetrics implements IMetricSet { private Timer transferSchemaPlanTimer = DoNothingMetricManager.DO_NOTHING_TIMER; private Timer transferSchemaSnapshotPieceTimer = DoNothingMetricManager.DO_NOTHING_TIMER; private Timer transferSchemaSnapshotSealTimer = DoNothingMetricManager.DO_NOTHING_TIMER; + private Timer transferConfigPlanTimer = DoNothingMetricManager.DO_NOTHING_TIMER; + private Timer transferCompressedTimer = DoNothingMetricManager.DO_NOTHING_TIMER; + private Timer transferSliceTimer = DoNothingMetricManager.DO_NOTHING_TIMER; private static final String RECEIVER = "pipeDataNodeReceiver"; @@ -102,6 +105,18 @@ public void recordTransferSchemaSnapshotSealTimer(long costTimeInNanos) { transferSchemaSnapshotSealTimer.updateNanos(costTimeInNanos); } + public void recordTransferConfigPlanTimer(long costTimeInNanos) { + transferConfigPlanTimer.updateNanos(costTimeInNanos); + } + + public void recordTransferCompressedTimer(long costTimeInNanos) { + transferCompressedTimer.updateNanos(costTimeInNanos); + } + + public void recordTransferSliceTimer(long costTimeInNanos) { + transferSliceTimer.updateNanos(costTimeInNanos); + } + @Override public void bindTo(AbstractMetricService metricService) { bindToTimer(metricService); @@ -212,6 +227,30 @@ private void bindToTimer(AbstractMetricService metricService) { RECEIVER, Tag.TYPE.toString(), "transferSchemaSnapshotSeal"); + transferConfigPlanTimer = + metricService.getOrCreateTimer( + Metric.PIPE_DATANODE_RECEIVER.toString(), + MetricLevel.IMPORTANT, + Tag.NAME.toString(), + RECEIVER, + Tag.TYPE.toString(), + "transferConfigPlan"); + transferCompressedTimer = + metricService.getOrCreateTimer( + Metric.PIPE_DATANODE_RECEIVER.toString(), + MetricLevel.IMPORTANT, + Tag.NAME.toString(), + RECEIVER, + Tag.TYPE.toString(), + "transferCompressed"); + transferSliceTimer = + metricService.getOrCreateTimer( + Metric.PIPE_DATANODE_RECEIVER.toString(), + MetricLevel.IMPORTANT, + Tag.NAME.toString(), + RECEIVER, + Tag.TYPE.toString(), + "transferSlice"); } @Override @@ -233,6 +272,9 @@ private void unbind(AbstractMetricService metricService) { transferSchemaPlanTimer = DoNothingMetricManager.DO_NOTHING_TIMER; transferSchemaSnapshotPieceTimer = DoNothingMetricManager.DO_NOTHING_TIMER; transferSchemaSnapshotSealTimer = DoNothingMetricManager.DO_NOTHING_TIMER; + transferConfigPlanTimer = DoNothingMetricManager.DO_NOTHING_TIMER; + transferCompressedTimer = DoNothingMetricManager.DO_NOTHING_TIMER; + transferSliceTimer = DoNothingMetricManager.DO_NOTHING_TIMER; metricService.remove( MetricType.TIMER, @@ -325,6 +367,27 @@ private void unbind(AbstractMetricService metricService) { RECEIVER, Tag.TYPE.toString(), "transferSchemaSnapshotSeal"); + metricService.remove( + MetricType.TIMER, + Metric.PIPE_DATANODE_RECEIVER.toString(), + Tag.NAME.toString(), + RECEIVER, + Tag.TYPE.toString(), + "transferConfigPlan"); + metricService.remove( + MetricType.TIMER, + Metric.PIPE_DATANODE_RECEIVER.toString(), + Tag.NAME.toString(), + RECEIVER, + Tag.TYPE.toString(), + "transferCompressed"); + metricService.remove( + MetricType.TIMER, + Metric.PIPE_DATANODE_RECEIVER.toString(), + Tag.NAME.toString(), + RECEIVER, + Tag.TYPE.toString(), + "transferSlice"); } public static PipeDataNodeReceiverMetrics getInstance() { diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeSchemaRegionListenerMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionListenerMetrics.java similarity index 97% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeSchemaRegionListenerMetrics.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionListenerMetrics.java index ada38e46f2fa2..ee2b6c099891c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeSchemaRegionListenerMetrics.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionListenerMetrics.java @@ -17,11 +17,11 @@ * under the License. */ -package org.apache.iotdb.db.pipe.metric; +package org.apache.iotdb.db.pipe.metric.schema; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; -import org.apache.iotdb.db.pipe.extractor.schemaregion.SchemaRegionListeningQueue; +import org.apache.iotdb.db.pipe.source.schemaregion.SchemaRegionListeningQueue; import org.apache.iotdb.metrics.AbstractMetricService; import org.apache.iotdb.metrics.metricsets.IMetricSet; import org.apache.iotdb.metrics.utils.MetricLevel; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeSchemaRegionConnectorMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionSinkMetrics.java similarity index 79% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeSchemaRegionConnectorMetrics.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionSinkMetrics.java index 482317b47906f..940b4404086a6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeSchemaRegionConnectorMetrics.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionSinkMetrics.java @@ -17,11 +17,11 @@ * under the License. 
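PipeDataNodeReceiverMetrics above initializes every timer field to DoNothingMetricManager.DO_NOTHING_TIMER and resets it to that value on unbind, a null-object default that keeps the record...() methods callable at any time without null checks. A reduced sketch of the pattern, reusing the Timer and DoNothingMetricManager types imported in these files:

import org.apache.iotdb.metrics.impl.DoNothingMetricManager;
import org.apache.iotdb.metrics.type.Timer;

// Sketch: a do-nothing default timer removes the need for bind-state checks.
class NullObjectTimerSketch {

  private Timer transferTimer = DoNothingMetricManager.DO_NOTHING_TIMER;

  void bind(final Timer realTimer) {
    transferTimer = realTimer;
  }

  void unbind() {
    transferTimer = DoNothingMetricManager.DO_NOTHING_TIMER;
  }

  void recordTransfer(final long costTimeInNanos) {
    transferTimer.updateNanos(costTimeInNanos); // a no-op until bound
  }
}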
*/ -package org.apache.iotdb.db.pipe.metric; +package org.apache.iotdb.db.pipe.metric.schema; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; -import org.apache.iotdb.db.pipe.task.subtask.connector.PipeConnectorSubtask; +import org.apache.iotdb.db.pipe.agent.task.subtask.sink.PipeSinkSubtask; import org.apache.iotdb.metrics.AbstractMetricService; import org.apache.iotdb.metrics.metricsets.IMetricSet; import org.apache.iotdb.metrics.type.Rate; @@ -37,16 +37,14 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -public class PipeSchemaRegionConnectorMetrics implements IMetricSet { +public class PipeSchemaRegionSinkMetrics implements IMetricSet { - private static final Logger LOGGER = - LoggerFactory.getLogger(PipeSchemaRegionConnectorMetrics.class); + private static final Logger LOGGER = LoggerFactory.getLogger(PipeSchemaRegionSinkMetrics.class); @SuppressWarnings("java:S3077") private volatile AbstractMetricService metricService; - private final ConcurrentMap connectorMap = - new ConcurrentHashMap<>(); + private final ConcurrentMap connectorMap = new ConcurrentHashMap<>(); private final ConcurrentMap schemaRateMap = new ConcurrentHashMap<>(); //////////////////////////// bindTo & unbindFrom (metric framework) //////////////////////////// @@ -62,7 +60,7 @@ private void createMetrics(final String taskID) { } private void createRate(final String taskID) { - final PipeConnectorSubtask connector = connectorMap.get(taskID); + final PipeSinkSubtask connector = connectorMap.get(taskID); // Transfer event rate schemaRateMap.put( taskID, @@ -89,7 +87,7 @@ private void removeMetrics(final String taskID) { } private void removeRate(final String taskID) { - final PipeConnectorSubtask connector = connectorMap.get(taskID); + final PipeSinkSubtask connector = connectorMap.get(taskID); // Transfer event rate metricService.remove( MetricType.RATE, @@ -103,9 +101,9 @@ private void removeRate(final String taskID) { //////////////////////////// Register & deregister (pipe integration) //////////////////////////// - public void register(@NonNull final PipeConnectorSubtask pipeConnectorSubtask) { - final String taskID = pipeConnectorSubtask.getTaskID(); - connectorMap.putIfAbsent(taskID, pipeConnectorSubtask); + public void register(@NonNull final PipeSinkSubtask pipeSinkSubtask) { + final String taskID = pipeSinkSubtask.getTaskID(); + connectorMap.putIfAbsent(taskID, pipeSinkSubtask); if (Objects.nonNull(metricService)) { createMetrics(taskID); } @@ -140,21 +138,20 @@ public void markSchemaEvent(final String taskID) { //////////////////////////// singleton //////////////////////////// - private static class PipeSchemaRegionConnectorMetricsHolder { + private static class PipeSchemaRegionSinkMetricsHolder { - private static final PipeSchemaRegionConnectorMetrics INSTANCE = - new PipeSchemaRegionConnectorMetrics(); + private static final PipeSchemaRegionSinkMetrics INSTANCE = new PipeSchemaRegionSinkMetrics(); - private PipeSchemaRegionConnectorMetricsHolder() { + private PipeSchemaRegionSinkMetricsHolder() { // Empty constructor } } - public static PipeSchemaRegionConnectorMetrics getInstance() { - return PipeSchemaRegionConnectorMetricsHolder.INSTANCE; + public static PipeSchemaRegionSinkMetrics getInstance() { + return PipeSchemaRegionSinkMetricsHolder.INSTANCE; } - private PipeSchemaRegionConnectorMetrics() { + private PipeSchemaRegionSinkMetrics() { // Empty constructor } } diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeSchemaRegionExtractorMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionSourceMetrics.java similarity index 78% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeSchemaRegionExtractorMetrics.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionSourceMetrics.java index 6301494932b16..fbf3a6f1c9ea6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeSchemaRegionExtractorMetrics.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionSourceMetrics.java @@ -17,11 +17,11 @@ * under the License. */ -package org.apache.iotdb.db.pipe.metric; +package org.apache.iotdb.db.pipe.metric.schema; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; -import org.apache.iotdb.db.pipe.extractor.schemaregion.IoTDBSchemaRegionExtractor; +import org.apache.iotdb.db.pipe.source.schemaregion.IoTDBSchemaRegionSource; import org.apache.iotdb.metrics.AbstractMetricService; import org.apache.iotdb.metrics.metricsets.IMetricSet; import org.apache.iotdb.metrics.utils.MetricLevel; @@ -36,15 +36,14 @@ import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; -public class PipeSchemaRegionExtractorMetrics implements IMetricSet { +public class PipeSchemaRegionSourceMetrics implements IMetricSet { - private static final Logger LOGGER = - LoggerFactory.getLogger(PipeSchemaRegionExtractorMetrics.class); + private static final Logger LOGGER = LoggerFactory.getLogger(PipeSchemaRegionSourceMetrics.class); @SuppressWarnings("java:S3077") private volatile AbstractMetricService metricService; - private final Map extractorMap = new ConcurrentHashMap<>(); + private final Map extractorMap = new ConcurrentHashMap<>(); //////////////////////////// bindTo & unbindFrom (metric framework) //////////////////////////// @@ -59,12 +58,12 @@ private void createMetrics(final String taskID) { } private void createAutoGauge(final String taskID) { - final IoTDBSchemaRegionExtractor extractor = extractorMap.get(taskID); + final IoTDBSchemaRegionSource extractor = extractorMap.get(taskID); metricService.createAutoGauge( Metric.UNTRANSFERRED_SCHEMA_COUNT.toString(), MetricLevel.IMPORTANT, extractorMap.get(taskID), - IoTDBSchemaRegionExtractor::getUnTransferredEventCount, + IoTDBSchemaRegionSource::getUnTransferredEventCount, Tag.NAME.toString(), extractor.getPipeName(), Tag.REGION.toString(), @@ -87,7 +86,7 @@ private void removeMetrics(final String taskID) { } private void removeAutoGauge(final String taskID) { - final IoTDBSchemaRegionExtractor extractor = extractorMap.get(taskID); + final IoTDBSchemaRegionSource extractor = extractorMap.get(taskID); // pending event count metricService.remove( MetricType.AUTO_GAUGE, @@ -102,7 +101,7 @@ private void removeAutoGauge(final String taskID) { //////////////////////////// register & deregister (pipe integration) //////////////////////////// - public void register(@NonNull final IoTDBSchemaRegionExtractor extractor) { + public void register(@NonNull final IoTDBSchemaRegionSource extractor) { final String taskID = extractor.getTaskID(); extractorMap.putIfAbsent(taskID, extractor); if (Objects.nonNull(metricService)) { @@ -125,21 +124,21 @@ public void deregister(final String taskID) { //////////////////////////// singleton 
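One detail of the schema-source gauges above: a gauge's identity is its metric name plus the full ordered tag list, so the UNTRANSFERRED_SCHEMA_COUNT series is keyed by both the pipe name and the region, and removal must repeat both tags. A hedged sketch of the removal side; the region accessor is hypothetical, since this hunk elides the actual tag values:

// Sketch: remove(...) must repeat the same NAME and REGION tags used at creation.
metricService.remove(
    MetricType.AUTO_GAUGE,
    Metric.UNTRANSFERRED_SCHEMA_COUNT.toString(),
    Tag.NAME.toString(),
    extractor.getPipeName(),
    Tag.REGION.toString(),
    String.valueOf(extractor.getRegionId())); // hypothetical accessor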
//////////////////////////// - private static class PipeSchemaRegionExtractorMetricsHolder { + private static class PipeSchemaRegionSourceMetricsHolder { - private static final PipeSchemaRegionExtractorMetrics INSTANCE = - new PipeSchemaRegionExtractorMetrics(); + private static final PipeSchemaRegionSourceMetrics INSTANCE = + new PipeSchemaRegionSourceMetrics(); - private PipeSchemaRegionExtractorMetricsHolder() { + private PipeSchemaRegionSourceMetricsHolder() { // Empty constructor } } - public static PipeSchemaRegionExtractorMetrics getInstance() { - return PipeSchemaRegionExtractorMetricsHolder.INSTANCE; + public static PipeSchemaRegionSourceMetrics getInstance() { + return PipeSchemaRegionSourceMetricsHolder.INSTANCE; } - private PipeSchemaRegionExtractorMetrics() { + private PipeSchemaRegionSourceMetrics() { // Empty constructor } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataRegionConnectorMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/sink/PipeDataRegionSinkMetrics.java similarity index 54% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataRegionConnectorMetrics.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/sink/PipeDataRegionSinkMetrics.java index bf2ce7b5763e3..23024424b9290 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataRegionConnectorMetrics.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/sink/PipeDataRegionSinkMetrics.java @@ -17,14 +17,16 @@ * under the License. */ -package org.apache.iotdb.db.pipe.metric; +package org.apache.iotdb.db.pipe.metric.sink; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; -import org.apache.iotdb.db.pipe.task.subtask.connector.PipeConnectorSubtask; +import org.apache.iotdb.db.pipe.agent.task.subtask.sink.PipeSinkSubtask; import org.apache.iotdb.metrics.AbstractMetricService; import org.apache.iotdb.metrics.metricsets.IMetricSet; +import org.apache.iotdb.metrics.type.Histogram; import org.apache.iotdb.metrics.type.Rate; +import org.apache.iotdb.metrics.type.Timer; import org.apache.iotdb.metrics.utils.MetricLevel; import org.apache.iotdb.metrics.utils.MetricType; @@ -38,15 +40,14 @@ import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; -public class PipeDataRegionConnectorMetrics implements IMetricSet { +public class PipeDataRegionSinkMetrics implements IMetricSet { - private static final Logger LOGGER = - LoggerFactory.getLogger(PipeDataRegionConnectorMetrics.class); + private static final Logger LOGGER = LoggerFactory.getLogger(PipeDataRegionSinkMetrics.class); @SuppressWarnings("java:S3077") private volatile AbstractMetricService metricService; - private final Map connectorMap = new HashMap<>(); + private final Map connectorMap = new HashMap<>(); private final Map tabletRateMap = new ConcurrentHashMap<>(); @@ -54,6 +55,8 @@ public class PipeDataRegionConnectorMetrics implements IMetricSet { private final Map pipeHeartbeatRateMap = new ConcurrentHashMap<>(); + private final Map compressionTimerMap = new ConcurrentHashMap<>(); + //////////////////////////// bindTo & unbindFrom (metric framework) //////////////////////////// @Override @@ -68,16 +71,18 @@ public void bindTo(final AbstractMetricService metricService) { private void createMetrics(final String taskID) { createAutoGauge(taskID); createRate(taskID); + createTimer(taskID); + 
createHistogram(taskID); } private void createAutoGauge(final String taskID) { - final PipeConnectorSubtask connector = connectorMap.get(taskID); + final PipeSinkSubtask connector = connectorMap.get(taskID); // Pending event count metricService.createAutoGauge( Metric.UNTRANSFERRED_TABLET_COUNT.toString(), MetricLevel.IMPORTANT, connector, - PipeConnectorSubtask::getTabletInsertionEventCount, + PipeSinkSubtask::getTabletInsertionEventCount, Tag.NAME.toString(), connector.getAttributeSortedString(), Tag.INDEX.toString(), @@ -88,7 +93,7 @@ private void createAutoGauge(final String taskID) { Metric.UNTRANSFERRED_TSFILE_COUNT.toString(), MetricLevel.IMPORTANT, connector, - PipeConnectorSubtask::getTsFileInsertionEventCount, + PipeSinkSubtask::getTsFileInsertionEventCount, Tag.NAME.toString(), connector.getAttributeSortedString(), Tag.INDEX.toString(), @@ -99,7 +104,7 @@ private void createAutoGauge(final String taskID) { Metric.UNTRANSFERRED_HEARTBEAT_COUNT.toString(), MetricLevel.IMPORTANT, connector, - PipeConnectorSubtask::getPipeHeartbeatEventCount, + PipeSinkSubtask::getPipeHeartbeatEventCount, Tag.NAME.toString(), connector.getAttributeSortedString(), Tag.INDEX.toString(), @@ -111,7 +116,41 @@ private void createAutoGauge(final String taskID) { Metric.PIPE_ASYNC_CONNECTOR_RETRY_EVENT_QUEUE_SIZE.toString(), MetricLevel.IMPORTANT, connector, - PipeConnectorSubtask::getAsyncConnectorRetryEventQueueSize, + PipeSinkSubtask::getAsyncConnectorRetryEventQueueSize, + Tag.NAME.toString(), + connector.getAttributeSortedString(), + Tag.INDEX.toString(), + String.valueOf(connector.getConnectorIndex()), + Tag.CREATION_TIME.toString(), + String.valueOf(connector.getCreationTime())); + metricService.createAutoGauge( + Metric.PIPE_PENDING_HANDLERS_SIZE.toString(), + MetricLevel.IMPORTANT, + connector, + PipeSinkSubtask::getPendingHandlersSize, + Tag.NAME.toString(), + connector.getAttributeSortedString(), + Tag.INDEX.toString(), + String.valueOf(connector.getConnectorIndex()), + Tag.CREATION_TIME.toString(), + String.valueOf(connector.getCreationTime())); + // Metrics related to IoTDB connector + metricService.createAutoGauge( + Metric.PIPE_TOTAL_UNCOMPRESSED_SIZE.toString(), + MetricLevel.IMPORTANT, + connector, + PipeSinkSubtask::getTotalUncompressedSize, + Tag.NAME.toString(), + connector.getAttributeSortedString(), + Tag.INDEX.toString(), + String.valueOf(connector.getConnectorIndex()), + Tag.CREATION_TIME.toString(), + String.valueOf(connector.getCreationTime())); + metricService.createAutoGauge( + Metric.PIPE_TOTAL_COMPRESSED_SIZE.toString(), + MetricLevel.IMPORTANT, + connector, + PipeSinkSubtask::getTotalCompressedSize, Tag.NAME.toString(), connector.getAttributeSortedString(), Tag.INDEX.toString(), @@ -121,7 +160,7 @@ private void createAutoGauge(final String taskID) { } private void createRate(final String taskID) { - final PipeConnectorSubtask connector = connectorMap.get(taskID); + final PipeSinkSubtask connector = connectorMap.get(taskID); // Transfer event rate tabletRateMap.put( taskID, @@ -158,6 +197,71 @@ private void createRate(final String taskID) { String.valueOf(connector.getCreationTime()))); } + private void createTimer(final String taskID) { + final PipeSinkSubtask connector = connectorMap.get(taskID); + compressionTimerMap.putIfAbsent( + connector.getAttributeSortedString(), + metricService.getOrCreateTimer( + Metric.PIPE_COMPRESSION_TIME.toString(), + MetricLevel.IMPORTANT, + Tag.NAME.toString(), + connector.getAttributeSortedString(), + Tag.CREATION_TIME.toString(), + 
String.valueOf(connector.getCreationTime()))); + } + + private void createHistogram(final String taskID) { + final PipeSinkSubtask connector = connectorMap.get(taskID); + + final Histogram tabletBatchSizeHistogram = + metricService.getOrCreateHistogram( + Metric.PIPE_INSERT_NODE_BATCH_SIZE.toString(), + MetricLevel.IMPORTANT, + Tag.NAME.toString(), + connector.getAttributeSortedString(), + Tag.CREATION_TIME.toString(), + String.valueOf(connector.getCreationTime())); + connector.setTabletBatchSizeHistogram(tabletBatchSizeHistogram); + + final Histogram tsFileBatchSizeHistogram = + metricService.getOrCreateHistogram( + Metric.PIPE_TSFILE_BATCH_SIZE.toString(), + MetricLevel.IMPORTANT, + Tag.NAME.toString(), + connector.getAttributeSortedString(), + Tag.CREATION_TIME.toString(), + String.valueOf(connector.getCreationTime())); + connector.setTsFileBatchSizeHistogram(tsFileBatchSizeHistogram); + + final Histogram tabletBatchTimeIntervalHistogram = + metricService.getOrCreateHistogram( + Metric.PIPE_INSERT_NODE_BATCH_TIME_COST.toString(), + MetricLevel.IMPORTANT, + Tag.NAME.toString(), + connector.getAttributeSortedString(), + Tag.CREATION_TIME.toString(), + String.valueOf(connector.getCreationTime())); + connector.setTabletBatchTimeIntervalHistogram(tabletBatchTimeIntervalHistogram); + + final Histogram tsFileBatchTimeIntervalHistogram = + metricService.getOrCreateHistogram( + Metric.PIPE_TSFILE_BATCH_TIME_COST.toString(), + MetricLevel.IMPORTANT, + Tag.NAME.toString(), + connector.getAttributeSortedString(), + Tag.CREATION_TIME.toString(), + String.valueOf(connector.getCreationTime())); + connector.setTsFileBatchTimeIntervalHistogram(tsFileBatchTimeIntervalHistogram); + + Histogram eventSizeHistogram = + metricService.getOrCreateHistogram( + Metric.PIPE_CONNECTOR_BATCH_SIZE.toString(), + MetricLevel.IMPORTANT, + Tag.NAME.toString(), + connector.getAttributeSortedString()); + connector.setEventSizeHistogram(eventSizeHistogram); + } + @Override public void unbindFrom(final AbstractMetricService metricService) { final ImmutableSet taskIDs = ImmutableSet.copyOf(connectorMap.keySet()); @@ -173,10 +277,12 @@ public void unbindFrom(final AbstractMetricService metricService) { private void removeMetrics(final String taskID) { removeAutoGauge(taskID); removeRate(taskID); + removeTimer(taskID); + removeHistogram(taskID); } private void removeAutoGauge(final String taskID) { - final PipeConnectorSubtask connector = connectorMap.get(taskID); + final PipeSinkSubtask connector = connectorMap.get(taskID); // Pending event count metricService.remove( MetricType.AUTO_GAUGE, @@ -215,10 +321,38 @@ private void removeAutoGauge(final String taskID) { String.valueOf(connector.getConnectorIndex()), Tag.CREATION_TIME.toString(), String.valueOf(connector.getCreationTime())); + metricService.remove( + MetricType.AUTO_GAUGE, + Metric.PIPE_PENDING_HANDLERS_SIZE.toString(), + Tag.NAME.toString(), + connector.getAttributeSortedString(), + Tag.INDEX.toString(), + String.valueOf(connector.getConnectorIndex()), + Tag.CREATION_TIME.toString(), + String.valueOf(connector.getCreationTime())); + // Metrics related to IoTDB connector + metricService.remove( + MetricType.AUTO_GAUGE, + Metric.PIPE_TOTAL_UNCOMPRESSED_SIZE.toString(), + Tag.NAME.toString(), + connector.getAttributeSortedString(), + Tag.INDEX.toString(), + String.valueOf(connector.getConnectorIndex()), + Tag.CREATION_TIME.toString(), + String.valueOf(connector.getCreationTime())); + metricService.remove( + MetricType.AUTO_GAUGE, + 
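createHistogram(...) above resolves each histogram once at registration time and injects it into the subtask through a setter, so the hot send path records without any registry lookup. A hypothetical recording site inside the subtask, following the setter names above:

import org.apache.iotdb.metrics.type.Histogram;

// Sketch: hot-path usage after injection; no metric-registry lookup here.
class BatchRecordingSketch {

  private final Histogram tabletBatchSizeHistogram;
  private final Histogram tabletBatchTimeIntervalHistogram;

  BatchRecordingSketch(final Histogram sizeHistogram, final Histogram intervalHistogram) {
    this.tabletBatchSizeHistogram = sizeHistogram;
    this.tabletBatchTimeIntervalHistogram = intervalHistogram;
  }

  void onTabletBatchFlushed(final int batchSize, final long batchStartNanos) {
    tabletBatchSizeHistogram.update(batchSize);
    tabletBatchTimeIntervalHistogram.update(System.nanoTime() - batchStartNanos);
  }
}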
Metric.PIPE_TOTAL_COMPRESSED_SIZE.toString(), + Tag.NAME.toString(), + connector.getAttributeSortedString(), + Tag.INDEX.toString(), + String.valueOf(connector.getConnectorIndex()), + Tag.CREATION_TIME.toString(), + String.valueOf(connector.getCreationTime())); } private void removeRate(final String taskID) { - final PipeConnectorSubtask connector = connectorMap.get(taskID); + final PipeSinkSubtask connector = connectorMap.get(taskID); // Transfer event rate metricService.remove( MetricType.RATE, @@ -252,11 +386,61 @@ private void removeRate(final String taskID) { pipeHeartbeatRateMap.remove(taskID); } + private void removeTimer(final String taskID) { + final PipeSinkSubtask connector = connectorMap.get(taskID); + metricService.remove( + MetricType.TIMER, + Metric.PIPE_COMPRESSION_TIME.toString(), + Tag.NAME.toString(), + connector.getAttributeSortedString(), + Tag.CREATION_TIME.toString(), + String.valueOf(connector.getCreationTime())); + compressionTimerMap.remove(connector.getAttributeSortedString()); + } + + private void removeHistogram(final String taskID) { + final PipeSinkSubtask connector = connectorMap.get(taskID); + metricService.remove( + MetricType.HISTOGRAM, + Metric.PIPE_INSERT_NODE_BATCH_SIZE.toString(), + Tag.NAME.toString(), + connector.getAttributeSortedString(), + Tag.CREATION_TIME.toString(), + String.valueOf(connector.getCreationTime())); + metricService.remove( + MetricType.HISTOGRAM, + Metric.PIPE_TSFILE_BATCH_SIZE.toString(), + Tag.NAME.toString(), + connector.getAttributeSortedString(), + Tag.CREATION_TIME.toString(), + String.valueOf(connector.getCreationTime())); + metricService.remove( + MetricType.HISTOGRAM, + Metric.PIPE_INSERT_NODE_BATCH_TIME_COST.toString(), + Tag.NAME.toString(), + connector.getAttributeSortedString(), + Tag.CREATION_TIME.toString(), + String.valueOf(connector.getCreationTime())); + metricService.remove( + MetricType.HISTOGRAM, + Metric.PIPE_TSFILE_BATCH_TIME_COST.toString(), + Tag.NAME.toString(), + connector.getAttributeSortedString(), + Tag.CREATION_TIME.toString(), + String.valueOf(connector.getCreationTime())); + + metricService.remove( + MetricType.HISTOGRAM, + Metric.PIPE_CONNECTOR_BATCH_SIZE.toString(), + Tag.NAME.toString(), + connector.getAttributeSortedString()); + } + //////////////////////////// register & deregister (pipe integration) //////////////////////////// - public void register(@NonNull final PipeConnectorSubtask pipeConnectorSubtask) { - final String taskID = pipeConnectorSubtask.getTaskID(); - connectorMap.putIfAbsent(taskID, pipeConnectorSubtask); + public void register(@NonNull final PipeSinkSubtask pipeSinkSubtask) { + final String taskID = pipeSinkSubtask.getTaskID(); + connectorMap.putIfAbsent(taskID, pipeSinkSubtask); if (Objects.nonNull(metricService)) { createMetrics(taskID); } @@ -315,23 +499,26 @@ public void markPipeHeartbeatEvent(final String taskID) { rate.mark(); } + public Timer getCompressionTimer(final String attributeSortedString) { + return Objects.isNull(metricService) ? 
null : compressionTimerMap.get(attributeSortedString); + } + //////////////////////////// singleton //////////////////////////// - private static class PipeConnectorMetricsHolder { + private static class PipeSinkMetricsHolder { - private static final PipeDataRegionConnectorMetrics INSTANCE = - new PipeDataRegionConnectorMetrics(); + private static final PipeDataRegionSinkMetrics INSTANCE = new PipeDataRegionSinkMetrics(); - private PipeConnectorMetricsHolder() { + private PipeSinkMetricsHolder() { // Empty constructor } } - public static PipeDataRegionConnectorMetrics getInstance() { - return PipeDataRegionConnectorMetrics.PipeConnectorMetricsHolder.INSTANCE; + public static PipeDataRegionSinkMetrics getInstance() { + return PipeSinkMetricsHolder.INSTANCE; } - private PipeDataRegionConnectorMetrics() { + private PipeDataRegionSinkMetrics() { // Empty constructor } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeAssignerMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeAssignerMetrics.java similarity index 97% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeAssignerMetrics.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeAssignerMetrics.java index 6a3dbb9d1355e..43bf41dfb8361 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeAssignerMetrics.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeAssignerMetrics.java @@ -17,11 +17,11 @@ * under the License. */ -package org.apache.iotdb.db.pipe.metric; +package org.apache.iotdb.db.pipe.metric.source; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.assigner.PipeDataRegionAssigner; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.assigner.PipeDataRegionAssigner; import org.apache.iotdb.metrics.AbstractMetricService; import org.apache.iotdb.metrics.metricsets.IMetricSet; import org.apache.iotdb.metrics.utils.MetricLevel; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataRegionEventCounter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeDataRegionEventCounter.java similarity index 94% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataRegionEventCounter.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeDataRegionEventCounter.java index 0aed61801ef9d..e2db681b1df9c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataRegionEventCounter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeDataRegionEventCounter.java @@ -17,7 +17,7 @@ * under the License. 
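Unlike the do-nothing defaults used elsewhere in this patch, getCompressionTimer(...) above returns null both before the metric service is bound and for unknown keys, so callers must guard the lookup. A hypothetical call site on the compression path:

import org.apache.iotdb.metrics.type.Timer;

// Sketch: guard the nullable lookup before recording compression time.
void recordCompression(final String attributeSortedString, final long compressionNanos) {
  final Timer compressionTimer =
      PipeDataRegionSinkMetrics.getInstance().getCompressionTimer(attributeSortedString);
  if (compressionTimer != null) {
    compressionTimer.updateNanos(compressionNanos);
  }
}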
*/ -package org.apache.iotdb.db.pipe.metric; +package org.apache.iotdb.db.pipe.metric.source; import org.apache.iotdb.commons.pipe.metric.PipeEventCounter; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; @@ -36,17 +36,17 @@ public class PipeDataRegionEventCounter extends PipeEventCounter { private final AtomicInteger pipeHeartbeatEventCount = new AtomicInteger(0); @Override - public Integer getTsFileInsertionEventCount() { + public int getTsFileInsertionEventCount() { return tsFileInsertionEventCount.get(); } @Override - public Integer getTabletInsertionEventCount() { + public int getTabletInsertionEventCount() { return tabletInsertionEventCount.get(); } @Override - public Integer getPipeHeartbeatEventCount() { + public int getPipeHeartbeatEventCount() { return pipeHeartbeatEventCount.get(); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataRegionExtractorMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeDataRegionSourceMetrics.java similarity index 87% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataRegionExtractorMetrics.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeDataRegionSourceMetrics.java index dc90007f45923..c2f7da4c490b9 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataRegionExtractorMetrics.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeDataRegionSourceMetrics.java @@ -17,12 +17,12 @@ * under the License. */ -package org.apache.iotdb.db.pipe.metric; +package org.apache.iotdb.db.pipe.metric.source; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; -import org.apache.iotdb.db.pipe.extractor.dataregion.IoTDBDataRegionExtractor; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.epoch.TsFileEpoch; +import org.apache.iotdb.db.pipe.source.dataregion.IoTDBDataRegionSource; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.epoch.TsFileEpoch; import org.apache.iotdb.metrics.AbstractMetricService; import org.apache.iotdb.metrics.metricsets.IMetricSet; import org.apache.iotdb.metrics.type.Gauge; @@ -39,15 +39,14 @@ import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; -public class PipeDataRegionExtractorMetrics implements IMetricSet { +public class PipeDataRegionSourceMetrics implements IMetricSet { - private static final Logger LOGGER = - LoggerFactory.getLogger(PipeDataRegionExtractorMetrics.class); + private static final Logger LOGGER = LoggerFactory.getLogger(PipeDataRegionSourceMetrics.class); @SuppressWarnings("java:S3077") private volatile AbstractMetricService metricService; - private final Map extractorMap = new ConcurrentHashMap<>(); + private final Map extractorMap = new ConcurrentHashMap<>(); private final Map tabletRateMap = new ConcurrentHashMap<>(); @@ -57,7 +56,7 @@ public class PipeDataRegionExtractorMetrics implements IMetricSet { private final Map recentProcessedTsFileEpochStateMap = new ConcurrentHashMap<>(); - public Map getExtractorMap() { + public Map getExtractorMap() { return extractorMap; } @@ -79,13 +78,13 @@ private void createMetrics(final String taskID) { } private void createAutoGauge(final String taskID) { - final IoTDBDataRegionExtractor extractor = extractorMap.get(taskID); + final IoTDBDataRegionSource extractor = extractorMap.get(taskID); // Pending event count 
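Changing the counter getters above from Integer to int removes a boxing allocation on every read. A minimal illustration of the difference (the class name is hypothetical):

import java.util.concurrent.atomic.AtomicInteger;

public class EventCounter {
  private final AtomicInteger count = new AtomicInteger(0);

  // Boxed variant: AtomicInteger.get() returns int, which is then
  // auto-boxed into a fresh Integer on every call.
  public Integer getCountBoxed() {
    return count.get();
  }

  // Primitive variant: no allocation, no unboxing at the call site.
  public int getCount() {
    return count.get();
  }
}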
metricService.createAutoGauge( Metric.UNPROCESSED_HISTORICAL_TSFILE_COUNT.toString(), MetricLevel.IMPORTANT, extractor, - IoTDBDataRegionExtractor::getHistoricalTsFileInsertionEventCount, + IoTDBDataRegionSource::getHistoricalTsFileInsertionEventCount, Tag.NAME.toString(), extractor.getPipeName(), Tag.REGION.toString(), @@ -96,7 +95,7 @@ private void createAutoGauge(final String taskID) { Metric.UNPROCESSED_REALTIME_TSFILE_COUNT.toString(), MetricLevel.IMPORTANT, extractor, - IoTDBDataRegionExtractor::getRealtimeTsFileInsertionEventCount, + IoTDBDataRegionSource::getRealtimeTsFileInsertionEventCount, Tag.NAME.toString(), extractor.getPipeName(), Tag.REGION.toString(), @@ -107,7 +106,7 @@ private void createAutoGauge(final String taskID) { Metric.UNPROCESSED_TABLET_COUNT.toString(), MetricLevel.IMPORTANT, extractor, - IoTDBDataRegionExtractor::getTabletInsertionEventCount, + IoTDBDataRegionSource::getTabletInsertionEventCount, Tag.NAME.toString(), extractor.getPipeName(), Tag.REGION.toString(), @@ -118,7 +117,7 @@ private void createAutoGauge(final String taskID) { Metric.UNPROCESSED_HEARTBEAT_COUNT.toString(), MetricLevel.IMPORTANT, extractor, - IoTDBDataRegionExtractor::getPipeHeartbeatEventCount, + IoTDBDataRegionSource::getPipeHeartbeatEventCount, Tag.NAME.toString(), extractor.getPipeName(), Tag.REGION.toString(), @@ -128,7 +127,7 @@ private void createAutoGauge(final String taskID) { } private void createRate(final String taskID) { - final IoTDBDataRegionExtractor extractor = extractorMap.get(taskID); + final IoTDBDataRegionSource extractor = extractorMap.get(taskID); // Supply event rate tabletRateMap.put( taskID, @@ -166,7 +165,7 @@ private void createRate(final String taskID) { } private void createGauge(final String taskID) { - final IoTDBDataRegionExtractor extractor = extractorMap.get(taskID); + final IoTDBDataRegionSource extractor = extractorMap.get(taskID); // Tsfile epoch state recentProcessedTsFileEpochStateMap.put( taskID, @@ -199,7 +198,7 @@ private void removeMetrics(final String taskID) { } private void removeAutoGauge(final String taskID) { - final IoTDBDataRegionExtractor extractor = extractorMap.get(taskID); + final IoTDBDataRegionSource extractor = extractorMap.get(taskID); // pending event count metricService.remove( MetricType.AUTO_GAUGE, @@ -240,7 +239,7 @@ private void removeAutoGauge(final String taskID) { } private void removeRate(final String taskID) { - final IoTDBDataRegionExtractor extractor = extractorMap.get(taskID); + final IoTDBDataRegionSource extractor = extractorMap.get(taskID); // supply event rate metricService.remove( MetricType.RATE, @@ -275,7 +274,7 @@ private void removeRate(final String taskID) { } private void removeGauge(final String taskID) { - final IoTDBDataRegionExtractor extractor = extractorMap.get(taskID); + final IoTDBDataRegionSource extractor = extractorMap.get(taskID); // Tsfile epoch state metricService.remove( MetricType.GAUGE, @@ -290,7 +289,7 @@ private void removeGauge(final String taskID) { //////////////////////////// register & deregister (pipe integration) //////////////////////////// - public void register(@NonNull final IoTDBDataRegionExtractor extractor) { + public void register(@NonNull final IoTDBDataRegionSource extractor) { final String taskID = extractor.getTaskID(); extractorMap.putIfAbsent(taskID, extractor); if (Objects.nonNull(metricService)) { @@ -370,21 +369,20 @@ public void setRecentProcessedTsFileEpochState( //////////////////////////// singleton //////////////////////////// - private static class 
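The createAutoGauge calls above hand the metric service a live object plus a getter, so the gauge is sampled on scrape rather than pushed by the caller. A reduced model of that call shape; the MetricService interface below is an assumption for illustration, not the IoTDB API:

import java.util.function.ToLongFunction;

// Hypothetical, simplified service: keeps (obj, getter) and re-evaluates the
// getter each time the gauge is read, so the value is always current.
interface MetricService {
  <T> void createAutoGauge(String name, T obj, ToLongFunction<T> getter, String... tags);
}

class Source {
  long pendingTsFiles() {
    return 42L; // stand-in for the real pending-event count
  }

  void register(MetricService service) {
    // Method reference instead of a captured snapshot: sampled per scrape.
    service.createAutoGauge("unprocessed_historical_tsfile_count",
        this, Source::pendingTsFiles, "name", "demo-pipe");
  }
}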
PipeExtractorMetricsHolder { + private static class PipeSourceMetricsHolder { - private static final PipeDataRegionExtractorMetrics INSTANCE = - new PipeDataRegionExtractorMetrics(); + private static final PipeDataRegionSourceMetrics INSTANCE = new PipeDataRegionSourceMetrics(); - private PipeExtractorMetricsHolder() { + private PipeSourceMetricsHolder() { // Empty constructor } } - public static PipeDataRegionExtractorMetrics getInstance() { - return PipeDataRegionExtractorMetrics.PipeExtractorMetricsHolder.INSTANCE; + public static PipeDataRegionSourceMetrics getInstance() { + return PipeSourceMetricsHolder.INSTANCE; } - private PipeDataRegionExtractorMetrics() { + private PipeDataRegionSourceMetrics() { // Empty constructor } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/AggregateProcessor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/AggregateProcessor.java index 3e6ed81f7136a..dc5a7e4390f76 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/AggregateProcessor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/AggregateProcessor.java @@ -25,9 +25,9 @@ import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex; import org.apache.iotdb.commons.consensus.index.impl.TimeWindowStateProgressIndex; import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskProcessorRuntimeEnvironment; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; import org.apache.iotdb.db.pipe.agent.plugin.dataregion.PipeDataRegionPluginAgent; @@ -378,17 +378,21 @@ public void process( .set(System.currentTimeMillis()); final AtomicReference exception = new AtomicReference<>(); - final TimeWindowStateProgressIndex progressIndex = - new TimeWindowStateProgressIndex(new ConcurrentHashMap<>()); + final TimeWindowStateProgressIndex[] progressIndex = { + new TimeWindowStateProgressIndex(new ConcurrentHashMap<>()) + }; final Iterable outputEvents = tabletInsertionEvent.processRowByRow( (row, rowCollector) -> - progressIndex.updateToMinimumEqualOrIsAfterProgressIndex( - new TimeWindowStateProgressIndex(processRow(row, rowCollector, exception)))); + progressIndex[0] = + (TimeWindowStateProgressIndex) + progressIndex[0].updateToMinimumEqualOrIsAfterProgressIndex( + new TimeWindowStateProgressIndex( + processRow(row, rowCollector, exception)))); // Must reset progressIndex before collection - ((EnrichedEvent) tabletInsertionEvent).bindProgressIndex(progressIndex); + ((EnrichedEvent) tabletInsertionEvent).bindProgressIndex(progressIndex[0]); outputEvents.forEach( event -> { @@ -508,9 +512,26 @@ public void process( final TsFileInsertionEvent tsFileInsertionEvent, final EventCollector eventCollector) throws Exception { try { - for (final TabletInsertionEvent tabletInsertionEvent : - tsFileInsertionEvent.toTabletInsertionEvents()) { - process(tabletInsertionEvent, eventCollector); + if (tsFileInsertionEvent instanceof PipeTsFileInsertionEvent) { + final AtomicReference ex = new AtomicReference<>(); + ((PipeTsFileInsertionEvent) tsFileInsertionEvent) + .consumeTabletInsertionEventsWithRetry( + event -> { + try { + process(event, eventCollector); + } 
catch (Exception e) { + ex.set(e); + } + }, + "AggregateProcessor::process"); + if (ex.get() != null) { + throw ex.get(); + } + } else { + for (final TabletInsertionEvent tabletInsertionEvent : + tsFileInsertionEvent.toTabletInsertionEvents()) { + process(tabletInsertionEvent, eventCollector); + } } } finally { tsFileInsertionEvent.close(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/aggregatedresult/standardstatistics/MaxValueOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/aggregatedresult/standardstatistics/MaxValueOperator.java new file mode 100644 index 0000000000000..7967771a069a7 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/aggregatedresult/standardstatistics/MaxValueOperator.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.pipe.processor.aggregate.operator.aggregatedresult.standardstatistics; + +import org.apache.iotdb.db.pipe.processor.aggregate.operator.aggregatedresult.AggregatedResultOperator; +import org.apache.iotdb.db.pipe.processor.aggregate.operator.intermediateresult.CustomizedReadableIntermediateResults; + +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.utils.Pair; + +import java.util.Collections; +import java.util.Map; +import java.util.Set; + +public class MaxValueOperator implements AggregatedResultOperator { + @Override + public String getName() { + return "max"; + } + + @Override + public void configureSystemParameters(final Map systemParams) { + // Do nothing + } + + @Override + public Set getDeclaredIntermediateValueNames() { + return Collections.singleton("max"); + } + + @Override + public Pair terminateWindow( + final TSDataType measurementDataType, + final CustomizedReadableIntermediateResults intermediateResults) { + return new Pair<>(measurementDataType, intermediateResults.getObject("max")); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/aggregatedresult/standardstatistics/MinValueOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/aggregatedresult/standardstatistics/MinValueOperator.java new file mode 100644 index 0000000000000..0bed3a0dee003 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/aggregatedresult/standardstatistics/MinValueOperator.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
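The AggregateProcessor change above switches the progress index to a single-element array so the row lambda can reassign it, and both processors tunnel checked exceptions out of a consumer through an AtomicReference. A condensed sketch of both workarounds (names are illustrative):

import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

public class LambdaWorkarounds {
  public static void main(String[] args) throws Exception {
    List<Integer> rows = List.of(1, 2, 3);

    // 1. A local captured by a lambda must be effectively final; a
    //    single-element array gives the lambda a mutable slot to write to.
    final long[] maxSeen = {Long.MIN_VALUE};

    // 2. Consumers cannot throw checked exceptions, so the lambda stores
    //    the failure and the caller rethrows it afterwards.
    final AtomicReference<Exception> failure = new AtomicReference<>();

    rows.forEach(row -> {
      try {
        maxSeen[0] = Math.max(maxSeen[0], row);
      } catch (Exception e) {
        failure.set(e);
      }
    });

    if (failure.get() != null) {
      throw failure.get();
    }
    System.out.println("max = " + maxSeen[0]);
  }
}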
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.pipe.processor.aggregate.operator.aggregatedresult.standardstatistics; + +import org.apache.iotdb.db.pipe.processor.aggregate.operator.aggregatedresult.AggregatedResultOperator; +import org.apache.iotdb.db.pipe.processor.aggregate.operator.intermediateresult.CustomizedReadableIntermediateResults; + +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.utils.Pair; + +import java.util.Collections; +import java.util.Map; +import java.util.Set; + +public class MinValueOperator implements AggregatedResultOperator { + @Override + public String getName() { + return "min"; + } + + @Override + public void configureSystemParameters(final Map systemParams) { + // Do nothing + } + + @Override + public Set getDeclaredIntermediateValueNames() { + return Collections.singleton("min"); + } + + @Override + public Pair terminateWindow( + final TSDataType measurementDataType, + final CustomizedReadableIntermediateResults intermediateResults) { + return new Pair<>(measurementDataType, intermediateResults.getObject("min")); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/CustomizedReadableIntermediateResults.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/CustomizedReadableIntermediateResults.java index f9c9c5f828809..c915bcbc17418 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/CustomizedReadableIntermediateResults.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/CustomizedReadableIntermediateResults.java @@ -66,6 +66,10 @@ public int getInt(final String key) { return (int) (float) value; case DOUBLE: return (int) (double) value; + case TEXT: + case BLOB: + case BOOLEAN: + case STRING: default: throw new UnsupportedOperationException( String.format("The type %s cannot be casted to int.", typeResultPair.getLeft())); @@ -89,6 +93,10 @@ public long getLong(final String key) { return (long) (float) value; case DOUBLE: return (long) (double) value; + case BOOLEAN: + case STRING: + case TEXT: + case BLOB: default: throw new UnsupportedOperationException( String.format("The type %s cannot be casted to long.", typeResultPair.getLeft())); @@ -112,6 +120,10 @@ public float getFloat(final String key) { return (float) value; case DOUBLE: return (float) (double) value; + case TEXT: + case BLOB: + case BOOLEAN: + case STRING: default: throw new UnsupportedOperationException( String.format("The type %s cannot be casted to float.", typeResultPair.getLeft())); @@ -135,6 +147,10 @@ public double getDouble(final String key) { return (float) value; case DOUBLE: return (double) value; + case BOOLEAN: + case STRING: + case TEXT: + case BLOB: default: throw new UnsupportedOperationException( String.format("The type %s 
cannot be casted to double.", typeResultPair.getLeft())); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/AbsoluteMaxOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/AbsoluteMaxOperator.java index db9e16f84a356..b121e4c99513f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/AbsoluteMaxOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/AbsoluteMaxOperator.java @@ -26,46 +26,46 @@ public String getName() { } @Override - public boolean initAndGetIsSupport(int initialInput, long initialTimestamp) { + public boolean initAndGetIsSupport(final int initialInput, final long initialTimestamp) { intValue = Math.abs(initialInput); return super.initAndGetIsSupport(initialInput, initialTimestamp); } @Override - public boolean initAndGetIsSupport(long initialInput, long initialTimestamp) { + public boolean initAndGetIsSupport(final long initialInput, final long initialTimestamp) { longValue = Math.abs(initialInput); return super.initAndGetIsSupport(initialInput, initialTimestamp); } @Override - public boolean initAndGetIsSupport(float initialInput, long initialTimestamp) { + public boolean initAndGetIsSupport(final float initialInput, final long initialTimestamp) { floatValue = Math.abs(initialInput); return super.initAndGetIsSupport(initialInput, initialTimestamp); } @Override - public boolean initAndGetIsSupport(double initialInput, long initialTimestamp) { + public boolean initAndGetIsSupport(final double initialInput, final long initialTimestamp) { doubleValue = Math.abs(initialInput); return super.initAndGetIsSupport(initialInput, initialTimestamp); } @Override - public void updateValue(int input, long timestamp) { + public void updateValue(final int input, final long timestamp) { intValue = Math.max(intValue, Math.abs(input)); } @Override - public void updateValue(long input, long timestamp) { + public void updateValue(final long input, final long timestamp) { longValue = Math.max(longValue, Math.abs(input)); } @Override - public void updateValue(float input, long timestamp) { + public void updateValue(final float input, final long timestamp) { floatValue = Math.max(floatValue, Math.abs(input)); } @Override - public void updateValue(double input, long timestamp) { + public void updateValue(final double input, final long timestamp) { doubleValue = Math.max(doubleValue, Math.abs(input)); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/AbstractSameTypeNumericOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/AbstractSameTypeNumericOperator.java index eab8b64256047..2e31365647681 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/AbstractSameTypeNumericOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/AbstractSameTypeNumericOperator.java @@ -128,6 +128,12 @@ public Pair getResult() { return new Pair<>(TSDataType.FLOAT, floatValue); case DOUBLE: return new Pair<>(TSDataType.DOUBLE, 
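The new TEXT/BLOB/BOOLEAN/STRING arms above deliberately fall through to default: listing every unsupported constant makes the switch exhaustive, so compiler warnings and static analysis flag the switch when a new TSDataType constant is added. A reduced sketch of the pattern, with an illustrative enum standing in for TSDataType:

enum DataType { INT32, INT64, FLOAT, DOUBLE, TEXT, BOOLEAN }

class Casts {
  static long toLong(DataType type, Object value) {
    switch (type) {
      case INT32:
        return (int) value;
      case INT64:
        return (long) value;
      case FLOAT:
        return (long) (float) value;
      case DOUBLE:
        return (long) (double) value;
      case TEXT:
      case BOOLEAN:
        // Explicit unsupported arms fall through to the failure path; tools
        // that check enum exhaustiveness will complain if a constant is
        // added to DataType but not listed here.
      default:
        throw new UnsupportedOperationException(
            String.format("The type %s cannot be cast to long.", type));
    }
  }
}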
doubleValue); + case BLOB: + case TEXT: + case BOOLEAN: + case STRING: + case TIMESTAMP: + case DATE: default: return null; } @@ -149,6 +155,12 @@ public void serialize(final DataOutputStream outputStream) throws IOException { case DOUBLE: ReadWriteIOUtils.write(doubleValue, outputStream); break; + case TIMESTAMP: + case DATE: + case BOOLEAN: + case STRING: + case TEXT: + case BLOB: default: throw new IOException(String.format("Unsupported output datatype %s", outPutDataType)); } @@ -170,6 +182,12 @@ public void deserialize(final ByteBuffer byteBuffer) throws IOException { case DOUBLE: doubleValue = ReadWriteIOUtils.readDouble(byteBuffer); break; + case TEXT: + case BLOB: + case BOOLEAN: + case STRING: + case DATE: + case TIMESTAMP: default: throw new IOException(String.format("Unsupported output datatype %s", outPutDataType)); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/MaxOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/MaxOperator.java new file mode 100644 index 0000000000000..f994a652941e9 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/MaxOperator.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
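The serialize/deserialize pair above must stay symmetric: whatever the writer emits for a given data type, the reader consumes in the same order. A self-contained symmetry sketch using plain JDK streams in place of ReadWriteIOUtils:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

public class ValueCodec {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try (DataOutputStream out = new DataOutputStream(bytes)) {
      out.writeDouble(3.5); // serialize(): the DOUBLE branch writes 8 bytes
    }
    ByteBuffer buffer = ByteBuffer.wrap(bytes.toByteArray());
    double restored = buffer.getDouble(); // deserialize(): DOUBLE branch reads them back
    System.out.println(restored); // 3.5
  }
}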
+ */ + +package org.apache.iotdb.db.pipe.processor.aggregate.operator.intermediateresult.sametype.numeric; + +public class MaxOperator extends AbstractSameTypeNumericOperator { + @Override + public String getName() { + return "max"; + } + + @Override + public boolean initAndGetIsSupport(int initialInput, long initialTimestamp) { + intValue = initialInput; + return super.initAndGetIsSupport(initialInput, initialTimestamp); + } + + @Override + public boolean initAndGetIsSupport(long initialInput, long initialTimestamp) { + longValue = initialInput; + return super.initAndGetIsSupport(initialInput, initialTimestamp); + } + + @Override + public boolean initAndGetIsSupport(final float initialInput, final long initialTimestamp) { + floatValue = initialInput; + return super.initAndGetIsSupport(initialInput, initialTimestamp); + } + + @Override + public boolean initAndGetIsSupport(final double initialInput, final long initialTimestamp) { + doubleValue = initialInput; + return super.initAndGetIsSupport(initialInput, initialTimestamp); + } + + @Override + public void updateValue(final int input, final long timestamp) { + intValue = Math.max(intValue, input); + } + + @Override + public void updateValue(final long input, final long timestamp) { + longValue = Math.max(longValue, input); + } + + @Override + public void updateValue(final float input, final long timestamp) { + floatValue = Math.max(floatValue, input); + } + + @Override + public void updateValue(final double input, final long timestamp) { + doubleValue = Math.max(doubleValue, input); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/MinOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/MinOperator.java new file mode 100644 index 0000000000000..947eca6ff63c7 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/MinOperator.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.pipe.processor.aggregate.operator.intermediateresult.sametype.numeric; + +public class MinOperator extends AbstractSameTypeNumericOperator { + @Override + public String getName() { + return "min"; + } + + @Override + public boolean initAndGetIsSupport(final int initialInput, final long initialTimestamp) { + intValue = initialInput; + return super.initAndGetIsSupport(initialInput, initialTimestamp); + } + + @Override + public boolean initAndGetIsSupport(final long initialInput, final long initialTimestamp) { + longValue = initialInput; + return super.initAndGetIsSupport(initialInput, initialTimestamp); + } + + @Override + public boolean initAndGetIsSupport(final float initialInput, final long initialTimestamp) { + floatValue = initialInput; + return super.initAndGetIsSupport(initialInput, initialTimestamp); + } + + @Override + public boolean initAndGetIsSupport(final double initialInput, final long initialTimestamp) { + doubleValue = initialInput; + return super.initAndGetIsSupport(initialInput, initialTimestamp); + } + + @Override + public void updateValue(final int input, final long timestamp) { + intValue = Math.min(intValue, input); + } + + @Override + public void updateValue(final long input, final long timestamp) { + longValue = Math.min(longValue, input); + } + + @Override + public void updateValue(final float input, final long timestamp) { + floatValue = Math.min(floatValue, input); + } + + @Override + public void updateValue(final double input, final long timestamp) { + doubleValue = Math.min(doubleValue, input); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/processor/StandardStatisticsOperatorProcessor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/processor/StandardStatisticsOperatorProcessor.java index fe010a0a1a1a2..3d3250088f8f7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/processor/StandardStatisticsOperatorProcessor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/processor/StandardStatisticsOperatorProcessor.java @@ -25,6 +25,8 @@ import org.apache.iotdb.db.pipe.processor.aggregate.operator.aggregatedresult.standardstatistics.CrestFactorOperator; import org.apache.iotdb.db.pipe.processor.aggregate.operator.aggregatedresult.standardstatistics.FormFactorOperator; import org.apache.iotdb.db.pipe.processor.aggregate.operator.aggregatedresult.standardstatistics.KurtosisOperator; +import org.apache.iotdb.db.pipe.processor.aggregate.operator.aggregatedresult.standardstatistics.MaxValueOperator; +import org.apache.iotdb.db.pipe.processor.aggregate.operator.aggregatedresult.standardstatistics.MinValueOperator; import org.apache.iotdb.db.pipe.processor.aggregate.operator.aggregatedresult.standardstatistics.PeakOperator; import org.apache.iotdb.db.pipe.processor.aggregate.operator.aggregatedresult.standardstatistics.PulseFactorOperator; import org.apache.iotdb.db.pipe.processor.aggregate.operator.aggregatedresult.standardstatistics.RootMeanSquareOperator; @@ -33,6 +35,8 @@ import org.apache.iotdb.db.pipe.processor.aggregate.operator.intermediateresult.IntermediateResultOperator; import org.apache.iotdb.db.pipe.processor.aggregate.operator.intermediateresult.sametype.numeric.AbsoluteMaxOperator; import org.apache.iotdb.db.pipe.processor.aggregate.operator.intermediateresult.sametype.numeric.IntegralPoweredSumOperator; +import 
org.apache.iotdb.db.pipe.processor.aggregate.operator.intermediateresult.sametype.numeric.MaxOperator; +import org.apache.iotdb.db.pipe.processor.aggregate.operator.intermediateresult.sametype.numeric.MinOperator; import org.apache.iotdb.db.pipe.processor.aggregate.operator.intermediateresult.specifictype.doubletype.FractionPoweredSumOperator; import org.apache.iotdb.db.pipe.processor.aggregate.operator.intermediateresult.specifictype.integertype.CountOperator; @@ -54,6 +58,8 @@ public Set getAggregatorOperatorSet() { new FormFactorOperator(), new KurtosisOperator(), new PeakOperator(), + new MaxValueOperator(), + new MinValueOperator(), new PulseFactorOperator(), new RootMeanSquareOperator(), new SkewnessOperator(), @@ -71,6 +77,8 @@ public Set> getIntermediateResultOperatorSu () -> new IntegralPoweredSumOperator(1), () -> new IntegralPoweredSumOperator(2), () -> new IntegralPoweredSumOperator(3), - () -> new IntegralPoweredSumOperator(4)))); + () -> new IntegralPoweredSumOperator(4), + MaxOperator::new, + MinOperator::new))); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/DownSamplingProcessor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/DownSamplingProcessor.java index fd631772b930a..a8e0c270570bb 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/DownSamplingProcessor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/DownSamplingProcessor.java @@ -23,6 +23,7 @@ import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskProcessorRuntimeEnvironment; import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; +import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; import org.apache.iotdb.db.storageengine.StorageEngine; import org.apache.iotdb.pipe.api.PipeProcessor; import org.apache.iotdb.pipe.api.access.Row; @@ -45,7 +46,6 @@ import static org.apache.iotdb.commons.pipe.config.constant.PipeProcessorConstant.PROCESSOR_DOWN_SAMPLING_SPLIT_FILE_KEY; public abstract class DownSamplingProcessor implements PipeProcessor { - protected long memoryLimitInBytes; protected boolean shouldSplitFile; @@ -149,9 +149,26 @@ public void process(TsFileInsertionEvent tsFileInsertionEvent, EventCollector ev throws Exception { if (shouldSplitFile) { try { - for (final TabletInsertionEvent tabletInsertionEvent : - tsFileInsertionEvent.toTabletInsertionEvents()) { - process(tabletInsertionEvent, eventCollector); + if (tsFileInsertionEvent instanceof PipeTsFileInsertionEvent) { + final AtomicReference ex = new AtomicReference<>(); + ((PipeTsFileInsertionEvent) tsFileInsertionEvent) + .consumeTabletInsertionEventsWithRetry( + event -> { + try { + process(event, eventCollector); + } catch (Exception e) { + ex.set(e); + } + }, + "DownSamplingProcessor::process"); + if (ex.get() != null) { + throw ex.get(); + } + } else { + for (final TabletInsertionEvent tabletInsertionEvent : + tsFileInsertionEvent.toTabletInsertionEvents()) { + process(tabletInsertionEvent, eventCollector); + } } } finally { tsFileInsertionEvent.close(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/combiner/Combiner.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/combiner/Combiner.java index 136e760e44fc7..2ad490b1d032c 100644 --- 
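The processor above registers MaxOperator::new and MinOperator::new as suppliers rather than instances, so every pipe task gets a fresh, stateful accumulator instead of sharing one. A minimal sketch of that supplier-based registration (types here are illustrative stand-ins):

import java.util.List;
import java.util.function.Supplier;

interface Operator {
  String getName();
}

class MaxOp implements Operator {
  public String getName() { return "max"; }
}

class MinOp implements Operator {
  public String getName() { return "min"; }
}

class OperatorRegistry {
  public static void main(String[] args) {
    // Constructor references act as factories; each get() yields an
    // independent operator with its own intermediate state.
    List<Supplier<Operator>> factories = List.of(MaxOp::new, MinOp::new);
    for (Supplier<Operator> factory : factories) {
      Operator fresh = factory.get();
      System.out.println(fresh.getName());
    }
  }
}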
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/combiner/Combiner.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/combiner/Combiner.java @@ -38,8 +38,6 @@ public class Combiner { private static final Logger LOGGER = LoggerFactory.getLogger(Combiner.class); - private static final long MAX_COMBINER_LIVE_TIME_IN_MS = - PipeConfig.getInstance().getTwoStageAggregateMaxCombinerLiveTimeInMs(); private final long creationTimeInMs; private final Operator operator; @@ -98,7 +96,8 @@ public TSStatus combine(int regionId, State state) { } public boolean isOutdated() { - return System.currentTimeMillis() - creationTimeInMs > MAX_COMBINER_LIVE_TIME_IN_MS; + return System.currentTimeMillis() - creationTimeInMs + > PipeConfig.getInstance().getTwoStageAggregateMaxCombinerLiveTimeInMs(); } public boolean isComplete() { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/CombineRequest.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/CombineRequest.java index cb1ba0b9ad9c1..99c8bb67a1a74 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/CombineRequest.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/CombineRequest.java @@ -19,7 +19,7 @@ package org.apache.iotdb.db.pipe.processor.twostage.exchange.payload; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.IoTDBConnectorRequestVersion; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.IoTDBSinkRequestVersion; import org.apache.iotdb.db.pipe.processor.twostage.state.State; import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq; @@ -83,7 +83,7 @@ private CombineRequest convertToTPipeTransferReq( this.state = state; this.combineId = combineId; - this.version = IoTDBConnectorRequestVersion.VERSION_2.getVersion(); + this.version = IoTDBSinkRequestVersion.VERSION_2.getVersion(); this.type = RequestType.COMBINE.getType(); try (final PublicBAOS byteArrayOutputStream = new PublicBAOS(); final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) { @@ -114,7 +114,6 @@ private CombineRequest translateFromTPipeTransferReq(TPipeTransferReq transferRe version = transferReq.version; type = transferReq.type; - body = transferReq.body; return this; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/FetchCombineResultRequest.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/FetchCombineResultRequest.java index b20904a0e2b26..03f8721f17bb2 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/FetchCombineResultRequest.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/FetchCombineResultRequest.java @@ -19,7 +19,7 @@ package org.apache.iotdb.db.pipe.processor.twostage.exchange.payload; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.IoTDBConnectorRequestVersion; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.IoTDBSinkRequestVersion; import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq; import org.apache.tsfile.utils.PublicBAOS; @@ -70,7 +70,7 @@ private FetchCombineResultRequest convertToTPipeTransferReq( this.creationTime = creationTime; this.combineIdList = 
combineIdList; - this.version = IoTDBConnectorRequestVersion.VERSION_2.getVersion(); + this.version = IoTDBSinkRequestVersion.VERSION_2.getVersion(); this.type = RequestType.FETCH_COMBINE_RESULT.getType(); try (final PublicBAOS byteArrayOutputStream = new PublicBAOS(); final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) { @@ -99,7 +99,6 @@ private FetchCombineResultRequest translateFromTPipeTransferReq(TPipeTransferReq version = transferReq.version; type = transferReq.type; - body = transferReq.body; return this; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/receiver/TwoStageAggregateReceiver.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/receiver/TwoStageAggregateReceiver.java index 1e029d7a44279..240f7a89e5fca 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/receiver/TwoStageAggregateReceiver.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/receiver/TwoStageAggregateReceiver.java @@ -19,8 +19,8 @@ package org.apache.iotdb.db.pipe.processor.twostage.exchange.receiver; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.IoTDBConnectorRequestVersion; import org.apache.iotdb.commons.pipe.receiver.IoTDBReceiver; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.IoTDBSinkRequestVersion; import org.apache.iotdb.db.pipe.processor.twostage.combiner.PipeCombineHandlerManager; import org.apache.iotdb.db.pipe.processor.twostage.exchange.payload.CombineRequest; import org.apache.iotdb.db.pipe.processor.twostage.exchange.payload.FetchCombineResultRequest; @@ -38,8 +38,8 @@ public class TwoStageAggregateReceiver implements IoTDBReceiver { private static final Logger LOGGER = LoggerFactory.getLogger(TwoStageAggregateReceiver.class); @Override - public IoTDBConnectorRequestVersion getVersion() { - return IoTDBConnectorRequestVersion.VERSION_2; + public IoTDBSinkRequestVersion getVersion() { + return IoTDBSinkRequestVersion.VERSION_2; } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/sender/TwoStageAggregateSender.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/sender/TwoStageAggregateSender.java index a24b6c4fc3914..bac357368c037 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/sender/TwoStageAggregateSender.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/sender/TwoStageAggregateSender.java @@ -23,7 +23,7 @@ import org.apache.iotdb.commons.client.exception.ClientManagerException; import org.apache.iotdb.commons.client.property.ThriftClientProperty; import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.connector.client.IoTDBSyncClient; +import org.apache.iotdb.commons.pipe.sink.client.IoTDBSyncClient; import org.apache.iotdb.confignode.rpc.thrift.TDataNodeInfo; import org.apache.iotdb.confignode.rpc.thrift.TShowDataNodesResp; import org.apache.iotdb.db.pipe.processor.twostage.combiner.PipeCombineHandlerManager; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/plugin/TwoStageCountProcessor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/plugin/TwoStageCountProcessor.java index 07f952277aadb..16a20a5f509aa 100644 --- 
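The Combiner change above drops the static final cache of the TTL so that isOutdated() re-reads PipeConfig on every call, letting a runtime-updated value take effect without reloading the class. A minimal sketch of call-time config reads, with a hypothetical Config holder in place of PipeConfig:

class Config {
  private static volatile long combinerTtlMs = 60_000;

  static long getCombinerTtlMs() { return combinerTtlMs; }

  static void setCombinerTtlMs(long ttlMs) { combinerTtlMs = ttlMs; }
}

class Combiner {
  private final long creationTimeInMs = System.currentTimeMillis();

  boolean isOutdated() {
    // Re-read on every call; a static final snapshot would pin whatever
    // TTL happened to be configured at class-load time.
    return System.currentTimeMillis() - creationTimeInMs > Config.getCombinerTtlMs();
  }
}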
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/plugin/TwoStageCountProcessor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/plugin/TwoStageCountProcessor.java @@ -24,10 +24,10 @@ import org.apache.iotdb.commons.consensus.index.impl.StateProgressIndex; import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; import org.apache.iotdb.commons.pipe.config.constant.PipeProcessorConstant; import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskProcessorRuntimeEnvironment; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; @@ -172,10 +172,8 @@ public void process(TabletInsertionEvent tabletInsertionEvent, EventCollector ev : ((PipeRawTabletInsertionEvent) event).count(); localCount.accumulateAndGet(count, Long::sum); - localCommitProgressIndex.set( - localCommitProgressIndex - .get() - .updateToMinimumEqualOrIsAfterProgressIndex(event.getProgressIndex())); + localCommitProgressIndex.updateAndGet( + index -> index.updateToMinimumEqualOrIsAfterProgressIndex(event.getProgressIndex())); } @Override @@ -199,10 +197,8 @@ public void process(TsFileInsertionEvent tsFileInsertionEvent, EventCollector ev final long count = event.count(true); localCount.accumulateAndGet(count, Long::sum); - localCommitProgressIndex.set( - localCommitProgressIndex - .get() - .updateToMinimumEqualOrIsAfterProgressIndex(event.getProgressIndex())); + localCommitProgressIndex.updateAndGet( + index -> index.updateToMinimumEqualOrIsAfterProgressIndex(event.getProgressIndex())); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/airgap/IoTDBAirGapReceiver.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/airgap/IoTDBAirGapReceiver.java index 9fd3bcd40927a..3cea6c998f87c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/airgap/IoTDBAirGapReceiver.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/airgap/IoTDBAirGapReceiver.java @@ -22,11 +22,13 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.concurrent.WrappedRunnable; import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.connector.payload.airgap.AirGapELanguageConstant; -import org.apache.iotdb.commons.pipe.connector.payload.airgap.AirGapOneByteResponse; -import org.apache.iotdb.commons.pipe.connector.payload.airgap.AirGapPseudoTPipeTransferRequest; +import org.apache.iotdb.commons.pipe.sink.payload.airgap.AirGapELanguageConstant; +import org.apache.iotdb.commons.pipe.sink.payload.airgap.AirGapOneByteResponse; +import org.apache.iotdb.commons.pipe.sink.payload.airgap.AirGapPseudoTPipeTransferRequest; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; import org.apache.iotdb.db.pipe.receiver.protocol.thrift.IoTDBDataNodeReceiverAgent; +import org.apache.iotdb.db.protocol.session.ClientSession; +import org.apache.iotdb.db.protocol.session.SessionManager; import org.apache.iotdb.pipe.api.exception.PipeConnectionException; import org.apache.iotdb.rpc.TSStatusCode; 
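The switch from get()/set() to updateAndGet() in TwoStageCountProcessor above closes a lost-update window: two threads could both read the old index, compute, and overwrite each other's result. A sketch of both forms, modeling the progress index as a long:

import java.util.concurrent.atomic.AtomicLong;

class ProgressTracker {
  private final AtomicLong maxIndex = new AtomicLong(0);

  // Racy: another thread can set() between our get() and set(),
  // and its value is then silently overwritten.
  void advanceRacy(long candidate) {
    long current = maxIndex.get();
    maxIndex.set(Math.max(current, candidate));
  }

  // Atomic read-modify-write: updateAndGet retries via CAS on contention,
  // so no concurrent advance is ever lost.
  void advance(long candidate) {
    maxIndex.updateAndGet(current -> Math.max(current, candidate));
  }
}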
import org.apache.iotdb.service.rpc.thrift.TPipeTransferResp; @@ -73,6 +75,8 @@ public void runMayThrow() throws Throwable { LOGGER.info("Pipe air gap receiver {} started. Socket: {}", receiverId, socket); + SessionManager.getInstance().registerSession(new ClientSession(socket)); + try { while (!socket.isClosed()) { isELanguagePayload = false; @@ -90,6 +94,7 @@ public void runMayThrow() throws Throwable { e); throw e; } finally { + // session will be closed and removed here PipeDataNodeAgent.receiver().thrift().handleClientExit(); socket.close(); } @@ -101,9 +106,20 @@ private void receive() throws IOException { try { final byte[] data = readData(inputStream); + // If check sum failed, it indicates that the length we read may not be correct. + // Namely, there may be remaining bytes in the socket stream, which will fail any subsequent + // attempts to read from that. + // We directly close the socket here. if (!checkSum(data)) { - LOGGER.warn("Checksum failed, receiverId: {}", receiverId); - fail(); + LOGGER.warn( + "Pipe air gap receiver {} closed because of checksum failed. Socket: {}", + receiverId, + socket); + try { + fail(); + } finally { + socket.close(); + } return; } @@ -126,21 +142,31 @@ private void receive() throws IOException { || status.getCode() == TSStatusCode.PIPE_RECEIVER_IDEMPOTENT_CONFLICT_EXCEPTION.getStatusCode()) { LOGGER.info( - "TSStatus:{} is encountered at the air gap receiver, will ignore.", resp.getStatus()); + "Pipe air gap receiver {}: TSStatus {} is encountered at the air gap receiver, will ignore.", + receiverId, + resp.getStatus()); ok(); } else { LOGGER.warn( - "Handle data failed, receiverId: {}, status: {}, req: {}", + "Pipe air gap receiver {}: Handle data failed, status: {}, req: {}", receiverId, resp.getStatus(), req); fail(); } } catch (final PipeConnectionException e) { - LOGGER.info("Socket closed when listening to data. Because: {}", e.getMessage()); + LOGGER.info( + "Pipe air gap receiver {}: Socket {} closed when listening to data. Because: {}", + receiverId, + socket, + e.getMessage()); socket.close(); } catch (final Exception e) { - LOGGER.warn("Exception during handling receiving, receiverId: {}", receiverId, e); + LOGGER.warn( + "Pipe air gap receiver {}: Exception during handling receiving. 
Socket: {}", + receiverId, + socket, + e); fail(); } } @@ -161,7 +187,17 @@ private boolean checkSum(byte[] bytes) { try { final CRC32 crc32 = new CRC32(); crc32.update(bytes, LONG_LEN, bytes.length - LONG_LEN); - return BytesUtils.bytesToLong(BytesUtils.subBytes(bytes, 0, LONG_LEN)) == crc32.getValue(); + + final long expectedChecksum = BytesUtils.bytesToLong(BytesUtils.subBytes(bytes, 0, LONG_LEN)); + final long actualChecksum = crc32.getValue(); + if (expectedChecksum != actualChecksum) { + LOGGER.warn( + "Pipe air gap receiver {}: checksum failed, expected: {}, actual: {}", + receiverId, + expectedChecksum, + actualChecksum); + } + return expectedChecksum == actualChecksum; } catch (final Exception e) { // ArrayIndexOutOfBoundsException when bytes.length < LONG_LEN return false; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/airgap/IoTDBAirGapReceiverAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/airgap/IoTDBAirGapReceiverAgent.java index 6db65b00a326a..c28e937c305eb 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/airgap/IoTDBAirGapReceiverAgent.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/airgap/IoTDBAirGapReceiverAgent.java @@ -52,8 +52,13 @@ public class IoTDBAirGapReceiverAgent implements IService { public void listen() { try { final Socket socket = serverSocket.accept(); - new Thread(new IoTDBAirGapReceiver(socket, receiverId.incrementAndGet())).start(); - } catch (IOException e) { + final long airGapReceiverId = receiverId.incrementAndGet(); + final Thread airGapReceiverThread = + new Thread(new IoTDBAirGapReceiver(socket, airGapReceiverId)); + airGapReceiverThread.setName( + ThreadName.PIPE_AIR_GAP_RECEIVER.getName() + "-" + airGapReceiverId); + airGapReceiverThread.start(); + } catch (final IOException e) { LOGGER.warn("Unhandled exception during pipe air gap receiver listening", e); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/IoTDBLegacyPipeReceiverAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/IoTDBLegacyPipeReceiverAgent.java index 3fb132523d8e9..d8f8cd2ecbe8a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/IoTDBLegacyPipeReceiverAgent.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/IoTDBLegacyPipeReceiverAgent.java @@ -26,8 +26,8 @@ import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.db.auth.AuthorityChecker; import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.pipe.connector.payload.legacy.PipeData; -import org.apache.iotdb.db.pipe.connector.payload.legacy.TsFilePipeData; +import org.apache.iotdb.db.pipe.sink.payload.legacy.PipeData; +import org.apache.iotdb.db.pipe.sink.payload.legacy.TsFilePipeData; import org.apache.iotdb.db.protocol.session.SessionManager; import org.apache.iotdb.db.queryengine.common.SessionInfo; import org.apache.iotdb.db.queryengine.plan.Coordinator; @@ -143,9 +143,11 @@ private boolean registerDatabase( "", partitionFetcher, schemaFetcher, - IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold()); + IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold(), + false); if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode() - && result.status.code != 
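The air-gap receiver above prefixes each frame with a CRC32 of the payload and now logs expected versus actual on mismatch before closing the socket. A self-contained sketch of the same framing, with an 8-byte checksum followed by the payload:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

public class Crc32Frame {
  private static final int LONG_LEN = Long.BYTES;

  static byte[] frame(byte[] payload) {
    CRC32 crc32 = new CRC32();
    crc32.update(payload, 0, payload.length);
    return ByteBuffer.allocate(LONG_LEN + payload.length)
        .putLong(crc32.getValue())
        .put(payload)
        .array();
  }

  static boolean checkSum(byte[] frame) {
    if (frame.length < LONG_LEN) {
      return false; // too short to even carry a checksum
    }
    CRC32 crc32 = new CRC32();
    crc32.update(frame, LONG_LEN, frame.length - LONG_LEN);
    long expected = ByteBuffer.wrap(frame, 0, LONG_LEN).getLong();
    return expected == crc32.getValue();
  }

  public static void main(String[] args) {
    byte[] framed = frame("hello".getBytes(StandardCharsets.UTF_8));
    System.out.println(checkSum(framed)); // true
    framed[framed.length - 1] ^= 1;       // corrupt one payload byte
    System.out.println(checkSum(framed)); // false
  }
}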
TSStatusCode.DATABASE_ALREADY_EXISTS.getStatusCode()) { + && result.status.code != TSStatusCode.DATABASE_ALREADY_EXISTS.getStatusCode() + && result.status.code != TSStatusCode.DATABASE_CONFLICT.getStatusCode()) { LOGGER.error( "Create Database error, statement: {}, result status : {}.", statement, result.status); return false; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/loader/DeletionLoader.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/loader/DeletionLoader.java index 19dc268acec4e..fe1255ce8c3cb 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/loader/DeletionLoader.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/loader/DeletionLoader.java @@ -22,7 +22,7 @@ import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.db.auth.AuthorityChecker; import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.LoadFileException; +import org.apache.iotdb.db.exception.load.LoadFileException; import org.apache.iotdb.db.protocol.session.SessionManager; import org.apache.iotdb.db.queryengine.common.SessionInfo; import org.apache.iotdb.db.queryengine.plan.Coordinator; @@ -68,7 +68,8 @@ public void load() throws PipeException { "", PARTITION_FETCHER, SCHEMA_FETCHER, - IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold()); + IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold(), + false); if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { LOGGER.error("Delete {} error, statement: {}.", deletion, statement); LOGGER.error("Delete result status : {}.", result.status); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/loader/TsFileLoader.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/loader/TsFileLoader.java index 9b9e0705f2146..e181ec1d5926b 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/loader/TsFileLoader.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/loader/TsFileLoader.java @@ -23,7 +23,7 @@ import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.db.auth.AuthorityChecker; import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.LoadFileException; +import org.apache.iotdb.db.exception.load.LoadFileException; import org.apache.iotdb.db.protocol.session.SessionManager; import org.apache.iotdb.db.queryengine.common.SessionInfo; import org.apache.iotdb.db.queryengine.plan.Coordinator; @@ -56,6 +56,7 @@ public void load() { try { LoadTsFileStatement statement = new LoadTsFileStatement(tsFile.getAbsolutePath()); statement.setDeleteAfterLoad(true); + statement.setConvertOnTypeMismatch(true); statement.setDatabaseLevel(parseSgLevel()); statement.setVerifySchema(true); statement.setAutoCreateDatabase(false); @@ -70,7 +71,8 @@ public void load() { "", PARTITION_FETCHER, SCHEMA_FETCHER, - IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold()); + IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold(), + false); if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { LOGGER.error("Load TsFile {} error, statement: {}.", tsFile.getPath(), statement); LOGGER.error("Load TsFile result status : {}.", result.status); diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/pipeconsensus/PipeConsensusReceiver.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/pipeconsensus/PipeConsensusReceiver.java index f5a3c004f7f47..dc48e634dbfd9 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/pipeconsensus/PipeConsensusReceiver.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/pipeconsensus/PipeConsensusReceiver.java @@ -24,13 +24,14 @@ import org.apache.iotdb.commons.consensus.DataRegionId; import org.apache.iotdb.commons.consensus.index.ProgressIndex; import org.apache.iotdb.commons.consensus.index.ProgressIndexType; -import org.apache.iotdb.commons.pipe.connector.payload.pipeconsensus.request.PipeConsensusRequestType; -import org.apache.iotdb.commons.pipe.connector.payload.pipeconsensus.request.PipeConsensusRequestVersion; -import org.apache.iotdb.commons.pipe.connector.payload.pipeconsensus.request.PipeConsensusTransferFilePieceReq; -import org.apache.iotdb.commons.pipe.connector.payload.pipeconsensus.response.PipeConsensusTransferFilePieceResp; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.response.PipeTransferFilePieceResp; import org.apache.iotdb.commons.pipe.receiver.IoTDBReceiverAgent; +import org.apache.iotdb.commons.pipe.sink.payload.pipeconsensus.request.PipeConsensusRequestType; +import org.apache.iotdb.commons.pipe.sink.payload.pipeconsensus.request.PipeConsensusRequestVersion; +import org.apache.iotdb.commons.pipe.sink.payload.pipeconsensus.request.PipeConsensusTransferFilePieceReq; +import org.apache.iotdb.commons.pipe.sink.payload.pipeconsensus.response.PipeConsensusTransferFilePieceResp; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.response.PipeTransferFilePieceResp; import org.apache.iotdb.commons.service.metric.MetricService; +import org.apache.iotdb.commons.utils.RetryUtils; import org.apache.iotdb.consensus.exception.ConsensusGroupNotExistException; import org.apache.iotdb.consensus.pipe.PipeConsensus; import org.apache.iotdb.consensus.pipe.PipeConsensusServerImpl; @@ -41,21 +42,21 @@ import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.exception.DiskSpaceInsufficientException; -import org.apache.iotdb.db.exception.LoadFileException; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.payload.request.PipeConsensusTabletBinaryReq; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.payload.request.PipeConsensusTabletInsertNodeReq; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.payload.request.PipeConsensusTsFilePieceReq; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.payload.request.PipeConsensusTsFileSealReq; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.payload.request.PipeConsensusTsFileSealWithModReq; +import org.apache.iotdb.db.exception.load.LoadFileException; import org.apache.iotdb.db.pipe.consensus.PipeConsensusReceiverMetrics; import org.apache.iotdb.db.pipe.event.common.tsfile.TsFileInsertionPointCounter; -import org.apache.iotdb.db.queryengine.execution.load.LoadTsFileManager; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.payload.request.PipeConsensusTabletBinaryReq; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.payload.request.PipeConsensusTabletInsertNodeReq; +import 
org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.payload.request.PipeConsensusTsFilePieceReq; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.payload.request.PipeConsensusTsFileSealReq; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.payload.request.PipeConsensusTsFileSealWithModReq; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; import org.apache.iotdb.db.storageengine.StorageEngine; import org.apache.iotdb.db.storageengine.dataregion.DataRegion; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResourceStatus; import org.apache.iotdb.db.storageengine.dataregion.utils.TsFileResourceUtils; +import org.apache.iotdb.db.storageengine.load.LoadTsFileManager; import org.apache.iotdb.db.storageengine.rescon.disk.FolderManager; import org.apache.iotdb.db.storageengine.rescon.disk.strategy.DirectoryStrategyType; import org.apache.iotdb.rpc.RpcUtils; @@ -101,8 +102,7 @@ public class PipeConsensusReceiver { private final ConsensusPipeName consensusPipeName; private final List receiverBaseDirsName; // Used to buffer TsFile when transfer TsFile asynchronously. - private final PipeConsensusTsFileWriterPool pipeConsensusTsFileWriterPool = - new PipeConsensusTsFileWriterPool(); + private final PipeConsensusTsFileWriterPool pipeConsensusTsFileWriterPool; private final AtomicReference receiverFileDirWithIdSuffix = new AtomicReference<>(); private final PipeConsensusReceiverMetrics pipeConsensusReceiverMetrics; private final FolderManager folderManager; @@ -114,10 +114,7 @@ public PipeConsensusReceiver( this.pipeConsensus = pipeConsensus; this.consensusGroupId = consensusGroupId; this.pipeConsensusReceiverMetrics = new PipeConsensusReceiverMetrics(this); - this.requestExecutor = - new RequestExecutor(pipeConsensusReceiverMetrics, pipeConsensusTsFileWriterPool); this.consensusPipeName = consensusPipeName; - MetricService.getInstance().addMetricSet(pipeConsensusReceiverMetrics); // Each pipeConsensusReceiver has its own base directories. for example, a default dir path is // data/datanode/system/pipe/consensus/receiver/__consensus.{consensusGroupId}_{leaderDataNodeId}_{followerDataNodeId} @@ -136,10 +133,16 @@ public PipeConsensusReceiver( try { initiateTsFileBufferFolder(); + this.pipeConsensusTsFileWriterPool = + new PipeConsensusTsFileWriterPool( + consensusPipeName, receiverFileDirWithIdSuffix.get().getPath()); } catch (Exception e) { LOGGER.error("Fail to initiate file buffer folder, Error msg: {}", e.getMessage()); throw new RuntimeException(e); } + this.requestExecutor = + new RequestExecutor(pipeConsensusReceiverMetrics, pipeConsensusTsFileWriterPool); + MetricService.getInstance().addMetricSet(pipeConsensusReceiverMetrics); } /** @@ -406,6 +409,10 @@ private TPipeConsensusTransferResp handleTransferFileSeal(final PipeConsensusTsF final String fileAbsolutePath = writingFile.getAbsolutePath(); + // Sync here is necessary to ensure that the data is written to the disk. Otherwise the data + // region may load the file before the data is written to the disk, causing unexpected behavior + // after a system restart (e.g., an empty file in the data region's data directory). + writingFileWriter.getFD().sync(); // 1. The writing file writer must be closed, otherwise it may cause concurrent errors during // the process of loading tsfile when parsing tsfile. 
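A minimal sketch (not part of the patch; class and method names are assumed for illustration) of the sync-before-close pattern the comment above motivates: RandomAccessFile#getFD().sync() forces the OS to flush the written bytes to the storage device, so a crash between sealing and loading cannot hand the data region an empty or truncated file.

import java.io.IOException;
import java.io.RandomAccessFile;

class FsyncBeforeSealSketch {
  // Make the received bytes durable before the file becomes visible to the data region.
  static void sealDurably(final RandomAccessFile writingFileWriter) throws IOException {
    writingFileWriter.getFD().sync(); // flush file contents to disk first
    writingFileWriter.close(); // then release the writer so the file can be loaded
  }
}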
// @@ -471,7 +478,7 @@ private TPipeConsensusTransferResp handleTransferFileSeal(final PipeConsensusTsF // If the writing file is not sealed successfully, the writing file will be deleted. // All pieces of the writing file and its mod (if exists) should be retransmitted by the // sender. - closeCurrentWritingFileWriter(tsFileWriter); + closeCurrentWritingFileWriter(tsFileWriter, false); deleteCurrentWritingFile(tsFileWriter); } } @@ -522,6 +529,10 @@ private TPipeConsensusTransferResp handleTransferFileSealWithMods( } } + // Sync here is necessary to ensure that the data is written to the disk. Otherwise the data + // region may load the file before the data is written to the disk, causing unexpected behavior + // after a system restart (e.g., an empty file in the data region's data directory). + writingFileWriter.getFD().sync(); // 1. The writing file writer must be closed, otherwise it may cause concurrent errors during // the process of loading tsfile when parsing tsfile. // @@ -580,7 +591,7 @@ private TPipeConsensusTransferResp handleTransferFileSealWithMods( // If the writing file is not sealed successfully, the writing file will be deleted. // All pieces of the writing file and its mod(if exists) should be retransmitted by the // sender. - closeCurrentWritingFileWriter(tsFileWriter); + closeCurrentWritingFileWriter(tsFileWriter, false); // Clear the directory instead of only deleting the referenced files in seal request // to avoid previously undeleted file being redundant when transferring multi files IoTDBReceiverAgent.cleanPipeReceiverDir(receiverFileDirWithIdSuffix.get()); @@ -630,9 +641,23 @@ private TPipeConsensusTransferResp checkNonFinalFileSeal( private TSStatus loadFileToDataRegion(String filePath, ProgressIndex progressIndex) throws IOException, LoadFileException { - StorageEngine.getInstance() - .getDataRegion(((DataRegionId) consensusGroupId)) - .loadNewTsFile(generateTsFileResource(filePath, progressIndex), true, false); + DataRegion region = + StorageEngine.getInstance().getDataRegion(((DataRegionId) consensusGroupId)); + if (region != null) { + TsFileResource resource = + generateTsFileResource( + filePath, + progressIndex, + IoTDBDescriptor.getInstance().getConfig().isCacheLastValuesForLoad()); + region.loadNewTsFile(resource, true, false, true); + } else { + // A null data region indicates that the region has been removed or migrated. In those + // cases, there is no need to replicate data; we just return success so the leader does not + // keep retrying. + LOGGER.info( + "PipeConsensus-PipeName-{}: Skip loading tsfile-{} when sealing, because this region has been removed or migrated.", + consensusPipeName, + filePath); + } return RpcUtils.SUCCESS_STATUS; } @@ -676,13 +701,13 @@ private void updateWritePointCountMetrics(long writePointCount) { dataRegion, databaseName, writePointCount, true)); } - private TsFileResource generateTsFileResource(String filePath, ProgressIndex progressIndex) - throws IOException { + private TsFileResource generateTsFileResource( + String filePath, ProgressIndex progressIndex, boolean cacheLastValues) throws IOException { final File tsFile = new File(filePath); final TsFileResource tsFileResource = new TsFileResource(tsFile); try (final TsFileSequenceReader reader = new TsFileSequenceReader(tsFile.getAbsolutePath())) { - TsFileResourceUtils.updateTsFileResource(reader, tsFileResource); + TsFileResourceUtils.updateTsFileResource(reader, tsFileResource, cacheLastValues); } tsFileResource.setStatus(TsFileResourceStatus.NORMAL); @@ -775,10 +800,14 @@ private boolean isWritingFileOffsetNonCorrect( return !offsetCorrect; } - private void closeCurrentWritingFileWriter(PipeConsensusTsFileWriter tsFileWriter) { + private void closeCurrentWritingFileWriter( + PipeConsensusTsFileWriter tsFileWriter, boolean fsyncAfterClose) { if (tsFileWriter.getWritingFileWriter() != null) { try { tsFileWriter.getWritingFileWriter().close(); + if (fsyncAfterClose) { + tsFileWriter.getWritingFileWriter().getFD().sync(); + } LOGGER.info( "PipeConsensus-PipeName-{}: Current writing file writer {} was closed.", consensusPipeName, @@ -808,7 +837,7 @@ private void closeCurrentWritingFileWrite private void deleteFile(File file) { if (file.exists()) { try { - FileUtils.delete(file); + RetryUtils.retryOnException(() -> FileUtils.delete(file)); LOGGER.info( "PipeConsensus-PipeName-{}: Original writing file {} was deleted.", consensusPipeName, @@ -859,7 +888,7 @@ private void updateWritingFileIfNeeded( fileName, tsFileWriter.getWritingFile() == null ? "null" : tsFileWriter.getWritingFile().getPath()); - closeCurrentWritingFileWriter(tsFileWriter); + closeCurrentWritingFileWriter(tsFileWriter, !isSingleFile); // If there are multiple files we can not delete the current file // instead they will be deleted after seal request if (tsFileWriter.getWritingFile() != null && isSingleFile) { @@ -882,8 +911,9 @@ private void updateWritingFileIfNeeded( receiverFileDirWithIdSuffix.get().getPath()); } } - - tsFileWriter.setWritingFile(new File(receiverFileDirWithIdSuffix.get(), fileName)); + // Every tsFileWriter has its own writing path. + // 1 Thread --> 1 connection --> 1 tsFileWriter --> 1 path + tsFileWriter.setWritingFile(new File(tsFileWriter.getLocalWritingDirPath(), fileName)); tsFileWriter.setWritingFileWriter(new RandomAccessFile(tsFileWriter.getWritingFile(), "rw")); LOGGER.info( "PipeConsensus-PipeName-{}: Writing file {} was created. 
Ready to write file pieces.", @@ -901,7 +931,11 @@ private void initiateTsFileBufferFolder() throws DiskSpaceInsufficientException, if (receiverFileDirWithIdSuffix.get() != null) { if (receiverFileDirWithIdSuffix.get().exists()) { try { - FileUtils.deleteDirectory(receiverFileDirWithIdSuffix.get()); + RetryUtils.retryOnException( + () -> { + FileUtils.deleteDirectory(receiverFileDirWithIdSuffix.get()); + return null; + }); LOGGER.info( "PipeConsensus-PipeName-{}: Original receiver file dir {} was deleted successfully.", consensusPipeName, @@ -963,7 +997,11 @@ private void initiateTsFileBufferFolder() throws DiskSpaceInsufficientException, } // Remove exists dir if (newReceiverDir.exists()) { - FileUtils.deleteDirectory(newReceiverDir); + RetryUtils.retryOnException( + () -> { + FileUtils.deleteDirectory(newReceiverDir); + return null; + }); LOGGER.info( "PipeConsensus-PipeName-{}: Origin receiver file dir {} was deleted.", consensusPipeName, @@ -994,7 +1032,11 @@ public synchronized void handleExit() { if (receiverFileDirWithIdSuffix.get() != null) { if (receiverFileDirWithIdSuffix.get().exists()) { try { - FileUtils.deleteDirectory(receiverFileDirWithIdSuffix.get()); + RetryUtils.retryOnException( + () -> { + FileUtils.deleteDirectory(receiverFileDirWithIdSuffix.get()); + return null; + }); LOGGER.info( "PipeConsensus-PipeName-{}: Receiver exit: Original receiver file dir {} was deleted.", consensusPipeName, @@ -1033,10 +1075,16 @@ public synchronized void handleExit() { private static class PipeConsensusTsFileWriterPool { private final Lock lock = new ReentrantLock(); private final List pipeConsensusTsFileWriterPool = new ArrayList<>(); + private final ConsensusPipeName consensusPipeName; - public PipeConsensusTsFileWriterPool() { + public PipeConsensusTsFileWriterPool( + ConsensusPipeName consensusPipeName, String receiverBasePath) throws IOException { + this.consensusPipeName = consensusPipeName; for (int i = 0; i < IOTDB_CONFIG.getPipeConsensusPipelineSize(); i++) { - pipeConsensusTsFileWriterPool.add(new PipeConsensusTsFileWriter(i)); + PipeConsensusTsFileWriter tsFileWriter = + new PipeConsensusTsFileWriter(i, consensusPipeName); + tsFileWriter.setFilePath(receiverBasePath); + pipeConsensusTsFileWriterPool.add(tsFileWriter); } } @@ -1103,7 +1151,9 @@ public void handleExit(ConsensusPipeName consensusPipeName) { } private static class PipeConsensusTsFileWriter { + private final ConsensusPipeName consensusPipeName; private final int index; + private String localWritingDirPath; // whether this buffer is used. this will be updated when first transfer tsFile piece or // when transfer seal. 
private boolean isUsed = false; @@ -1112,8 +1162,42 @@ private static class PipeConsensusTsFileWriter { private File writingFile; private RandomAccessFile writingFileWriter; - public PipeConsensusTsFileWriter(int index) { + public PipeConsensusTsFileWriter(int index, ConsensusPipeName consensusPipeName) { this.index = index; + this.consensusPipeName = consensusPipeName; + } + + public void setFilePath(String receiverBasePath) throws IOException { + this.localWritingDirPath = receiverBasePath + File.separator + index; + File tsFileWriterDirectory = new File(this.localWritingDirPath); + // Remove existing dir + if (tsFileWriterDirectory.exists()) { + RetryUtils.retryOnException( + () -> { + FileUtils.deleteDirectory(tsFileWriterDirectory); + return null; + }); + LOGGER.info( + "PipeConsensus-PipeName-{}: Original receiver tsFileWriter-{} file dir {} was deleted.", + consensusPipeName, + index, + tsFileWriterDirectory.getPath()); + } + if (!tsFileWriterDirectory.mkdirs()) { + LOGGER.warn( + "PipeConsensus-PipeName-{}: Failed to create receiver tsFileWriter-{} file dir {}. This may be caused by a permission issue or the dir already existing.", + consensusPipeName, + index, + tsFileWriterDirectory.getPath()); + throw new IOException( + String.format( + "PipeConsensus-PipeName-%s: Failed to create tsFileWriter-%d receiver file dir %s. This may be caused by a permission issue or the dir already existing.", + consensusPipeName, index, tsFileWriterDirectory.getPath())); + } + } + + public String getLocalWritingDirPath() { + return localWritingDirPath; } public File getWritingFile() { @@ -1187,7 +1271,7 @@ public void closeSelf(ConsensusPipeName consensusPipeName) { // close file if (writingFile != null) { try { - FileUtils.delete(writingFile); + RetryUtils.retryOnException(() -> FileUtils.delete(writingFile)); LOGGER.info( "PipeConsensus-PipeName-{}: TsFileWriter exit: Writing file {} was deleted.", consensusPipeName, diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/pipeconsensus/PipeConsensusReceiverAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/pipeconsensus/PipeConsensusReceiverAgent.java index ff3307c5f5604..849a8935ea0f2 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/pipeconsensus/PipeConsensusReceiverAgent.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/pipeconsensus/PipeConsensusReceiverAgent.java @@ -21,7 +21,7 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.consensus.ConsensusGroupId; -import org.apache.iotdb.commons.pipe.connector.payload.pipeconsensus.request.PipeConsensusRequestVersion; +import org.apache.iotdb.commons.pipe.sink.payload.pipeconsensus.request.PipeConsensusRequestVersion; import org.apache.iotdb.consensus.IConsensus; import org.apache.iotdb.consensus.pipe.PipeConsensus; import org.apache.iotdb.consensus.pipe.consensuspipe.ConsensusPipeName; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java index 12916ba450633..6cd709a379991 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java @@ -20,48 +20,56 @@ package 
org.apache.iotdb.db.pipe.receiver.protocol.thrift; import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.conf.IoTDBConstant; import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.commons.exception.pipe.PipeRuntimeOutOfMemoryCriticalException; import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.pipe.connector.PipeReceiverStatusHandler; -import org.apache.iotdb.commons.pipe.connector.payload.airgap.AirGapPseudoTPipeTransferRequest; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferCompressedReq; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferFileSealReqV1; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferFileSealReqV2; -import org.apache.iotdb.commons.pipe.pattern.IoTDBPipePattern; +import org.apache.iotdb.commons.pipe.config.PipeConfig; +import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBPipePattern; import org.apache.iotdb.commons.pipe.receiver.IoTDBFileReceiver; +import org.apache.iotdb.commons.pipe.receiver.PipeReceiverStatusHandler; +import org.apache.iotdb.commons.pipe.sink.payload.airgap.AirGapPseudoTPipeTransferRequest; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.common.PipeTransferSliceReqHandler; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferCompressedReq; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFileSealReqV1; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFileSealReqV2; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferSliceReq; import org.apache.iotdb.db.auth.AuthorityChecker; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.exception.DiskSpaceInsufficientException; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferDataNodeHandshakeV1Req; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferDataNodeHandshakeV2Req; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferPlanNodeReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferSchemaSnapshotPieceReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferSchemaSnapshotSealReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletBatchReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletBinaryReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletInsertNodeReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletRawReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTsFilePieceReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTsFilePieceWithModReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTsFileSealReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTsFileSealWithModReq; import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionSnapshotEvent; -import 
org.apache.iotdb.db.pipe.metric.PipeDataNodeReceiverMetrics; +import org.apache.iotdb.db.pipe.metric.receiver.PipeDataNodeReceiverMetrics; import org.apache.iotdb.db.pipe.receiver.visitor.PipePlanToStatementVisitor; +import org.apache.iotdb.db.pipe.receiver.visitor.PipeStatementDataTypeConvertExecutionVisitor; import org.apache.iotdb.db.pipe.receiver.visitor.PipeStatementExceptionVisitor; import org.apache.iotdb.db.pipe.receiver.visitor.PipeStatementPatternParseVisitor; import org.apache.iotdb.db.pipe.receiver.visitor.PipeStatementTSStatusVisitor; import org.apache.iotdb.db.pipe.receiver.visitor.PipeStatementToBatchVisitor; +import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; +import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferDataNodeHandshakeV1Req; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferDataNodeHandshakeV2Req; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferPlanNodeReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferSchemaSnapshotPieceReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferSchemaSnapshotSealReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTabletBatchReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTabletBinaryReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTabletInsertNodeReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTabletRawReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFilePieceReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFilePieceWithModReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFileSealReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFileSealWithModReq; +import org.apache.iotdb.db.protocol.basic.BasicOpenSessionResp; +import org.apache.iotdb.db.protocol.session.IClientSession; import org.apache.iotdb.db.protocol.session.SessionManager; -import org.apache.iotdb.db.queryengine.common.SessionInfo; import org.apache.iotdb.db.queryengine.common.header.ColumnHeaderConstant; import org.apache.iotdb.db.queryengine.plan.Coordinator; import org.apache.iotdb.db.queryengine.plan.analyze.ClusterPartitionFetcher; import org.apache.iotdb.db.queryengine.plan.analyze.schema.ClusterSchemaFetcher; -import org.apache.iotdb.db.queryengine.plan.execution.ExecutionResult; import org.apache.iotdb.db.queryengine.plan.execution.config.executor.ClusterConfigTaskExecutor; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metedata.write.view.AlterLogicalViewNode; import org.apache.iotdb.db.queryengine.plan.statement.Statement; @@ -72,10 +80,12 @@ import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertTabletStatement; import org.apache.iotdb.db.queryengine.plan.statement.crud.LoadTsFileStatement; import org.apache.iotdb.db.queryengine.plan.statement.pipe.PipeEnrichedStatement; +import org.apache.iotdb.db.storageengine.load.active.ActiveLoadUtil; import org.apache.iotdb.db.storageengine.rescon.disk.FolderManager; import org.apache.iotdb.db.storageengine.rescon.disk.strategy.DirectoryStrategyType; import org.apache.iotdb.db.tools.schema.SRStatementGenerator; import org.apache.iotdb.db.tools.schema.SchemaRegionSnapshotParser; +import 
org.apache.iotdb.pipe.api.exception.PipeException; import org.apache.iotdb.rpc.RpcUtils; import org.apache.iotdb.rpc.TSStatusCode; import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq; @@ -91,6 +101,7 @@ import java.time.ZoneId; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -111,12 +122,15 @@ public class IoTDBDataNodeReceiver extends IoTDBFileReceiver { public static final PipePlanToStatementVisitor PLAN_TO_STATEMENT_VISITOR = new PipePlanToStatementVisitor(); - private static final PipeStatementTSStatusVisitor STATEMENT_STATUS_VISITOR = + public static final PipeStatementTSStatusVisitor STATEMENT_STATUS_VISITOR = new PipeStatementTSStatusVisitor(); - private static final PipeStatementExceptionVisitor STATEMENT_EXCEPTION_VISITOR = + public static final PipeStatementExceptionVisitor STATEMENT_EXCEPTION_VISITOR = new PipeStatementExceptionVisitor(); private static final PipeStatementPatternParseVisitor STATEMENT_PATTERN_PARSE_VISITOR = new PipeStatementPatternParseVisitor(); + private final PipeStatementDataTypeConvertExecutionVisitor + statementDataTypeConvertExecutionVisitor = + new PipeStatementDataTypeConvertExecutionVisitor(this::executeStatement); private final PipeStatementToBatchVisitor batchVisitor = new PipeStatementToBatchVisitor(); // Used for data transfer: confignode (cluster A) -> datanode (cluster B) -> confignode (cluster @@ -127,6 +141,12 @@ public class IoTDBDataNodeReceiver extends IoTDBFileReceiver { private static final AtomicLong CONFIG_RECEIVER_ID_GENERATOR = new AtomicLong(0); protected final AtomicReference configReceiverId = new AtomicReference<>(); + private final PipeTransferSliceReqHandler sliceReqHandler = new PipeTransferSliceReqHandler(); + + private static final SessionManager SESSION_MANAGER = SessionManager.getInstance(); + + private PipeMemoryBlock allocatedMemoryBlock; + static { try { folderManager = @@ -142,111 +162,201 @@ public class IoTDBDataNodeReceiver extends IoTDBFileReceiver { @Override public synchronized TPipeTransferResp receive(final TPipeTransferReq req) { try { - long startTime = System.nanoTime(); + final long startTime = System.nanoTime(); final short rawRequestType = req.getType(); if (PipeRequestType.isValidatedRequestType(rawRequestType)) { - TPipeTransferResp resp; - switch (PipeRequestType.valueOf(rawRequestType)) { + final PipeRequestType requestType = PipeRequestType.valueOf(rawRequestType); + if (requestType != PipeRequestType.TRANSFER_SLICE) { + sliceReqHandler.clear(); + } + switch (requestType) { case HANDSHAKE_DATANODE_V1: - resp = - handleTransferHandshakeV1( + { + try { + if (PipeConfig.getInstance().isPipeEnableMemoryCheck() + && PipeDataNodeResourceManager.memory().getFreeMemorySizeInBytes() + < PipeConfig.getInstance().getPipeMinimumReceiverMemory()) { + return new TPipeTransferResp( + RpcUtils.getStatus( + TSStatusCode.PIPE_HANDSHAKE_ERROR.getStatusCode(), + "The receiver memory is not enough to handle the handshake request from datanode.")); + } + return handleTransferHandshakeV1( PipeTransferDataNodeHandshakeV1Req.fromTPipeTransferReq(req)); - PipeDataNodeReceiverMetrics.getInstance() - .recordHandshakeDatanodeV1Timer(System.nanoTime() - startTime); - return resp; + } finally { + PipeDataNodeReceiverMetrics.getInstance() + .recordHandshakeDatanodeV1Timer(System.nanoTime() - startTime); + } + } case HANDSHAKE_DATANODE_V2: - resp = - handleTransferHandshakeV2( + { + try { + if 
(PipeConfig.getInstance().isPipeEnableMemoryCheck() + && PipeDataNodeResourceManager.memory().getFreeMemorySizeInBytes() + < PipeConfig.getInstance().getPipeMinimumReceiverMemory()) { + return new TPipeTransferResp( + RpcUtils.getStatus( + TSStatusCode.PIPE_HANDSHAKE_ERROR.getStatusCode(), + "The receiver memory is not enough to handle the handshake request from datanode.")); + } + return handleTransferHandshakeV2( PipeTransferDataNodeHandshakeV2Req.fromTPipeTransferReq(req)); - PipeDataNodeReceiverMetrics.getInstance() - .recordHandshakeDatanodeV2Timer(System.nanoTime() - startTime); - return resp; + } finally { + PipeDataNodeReceiverMetrics.getInstance() + .recordHandshakeDatanodeV2Timer(System.nanoTime() - startTime); + } + } case TRANSFER_TABLET_INSERT_NODE: - resp = - handleTransferTabletInsertNode( + { + try { + return handleTransferTabletInsertNode( PipeTransferTabletInsertNodeReq.fromTPipeTransferReq(req)); - PipeDataNodeReceiverMetrics.getInstance() - .recordTransferTabletInsertNodeTimer(System.nanoTime() - startTime); - return resp; + + } finally { + PipeDataNodeReceiverMetrics.getInstance() + .recordTransferTabletInsertNodeTimer(System.nanoTime() - startTime); + } + } case TRANSFER_TABLET_RAW: - resp = handleTransferTabletRaw(PipeTransferTabletRawReq.fromTPipeTransferReq(req)); - PipeDataNodeReceiverMetrics.getInstance() - .recordTransferTabletRawTimer(System.nanoTime() - startTime); - return resp; + { + try { + return handleTransferTabletRaw(PipeTransferTabletRawReq.fromTPipeTransferReq(req)); + } finally { + PipeDataNodeReceiverMetrics.getInstance() + .recordTransferTabletRawTimer(System.nanoTime() - startTime); + } + } case TRANSFER_TABLET_BINARY: - resp = - handleTransferTabletBinary(PipeTransferTabletBinaryReq.fromTPipeTransferReq(req)); - PipeDataNodeReceiverMetrics.getInstance() - .recordTransferTabletBinaryTimer(System.nanoTime() - startTime); - return resp; + { + try { + return handleTransferTabletBinary( + PipeTransferTabletBinaryReq.fromTPipeTransferReq(req)); + } finally { + PipeDataNodeReceiverMetrics.getInstance() + .recordTransferTabletBinaryTimer(System.nanoTime() - startTime); + } + } case TRANSFER_TABLET_BATCH: - resp = handleTransferTabletBatch(PipeTransferTabletBatchReq.fromTPipeTransferReq(req)); - PipeDataNodeReceiverMetrics.getInstance() - .recordTransferTabletBatchTimer(System.nanoTime() - startTime); - return resp; + { + try { + return handleTransferTabletBatch( + PipeTransferTabletBatchReq.fromTPipeTransferReq(req)); + } finally { + PipeDataNodeReceiverMetrics.getInstance() + .recordTransferTabletBatchTimer(System.nanoTime() - startTime); + } + } case TRANSFER_TS_FILE_PIECE: - resp = - handleTransferFilePiece( + { + try { + return handleTransferFilePiece( PipeTransferTsFilePieceReq.fromTPipeTransferReq(req), req instanceof AirGapPseudoTPipeTransferRequest, true); - PipeDataNodeReceiverMetrics.getInstance() - .recordTransferTsFilePieceTimer(System.nanoTime() - startTime); - return resp; + } finally { + PipeDataNodeReceiverMetrics.getInstance() + .recordTransferTsFilePieceTimer(System.nanoTime() - startTime); + } + } case TRANSFER_TS_FILE_SEAL: - resp = handleTransferFileSealV1(PipeTransferTsFileSealReq.fromTPipeTransferReq(req)); - PipeDataNodeReceiverMetrics.getInstance() - .recordTransferTsFileSealTimer(System.nanoTime() - startTime); - return resp; + { + try { + return handleTransferFileSealV1( + PipeTransferTsFileSealReq.fromTPipeTransferReq(req)); + } finally { + PipeDataNodeReceiverMetrics.getInstance() + 
.recordTransferTsFileSealTimer(System.nanoTime() - startTime); + } + } case TRANSFER_TS_FILE_PIECE_WITH_MOD: - resp = - handleTransferFilePiece( + { + try { + return handleTransferFilePiece( PipeTransferTsFilePieceWithModReq.fromTPipeTransferReq(req), req instanceof AirGapPseudoTPipeTransferRequest, false); - PipeDataNodeReceiverMetrics.getInstance() - .recordTransferTsFilePieceWithModTimer(System.nanoTime() - startTime); - return resp; + + } finally { + PipeDataNodeReceiverMetrics.getInstance() + .recordTransferTsFilePieceWithModTimer(System.nanoTime() - startTime); + } + } case TRANSFER_TS_FILE_SEAL_WITH_MOD: - resp = - handleTransferFileSealV2( + { + try { + return handleTransferFileSealV2( PipeTransferTsFileSealWithModReq.fromTPipeTransferReq(req)); - PipeDataNodeReceiverMetrics.getInstance() - .recordTransferTsFileSealWithModTimer(System.nanoTime() - startTime); - return resp; + } finally { + PipeDataNodeReceiverMetrics.getInstance() + .recordTransferTsFileSealWithModTimer(System.nanoTime() - startTime); + } + } case TRANSFER_SCHEMA_PLAN: - resp = handleTransferSchemaPlan(PipeTransferPlanNodeReq.fromTPipeTransferReq(req)); - PipeDataNodeReceiverMetrics.getInstance() - .recordTransferSchemaPlanTimer(System.nanoTime() - startTime); - return resp; + { + try { + return handleTransferSchemaPlan(PipeTransferPlanNodeReq.fromTPipeTransferReq(req)); + } finally { + PipeDataNodeReceiverMetrics.getInstance() + .recordTransferSchemaPlanTimer(System.nanoTime() - startTime); + } + } case TRANSFER_SCHEMA_SNAPSHOT_PIECE: - resp = - handleTransferFilePiece( + { + try { + return handleTransferFilePiece( PipeTransferSchemaSnapshotPieceReq.fromTPipeTransferReq(req), req instanceof AirGapPseudoTPipeTransferRequest, false); - PipeDataNodeReceiverMetrics.getInstance() - .recordTransferSchemaSnapshotPieceTimer(System.nanoTime() - startTime); - return resp; + + } finally { + PipeDataNodeReceiverMetrics.getInstance() + .recordTransferSchemaSnapshotPieceTimer(System.nanoTime() - startTime); + } + } case TRANSFER_SCHEMA_SNAPSHOT_SEAL: - resp = - handleTransferFileSealV2( + { + try { + return handleTransferFileSealV2( PipeTransferSchemaSnapshotSealReq.fromTPipeTransferReq(req)); - PipeDataNodeReceiverMetrics.getInstance() - .recordTransferSchemaSnapshotSealTimer(System.nanoTime() - startTime); - return resp; + + } finally { + PipeDataNodeReceiverMetrics.getInstance() + .recordTransferSchemaSnapshotSealTimer(System.nanoTime() - startTime); + } + } case HANDSHAKE_CONFIGNODE_V1: case HANDSHAKE_CONFIGNODE_V2: case TRANSFER_CONFIG_PLAN: case TRANSFER_CONFIG_SNAPSHOT_PIECE: case TRANSFER_CONFIG_SNAPSHOT_SEAL: - // Config requests will first be received by the DataNode receiver, - // then transferred to ConfigNode receiver to execute. - resp = handleTransferConfigPlan(req); - return resp; + { + try { + // Config requests will first be received by the DataNode receiver, + // then transferred to ConfigNode receiver to execute. 
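A minimal sketch (assumed helper names, not part of the patch) of the try/finally shape applied to every request type in the switch above: the handler may return from several branches, yet the per-type timer is recorded exactly once on every path, including exceptional ones.

import java.util.function.LongConsumer;
import java.util.function.Supplier;

class TimedDispatchSketch {
  // Wrap a handler so the elapsed time is always recorded, regardless of how it exits.
  static <T> T handleWithTimer(final Supplier<T> handler, final LongConsumer timerRecorder) {
    final long startTime = System.nanoTime();
    try {
      return handler.get(); // early returns still pass through the finally block
    } finally {
      timerRecorder.accept(System.nanoTime() - startTime); // recorded exactly once
    }
  }
}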
+ return handleTransferConfigPlan(req); + } finally { + PipeDataNodeReceiverMetrics.getInstance() + .recordTransferConfigPlanTimer(System.nanoTime() - startTime); + } + } + case TRANSFER_SLICE: + { + try { + return handleTransferSlice(PipeTransferSliceReq.fromTPipeTransferReq(req)); + } finally { + PipeDataNodeReceiverMetrics.getInstance() + .recordTransferSliceTimer(System.nanoTime() - startTime); + } + } case TRANSFER_COMPRESSED: - resp = receive(PipeTransferCompressedReq.fromTPipeTransferReq(req)); - return resp; + { + try { + return receive(PipeTransferCompressedReq.fromTPipeTransferReq(req)); + } finally { + PipeDataNodeReceiverMetrics.getInstance() + .recordTransferCompressedTimer(System.nanoTime() - startTime); + } + } default: break; } @@ -304,10 +414,10 @@ private TPipeTransferResp handleTransferTabletBatch(final PipeTransferTabletBatc Stream.of( statementPair.getLeft().isEmpty() ? RpcUtils.SUCCESS_STATUS - : executeStatementAndAddRedirectInfo(statementPair.getLeft()), + : executeBatchStatementAndAddRedirectInfo(statementPair.getLeft()), statementPair.getRight().isEmpty() ? RpcUtils.SUCCESS_STATUS - : executeStatementAndAddRedirectInfo(statementPair.getRight())) + : executeBatchStatementAndAddRedirectInfo(statementPair.getRight())) .collect(Collectors.toList()))); } @@ -316,16 +426,37 @@ protected String getClusterId() { return IoTDBDescriptor.getInstance().getConfig().getClusterId(); } + @Override + protected boolean shouldLogin() { + // The idle time is updated per request + final IClientSession clientSession = SESSION_MANAGER.getCurrSessionAndUpdateIdleTime(); + return clientSession == null || !clientSession.isLogin() || super.shouldLogin(); + } + @Override protected String getReceiverFileBaseDir() throws DiskSpaceInsufficientException { // Get next receiver file base dir by folder manager return Objects.isNull(folderManager) ? null : folderManager.getNextFolder(); } + @Override + protected String getSenderHost() { + final IClientSession session = SESSION_MANAGER.getCurrSession(); + return session != null ? session.getClientAddress() : "unknown"; + } + + @Override + protected String getSenderPort() { + final IClientSession session = SESSION_MANAGER.getCurrSession(); + return session != null ? String.valueOf(session.getClientPort()) : "unknown"; + } + @Override protected TSStatus loadFileV1(final PipeTransferFileSealReqV1 req, final String fileAbsolutePath) - throws FileNotFoundException { - return loadTsFile(fileAbsolutePath); + throws IOException { + return isUsingAsyncLoadTsFileStrategy.get() + ? loadTsFileAsync(Collections.singletonList(fileAbsolutePath)) + : loadTsFileSync(fileAbsolutePath); } @Override @@ -334,15 +465,25 @@ protected TSStatus loadFileV2( throws IOException, IllegalPathException { return req instanceof PipeTransferTsFileSealWithModReq // TsFile's absolute path will be the second element - ? loadTsFile(fileAbsolutePaths.get(1)) + ? (isUsingAsyncLoadTsFileStrategy.get() + ? 
loadTsFileAsync(fileAbsolutePaths) + : loadTsFileSync(fileAbsolutePaths.get(1))) : loadSchemaSnapShot(req.getParameters(), fileAbsolutePaths); } - private TSStatus loadTsFile(final String fileAbsolutePath) throws FileNotFoundException { + private TSStatus loadTsFileAsync(final List absolutePaths) throws IOException { + if (!ActiveLoadUtil.loadFilesToActiveDir(null, absolutePaths, true)) { + throw new PipeException("Load active listening pipe dir is not set."); + } + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + + private TSStatus loadTsFileSync(final String fileAbsolutePath) throws FileNotFoundException { final LoadTsFileStatement statement = new LoadTsFileStatement(fileAbsolutePath); statement.setDeleteAfterLoad(true); - statement.setVerifySchema(true); + statement.setConvertOnTypeMismatch(true); + statement.setVerifySchema(validateTsFile.get()); statement.setAutoCreateDatabase(false); return executeStatementAndClassifyExceptions(statement); @@ -388,13 +529,25 @@ private TSStatus loadSchemaSnapShot( private TPipeTransferResp handleTransferSchemaPlan(final PipeTransferPlanNodeReq req) { // We may be able to skip the alter logical view's exception parsing because // the "AlterLogicalViewNode" is itself idempotent - return req.getPlanNode() instanceof AlterLogicalViewNode - ? new TPipeTransferResp( - ClusterConfigTaskExecutor.getInstance() - .alterLogicalViewByPipe((AlterLogicalViewNode) req.getPlanNode())) - : new TPipeTransferResp( - executeStatementAndClassifyExceptions( - PLAN_TO_STATEMENT_VISITOR.process(req.getPlanNode(), null))); + if (req.getPlanNode() instanceof AlterLogicalViewNode) { + final TSStatus status = + ((AlterLogicalViewNode) req.getPlanNode()).checkPermissionBeforeProcess(username); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + LOGGER.warn( + "Receiver id = {}: Failed to check authority for statement {}, username = {}, response = {}.", + receiverId.get(), + StatementType.ALTER_LOGICAL_VIEW.name(), + username, + status); + return new TPipeTransferResp(status); + } + return new TPipeTransferResp( + ClusterConfigTaskExecutor.getInstance() + .alterLogicalViewByPipe((AlterLogicalViewNode) req.getPlanNode())); + } + return new TPipeTransferResp( + executeStatementAndClassifyExceptions( + PLAN_TO_STATEMENT_VISITOR.process(req.getPlanNode(), null))); } private TPipeTransferResp handleTransferConfigPlan(final TPipeTransferReq req) { @@ -415,6 +568,25 @@ private String getConfigReceiverId() { return configReceiverId.get(); } + private TPipeTransferResp handleTransferSlice(final PipeTransferSliceReq pipeTransferSliceReq) { + final boolean isInorder = sliceReqHandler.receiveSlice(pipeTransferSliceReq); + if (!isInorder) { + return new TPipeTransferResp( + RpcUtils.getStatus( + TSStatusCode.PIPE_TRANSFER_SLICE_OUT_OF_ORDER, + "Slice request is out of order, please check the request sequence.")); + } + final Optional req = sliceReqHandler.makeReqIfComplete(); + if (!req.isPresent()) { + return new TPipeTransferResp( + RpcUtils.getStatus( + TSStatusCode.SUCCESS_STATUS, + "Slice received, waiting for more slices to complete the request.")); + } + // sliceReqHandler will be cleared in the receive(req) method + return receive(req.get()); + } + /** * For {@link InsertRowsStatement} and {@link InsertMultiTabletsStatement}, the returned {@link * TSStatus} will use sub-status to record the endpoint for redirection. Each sub-status records @@ -423,8 +595,8 @@ private String getConfigReceiverId() { * request. 
So for each sub-status which needs to redirect, we record the device path using the * message field. */ - private TSStatus executeStatementAndAddRedirectInfo(final InsertBaseStatement statement) { - final TSStatus result = executeStatementAndClassifyExceptions(statement); + private TSStatus executeBatchStatementAndAddRedirectInfo(final InsertBaseStatement statement) { + final TSStatus result = executeStatementAndClassifyExceptions(statement, 5); if (result.getCode() == TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode() && result.getSubStatusSize() > 0) { @@ -460,8 +632,51 @@ private TSStatus executeStatementAndAddRedirectInfo(final InsertBaseStatement st } private TSStatus executeStatementAndClassifyExceptions(final Statement statement) { + return executeStatementAndClassifyExceptions(statement, 1); + } + + private TSStatus executeStatementAndClassifyExceptions( + final Statement statement, final int tryCount) { + long estimatedMemory = 0L; try { - final TSStatus result = executeStatement(statement); + if (statement instanceof InsertBaseStatement) { + estimatedMemory = ((InsertBaseStatement) statement).ramBytesUsed(); + for (int i = 0; i < tryCount; ++i) { + try { + allocatedMemoryBlock = + PipeDataNodeResourceManager.memory() + .forceAllocate( + (long) + (estimatedMemory + * PipeConfig.getInstance() + .getPipeReceiverActualToEstimatedMemoryRatio())); + break; + } catch (final PipeRuntimeOutOfMemoryCriticalException e) { + if (i == tryCount - 1) { + final String message = + String.format( + "Temporarily out of memory when executing statement %s, Requested memory: %s, " + + "used memory: %s, free memory: %s, total non-floating memory: %s", + statement, + estimatedMemory + * PipeConfig.getInstance().getPipeReceiverActualToEstimatedMemoryRatio(), + PipeDataNodeResourceManager.memory().getUsedMemorySizeInBytes(), + PipeDataNodeResourceManager.memory().getFreeMemorySizeInBytes(), + PipeDataNodeResourceManager.memory().getTotalNonFloatingMemorySizeInBytes()); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Receiver id = {}: {}", receiverId.get(), message, e); + } + return new TSStatus( + TSStatusCode.PIPE_RECEIVER_TEMPORARY_UNAVAILABLE_EXCEPTION.getStatusCode()) + .setMessage(message); + } else { + Thread.sleep(100L * (i + 1)); + } + } + } + } + + final TSStatus result = executeStatementWithRetryOnDataTypeMismatch(statement); if (result.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode() || result.getCode() == TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) { return result; @@ -480,28 +695,83 @@ private TSStatus executeStatementAndClassifyExceptions(final Statement statement statement, e); return statement.accept(STATEMENT_EXCEPTION_VISITOR, e); + } finally { + if (Objects.nonNull(allocatedMemoryBlock)) { + allocatedMemoryBlock.close(); + allocatedMemoryBlock = null; + } } } - private TSStatus executeStatement(Statement statement) { + private TSStatus executeStatementWithRetryOnDataTypeMismatch(final Statement statement) { if (statement == null) { return RpcUtils.getStatus( TSStatusCode.PIPE_TRANSFER_EXECUTE_STATEMENT_ERROR, "Execute null statement."); } - statement = new PipeEnrichedStatement(statement); - - final ExecutionResult result = - Coordinator.getInstance() - .executeForTreeModel( - statement, - SessionManager.getInstance().requestQueryId(), - new SessionInfo(0, AuthorityChecker.SUPER_USER, ZoneId.systemDefault()), - "", - ClusterPartitionFetcher.getInstance(), - ClusterSchemaFetcher.getInstance(), - IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold()); - 
return result.status; + final TSStatus status = executeStatement(statement); + + // Try to convert data type if the statement is a tree model statement + // and the status code is not success + return shouldConvertDataTypeOnTypeMismatch + && ((statement instanceof InsertBaseStatement + && ((InsertBaseStatement) statement).hasFailedMeasurements()) + || (status.getCode() != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode() + && status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode())) + ? statement.accept(statementDataTypeConvertExecutionVisitor, status).orElse(status) + : status; + } + + private TSStatus executeStatement(final Statement statement) { + // Permission check + final TSStatus loginStatus = loginIfNecessary(); + if (loginStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return loginStatus; + } + + final IClientSession clientSession = SESSION_MANAGER.getCurrSession(); + + final TSStatus status = AuthorityChecker.checkAuthority(statement, clientSession); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + LOGGER.warn( + "Receiver id = {}: Failed to check authority for statement {}, username = {}, response = {}.", + receiverId.get(), + statement.getType().name(), + username, + status); + return RpcUtils.getStatus(status.getCode(), status.getMessage()); + } + + return Coordinator.getInstance() + .executeForTreeModel( + shouldMarkAsPipeRequest.get() ? new PipeEnrichedStatement(statement) : statement, + SessionManager.getInstance().requestQueryId(), + SESSION_MANAGER.getSessionInfo(clientSession), + "", + ClusterPartitionFetcher.getInstance(), + ClusterSchemaFetcher.getInstance(), + IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold(), + false) + .status; + } + + @Override + protected TSStatus login() { + final IClientSession session = SESSION_MANAGER.getCurrSession(); + + if (session != null && !session.isLogin()) { + final BasicOpenSessionResp openSessionResp = + SESSION_MANAGER.login( + session, + username, + password, + ZoneId.systemDefault().toString(), + SessionManager.CURRENT_RPC_VERSION, + IoTDBConstant.ClientVersion.V_1_0); + return RpcUtils.getStatus(openSessionResp.getCode(), openSessionResp.message); + } + + return AuthorityChecker.checkUser(username, password); } @Override @@ -516,4 +786,13 @@ public synchronized void handleExit() { super.handleExit(); } + + @Override + protected void closeSession() { + final IClientSession session = SESSION_MANAGER.getCurrSession(); + if (session != null) { + SESSION_MANAGER.closeSession(session, Coordinator.getInstance()::cleanupQueryExecution); + } + SESSION_MANAGER.removeCurrSession(); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiverAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiverAgent.java index e0016eceac2ed..8a995f7656d93 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiverAgent.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiverAgent.java @@ -19,9 +19,9 @@ package org.apache.iotdb.db.pipe.receiver.protocol.thrift; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.IoTDBConnectorRequestVersion; import org.apache.iotdb.commons.pipe.receiver.IoTDBReceiver; import org.apache.iotdb.commons.pipe.receiver.IoTDBReceiverAgent; +import 
org.apache.iotdb.commons.pipe.sink.payload.thrift.request.IoTDBSinkRequestVersion; import org.apache.iotdb.db.pipe.processor.twostage.exchange.receiver.TwoStageAggregateReceiver; public class IoTDBDataNodeReceiverAgent extends IoTDBReceiverAgent { @@ -31,9 +31,9 @@ public class IoTDBDataNodeReceiverAgent extends IoTDBReceiverAgent { @Override protected void initConstructors() { RECEIVER_CONSTRUCTORS.put( - IoTDBConnectorRequestVersion.VERSION_1.getVersion(), IoTDBDataNodeReceiver::new); + IoTDBSinkRequestVersion.VERSION_1.getVersion(), IoTDBDataNodeReceiver::new); RECEIVER_CONSTRUCTORS.put( - IoTDBConnectorRequestVersion.VERSION_2.getVersion(), TwoStageAggregateReceiver::new); + IoTDBSinkRequestVersion.VERSION_2.getVersion(), TwoStageAggregateReceiver::new); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/converter/ArrayConverter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/converter/ArrayConverter.java new file mode 100644 index 0000000000000..3d5a9b0cfb615 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/converter/ArrayConverter.java @@ -0,0 +1,940 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.pipe.receiver.transform.converter; + +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.utils.Binary; + +public class ArrayConverter { + + @FunctionalInterface + private interface Converter { + Object convert( + final TSDataType sourceDataType, + final TSDataType targetDataType, + final Object sourceValues); + } + + private static final Converter[][] CONVERTER = + new Converter[TSDataType.values().length][TSDataType.values().length]; + + private static final Converter DO_NOTHING_CONVERTER = + (sourceDataType, targetDataType, sourceValues) -> sourceValues; + + static { + for (final TSDataType sourceDataType : TSDataType.values()) { + for (final TSDataType targetDataType : TSDataType.values()) { + CONVERTER[sourceDataType.ordinal()][targetDataType.ordinal()] = DO_NOTHING_CONVERTER; + } + } + + // BOOLEAN + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> sourceValues; + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final boolean[] boolValues = (boolean[]) sourceValues; + final int[] intValues = new int[boolValues.length]; + for (int i = 0; i < boolValues.length; i++) { + intValues[i] = ValueConverter.convertBooleanToInt32(boolValues[i]); + } + return intValues; + }; + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final boolean[] boolValues = (boolean[]) sourceValues; + final long[] longValues = new long[boolValues.length]; + for (int i = 0; i < boolValues.length; i++) { + longValues[i] = ValueConverter.convertBooleanToInt64(boolValues[i]); + } + return longValues; + }; + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final boolean[] boolValues = (boolean[]) sourceValues; + final float[] floatValues = new float[boolValues.length]; + for (int i = 0; i < boolValues.length; i++) { + floatValues[i] = ValueConverter.convertBooleanToFloat(boolValues[i]); + } + return floatValues; + }; + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final boolean[] boolValues = (boolean[]) sourceValues; + final double[] doubleValues = new double[boolValues.length]; + for (int i = 0; i < boolValues.length; i++) { + doubleValues[i] = ValueConverter.convertBooleanToDouble(boolValues[i]); + } + return doubleValues; + }; + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final boolean[] boolValues = (boolean[]) sourceValues; + final Binary[] textValues = new Binary[boolValues.length]; + for (int i = 0; i < boolValues.length; i++) { + textValues[i] = ValueConverter.convertBooleanToText(boolValues[i]); + } + return textValues; + }; + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.VECTOR.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.UNKNOWN.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final boolean[] boolValues = (boolean[]) sourceValues; + final long[] timestampValues = new long[boolValues.length]; + for (int i = 0; i < boolValues.length; i++) { + timestampValues[i] = ValueConverter.convertBooleanToTimestamp(boolValues[i]); + } + return 
timestampValues; + }; + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final boolean[] boolValues = (boolean[]) sourceValues; + final int[] dateValues = new int[boolValues.length]; + for (int i = 0; i < boolValues.length; i++) { + dateValues[i] = ValueConverter.convertBooleanToDate(boolValues[i]); + } + return dateValues; + }; + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final boolean[] boolValues = (boolean[]) sourceValues; + final Binary[] blobValues = new Binary[boolValues.length]; + for (int i = 0; i < boolValues.length; i++) { + blobValues[i] = ValueConverter.convertBooleanToBlob(boolValues[i]); + } + return blobValues; + }; + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final boolean[] boolValues = (boolean[]) sourceValues; + final Binary[] stringValues = new Binary[boolValues.length]; + for (int i = 0; i < boolValues.length; i++) { + stringValues[i] = ValueConverter.convertBooleanToString(boolValues[i]); + } + return stringValues; + }; + + // INT32 + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final int[] intValues = (int[]) sourceValues; + final boolean[] boolValues = new boolean[intValues.length]; + for (int i = 0; i < intValues.length; i++) { + boolValues[i] = ValueConverter.convertInt32ToBoolean(intValues[i]); + } + return boolValues; + }; + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> sourceValues; + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final int[] intValues = (int[]) sourceValues; + final long[] longValues = new long[intValues.length]; + for (int i = 0; i < intValues.length; i++) { + longValues[i] = ValueConverter.convertInt32ToInt64(intValues[i]); + } + return longValues; + }; + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final int[] intValues = (int[]) sourceValues; + final float[] floatValues = new float[intValues.length]; + for (int i = 0; i < intValues.length; i++) { + floatValues[i] = ValueConverter.convertInt32ToFloat(intValues[i]); + } + return floatValues; + }; + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final int[] intValues = (int[]) sourceValues; + final double[] doubleValues = new double[intValues.length]; + for (int i = 0; i < intValues.length; i++) { + doubleValues[i] = ValueConverter.convertInt32ToDouble(intValues[i]); + } + return doubleValues; + }; + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final int[] intValues = (int[]) sourceValues; + final Binary[] textValues = new Binary[intValues.length]; + for (int i = 0; i < intValues.length; i++) { + textValues[i] = ValueConverter.convertInt32ToText(intValues[i]); + } + return textValues; + }; + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.VECTOR.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.UNKNOWN.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> 
{ + final int[] intValues = (int[]) sourceValues; + final long[] timestampValues = new long[intValues.length]; + for (int i = 0; i < intValues.length; i++) { + timestampValues[i] = ValueConverter.convertInt32ToTimestamp(intValues[i]); + } + return timestampValues; + }; + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final int[] intValues = (int[]) sourceValues; + final int[] dateValues = new int[intValues.length]; + for (int i = 0; i < intValues.length; i++) { + dateValues[i] = ValueConverter.convertInt32ToDate(intValues[i]); + } + return dateValues; + }; + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final int[] intValues = (int[]) sourceValues; + final Binary[] blobValues = new Binary[intValues.length]; + for (int i = 0; i < intValues.length; i++) { + blobValues[i] = ValueConverter.convertInt32ToBlob(intValues[i]); + } + return blobValues; + }; + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final int[] intValues = (int[]) sourceValues; + final Binary[] stringValues = new Binary[intValues.length]; + for (int i = 0; i < intValues.length; i++) { + stringValues[i] = ValueConverter.convertInt32ToString(intValues[i]); + } + return stringValues; + }; + + // INT64 + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] longValues = (long[]) sourceValues; + final boolean[] boolValues = new boolean[longValues.length]; + for (int i = 0; i < longValues.length; i++) { + boolValues[i] = ValueConverter.convertInt64ToBoolean(longValues[i]); + } + return boolValues; + }; + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] longValues = (long[]) sourceValues; + final int[] intValues = new int[longValues.length]; + for (int i = 0; i < longValues.length; i++) { + intValues[i] = ValueConverter.convertInt64ToInt32(longValues[i]); + } + return intValues; + }; + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> sourceValues; + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] longValues = (long[]) sourceValues; + final float[] floatValues = new float[longValues.length]; + for (int i = 0; i < longValues.length; i++) { + floatValues[i] = ValueConverter.convertInt64ToFloat(longValues[i]); + } + return floatValues; + }; + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] longValues = (long[]) sourceValues; + final double[] doubleValues = new double[longValues.length]; + for (int i = 0; i < longValues.length; i++) { + doubleValues[i] = ValueConverter.convertInt64ToDouble(longValues[i]); + } + return doubleValues; + }; + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] longValues = (long[]) sourceValues; + final Binary[] textValues = new Binary[longValues.length]; + for (int i = 0; i < longValues.length; i++) { + textValues[i] = ValueConverter.convertInt64ToText(longValues[i]); + } + return textValues; + }; + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.VECTOR.ordinal()] = 
DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.UNKNOWN.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] longValues = (long[]) sourceValues; + final long[] timestampValues = new long[longValues.length]; + for (int i = 0; i < longValues.length; i++) { + timestampValues[i] = ValueConverter.convertInt64ToTimestamp(longValues[i]); + } + return timestampValues; + }; + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] longValues = (long[]) sourceValues; + final int[] dateValues = new int[longValues.length]; + for (int i = 0; i < longValues.length; i++) { + dateValues[i] = ValueConverter.convertInt64ToDate(longValues[i]); + } + return dateValues; + }; + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] longValues = (long[]) sourceValues; + final Binary[] blobValues = new Binary[longValues.length]; + for (int i = 0; i < longValues.length; i++) { + blobValues[i] = ValueConverter.convertInt64ToBlob(longValues[i]); + } + return blobValues; + }; + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] longValues = (long[]) sourceValues; + final Binary[] stringValues = new Binary[longValues.length]; + for (int i = 0; i < longValues.length; i++) { + stringValues[i] = ValueConverter.convertInt64ToString(longValues[i]); + } + return stringValues; + }; + + // FLOAT + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final float[] floatValues = (float[]) sourceValues; + final boolean[] boolValues = new boolean[floatValues.length]; + for (int i = 0; i < floatValues.length; i++) { + boolValues[i] = ValueConverter.convertFloatToBoolean(floatValues[i]); + } + return boolValues; + }; + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final float[] floatValues = (float[]) sourceValues; + final int[] intValues = new int[floatValues.length]; + for (int i = 0; i < floatValues.length; i++) { + intValues[i] = ValueConverter.convertFloatToInt32(floatValues[i]); + } + return intValues; + }; + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final float[] floatValues = (float[]) sourceValues; + final long[] longValues = new long[floatValues.length]; + for (int i = 0; i < floatValues.length; i++) { + longValues[i] = ValueConverter.convertFloatToInt64(floatValues[i]); + } + return longValues; + }; + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> sourceValues; + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final float[] floatValues = (float[]) sourceValues; + final double[] doubleValues = new double[floatValues.length]; + for (int i = 0; i < floatValues.length; i++) { + doubleValues[i] = ValueConverter.convertFloatToDouble(floatValues[i]); + } + return doubleValues; + }; + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final float[] floatValues = (float[]) sourceValues; + 
final Binary[] textValues = new Binary[floatValues.length]; + for (int i = 0; i < floatValues.length; i++) { + textValues[i] = ValueConverter.convertFloatToText(floatValues[i]); + } + return textValues; + }; + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.VECTOR.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.UNKNOWN.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final float[] floatValues = (float[]) sourceValues; + final long[] timestampValues = new long[floatValues.length]; + for (int i = 0; i < floatValues.length; i++) { + timestampValues[i] = ValueConverter.convertFloatToTimestamp(floatValues[i]); + } + return timestampValues; + }; + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final float[] floatValues = (float[]) sourceValues; + final int[] dateValues = new int[floatValues.length]; + for (int i = 0; i < floatValues.length; i++) { + dateValues[i] = ValueConverter.convertFloatToDate(floatValues[i]); + } + return dateValues; + }; + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final float[] floatValues = (float[]) sourceValues; + final Binary[] blobValues = new Binary[floatValues.length]; + for (int i = 0; i < floatValues.length; i++) { + blobValues[i] = ValueConverter.convertFloatToBlob(floatValues[i]); + } + return blobValues; + }; + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final float[] floatValues = (float[]) sourceValues; + final Binary[] stringValues = new Binary[floatValues.length]; + for (int i = 0; i < floatValues.length; i++) { + stringValues[i] = ValueConverter.convertFloatToString(floatValues[i]); + } + return stringValues; + }; + + // DOUBLE + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final double[] doubleValues = (double[]) sourceValues; + final boolean[] boolValues = new boolean[doubleValues.length]; + for (int i = 0; i < doubleValues.length; i++) { + boolValues[i] = ValueConverter.convertDoubleToBoolean(doubleValues[i]); + } + return boolValues; + }; + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final double[] doubleValues = (double[]) sourceValues; + final int[] intValues = new int[doubleValues.length]; + for (int i = 0; i < doubleValues.length; i++) { + intValues[i] = ValueConverter.convertDoubleToInt32(doubleValues[i]); + } + return intValues; + }; + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final double[] doubleValues = (double[]) sourceValues; + final long[] longValues = new long[doubleValues.length]; + for (int i = 0; i < doubleValues.length; i++) { + longValues[i] = ValueConverter.convertDoubleToInt64(doubleValues[i]); + } + return longValues; + }; + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final double[] doubleValues = (double[]) sourceValues; + final float[] floatValues = new float[doubleValues.length]; + for (int i = 0; i < doubleValues.length; i++) { + floatValues[i] = ValueConverter.convertDoubleToFloat(doubleValues[i]); + } + return 
floatValues; + }; + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> sourceValues; + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final double[] doubleValues = (double[]) sourceValues; + final Binary[] textValues = new Binary[doubleValues.length]; + for (int i = 0; i < doubleValues.length; i++) { + textValues[i] = ValueConverter.convertDoubleToText(doubleValues[i]); + } + return textValues; + }; + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.VECTOR.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.UNKNOWN.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final double[] doubleValues = (double[]) sourceValues; + final long[] timestampValues = new long[doubleValues.length]; + for (int i = 0; i < doubleValues.length; i++) { + timestampValues[i] = ValueConverter.convertDoubleToTimestamp(doubleValues[i]); + } + return timestampValues; + }; + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final double[] doubleValues = (double[]) sourceValues; + final int[] dateValues = new int[doubleValues.length]; + for (int i = 0; i < doubleValues.length; i++) { + dateValues[i] = ValueConverter.convertDoubleToDate(doubleValues[i]); + } + return dateValues; + }; + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final double[] doubleValues = (double[]) sourceValues; + final Binary[] blobValues = new Binary[doubleValues.length]; + for (int i = 0; i < doubleValues.length; i++) { + blobValues[i] = ValueConverter.convertDoubleToBlob(doubleValues[i]); + } + return blobValues; + }; + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final double[] doubleValues = (double[]) sourceValues; + final Binary[] stringValues = new Binary[doubleValues.length]; + for (int i = 0; i < doubleValues.length; i++) { + stringValues[i] = ValueConverter.convertDoubleToString(doubleValues[i]); + } + return stringValues; + }; + + // TEXT + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] textValues = (Binary[]) sourceValues; + final boolean[] boolValues = new boolean[textValues.length]; + for (int i = 0; i < textValues.length; i++) { + boolValues[i] = ValueConverter.convertTextToBoolean(textValues[i]); + } + return boolValues; + }; + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] textValues = (Binary[]) sourceValues; + final int[] intValues = new int[textValues.length]; + for (int i = 0; i < textValues.length; i++) { + intValues[i] = ValueConverter.convertTextToInt32(textValues[i]); + } + return intValues; + }; + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] textValues = (Binary[]) sourceValues; + final long[] longValues = new long[textValues.length]; + for (int i = 0; i < textValues.length; i++) { + longValues[i] = ValueConverter.convertTextToInt64(textValues[i]); + } + return longValues; + }; + 
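+    // Note: the TEXT -> INT32/INT64 converters above parse each value as a double and
+    // then narrow it, so fractional strings are truncated and unparseable strings fall
+    // back to 0 (see ValueConverter.parseDouble).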
CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] textValues = (Binary[]) sourceValues; + final float[] floatValues = new float[textValues.length]; + for (int i = 0; i < textValues.length; i++) { + floatValues[i] = ValueConverter.convertTextToFloat(textValues[i]); + } + return floatValues; + }; + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] textValues = (Binary[]) sourceValues; + final double[] doubleValues = new double[textValues.length]; + for (int i = 0; i < textValues.length; i++) { + doubleValues[i] = ValueConverter.convertTextToDouble(textValues[i]); + } + return doubleValues; + }; + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> sourceValues; + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.VECTOR.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.UNKNOWN.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] textValues = (Binary[]) sourceValues; + final long[] timestampValues = new long[textValues.length]; + for (int i = 0; i < textValues.length; i++) { + timestampValues[i] = ValueConverter.convertTextToTimestamp(textValues[i]); + } + return timestampValues; + }; + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] textValues = (Binary[]) sourceValues; + final int[] dateValues = new int[textValues.length]; + for (int i = 0; i < textValues.length; i++) { + dateValues[i] = ValueConverter.convertTextToDate(textValues[i]); + } + return dateValues; + }; + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] textValues = (Binary[]) sourceValues; + final Binary[] blobValues = new Binary[textValues.length]; + for (int i = 0; i < textValues.length; i++) { + blobValues[i] = ValueConverter.convertTextToBlob(textValues[i]); + } + return blobValues; + }; + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] textValues = (Binary[]) sourceValues; + final Binary[] stringValues = new Binary[textValues.length]; + for (int i = 0; i < textValues.length; i++) { + stringValues[i] = ValueConverter.convertTextToString(textValues[i]); + } + return stringValues; + }; + + // VECTOR + for (int i = 0; i < TSDataType.values().length; i++) { + CONVERTER[TSDataType.VECTOR.ordinal()][i] = DO_NOTHING_CONVERTER; + } + + // UNKNOWN + for (int i = 0; i < TSDataType.values().length; i++) { + CONVERTER[TSDataType.UNKNOWN.ordinal()][i] = DO_NOTHING_CONVERTER; + } + + // TIMESTAMP + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] timestampValues = (long[]) sourceValues; + final boolean[] boolValues = new boolean[timestampValues.length]; + for (int i = 0; i < timestampValues.length; i++) { + boolValues[i] = ValueConverter.convertTimestampToBoolean(timestampValues[i]); + } + return boolValues; + }; + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] timestampValues = (long[]) 
sourceValues; + final int[] intValues = new int[timestampValues.length]; + for (int i = 0; i < timestampValues.length; i++) { + intValues[i] = ValueConverter.convertTimestampToInt32(timestampValues[i]); + } + return intValues; + }; + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] timestampValues = (long[]) sourceValues; + final long[] longValues = new long[timestampValues.length]; + for (int i = 0; i < timestampValues.length; i++) { + longValues[i] = ValueConverter.convertTimestampToInt64(timestampValues[i]); + } + return longValues; + }; + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] timestampValues = (long[]) sourceValues; + final float[] floatValues = new float[timestampValues.length]; + for (int i = 0; i < timestampValues.length; i++) { + floatValues[i] = ValueConverter.convertTimestampToFloat(timestampValues[i]); + } + return floatValues; + }; + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] timestampValues = (long[]) sourceValues; + final double[] doubleValues = new double[timestampValues.length]; + for (int i = 0; i < timestampValues.length; i++) { + doubleValues[i] = ValueConverter.convertTimestampToDouble(timestampValues[i]); + } + return doubleValues; + }; + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] timestampValues = (long[]) sourceValues; + final Binary[] textValues = new Binary[timestampValues.length]; + for (int i = 0; i < timestampValues.length; i++) { + textValues[i] = ValueConverter.convertTimestampToText(timestampValues[i]); + } + return textValues; + }; + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.VECTOR.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.UNKNOWN.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> sourceValues; + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] timestampValues = (long[]) sourceValues; + final int[] dateValues = new int[timestampValues.length]; + for (int i = 0; i < timestampValues.length; i++) { + dateValues[i] = ValueConverter.convertTimestampToDate(timestampValues[i]); + } + return dateValues; + }; + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] timestampValues = (long[]) sourceValues; + final Binary[] blobValues = new Binary[timestampValues.length]; + for (int i = 0; i < timestampValues.length; i++) { + blobValues[i] = ValueConverter.convertTimestampToBlob(timestampValues[i]); + } + return blobValues; + }; + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final long[] timestampValues = (long[]) sourceValues; + final Binary[] stringValues = new Binary[timestampValues.length]; + for (int i = 0; i < timestampValues.length; i++) { + stringValues[i] = ValueConverter.convertTimestampToString(timestampValues[i]); + } + return stringValues; + }; + + // DATE + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, 
targetDataType, sourceValues) -> { + final int[] dateValues = (int[]) sourceValues; + final boolean[] boolValues = new boolean[dateValues.length]; + for (int i = 0; i < dateValues.length; i++) { + boolValues[i] = ValueConverter.convertDateToBoolean(dateValues[i]); + } + return boolValues; + }; + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final int[] dateValues = (int[]) sourceValues; + final int[] intValues = new int[dateValues.length]; + for (int i = 0; i < dateValues.length; i++) { + intValues[i] = ValueConverter.convertDateToInt32(dateValues[i]); + } + return intValues; + }; + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final int[] dateValues = (int[]) sourceValues; + final long[] longValues = new long[dateValues.length]; + for (int i = 0; i < dateValues.length; i++) { + longValues[i] = ValueConverter.convertDateToInt64(dateValues[i]); + } + return longValues; + }; + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final int[] dateValues = (int[]) sourceValues; + final float[] floatValues = new float[dateValues.length]; + for (int i = 0; i < dateValues.length; i++) { + floatValues[i] = ValueConverter.convertDateToFloat(dateValues[i]); + } + return floatValues; + }; + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final int[] dateValues = (int[]) sourceValues; + final double[] doubleValues = new double[dateValues.length]; + for (int i = 0; i < dateValues.length; i++) { + doubleValues[i] = ValueConverter.convertDateToDouble(dateValues[i]); + } + return doubleValues; + }; + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final int[] dateValues = (int[]) sourceValues; + final Binary[] textValues = new Binary[dateValues.length]; + for (int i = 0; i < dateValues.length; i++) { + textValues[i] = ValueConverter.convertDateToText(dateValues[i]); + } + return textValues; + }; + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.VECTOR.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.UNKNOWN.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final int[] dateValues = (int[]) sourceValues; + final long[] timestampValues = new long[dateValues.length]; + for (int i = 0; i < dateValues.length; i++) { + timestampValues[i] = ValueConverter.convertDateToTimestamp(dateValues[i]); + } + return timestampValues; + }; + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> sourceValues; + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final int[] dateValues = (int[]) sourceValues; + final Binary[] blobValues = new Binary[dateValues.length]; + for (int i = 0; i < dateValues.length; i++) { + blobValues[i] = ValueConverter.convertDateToBlob(dateValues[i]); + } + return blobValues; + }; + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final int[] dateValues = (int[]) sourceValues; + final Binary[] stringValues = new Binary[dateValues.length]; + for (int i = 0; i < dateValues.length; 
i++) { + stringValues[i] = ValueConverter.convertDateToString(dateValues[i]); + } + return stringValues; + }; + + // BLOB + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] blobValues = (Binary[]) sourceValues; + final boolean[] boolValues = new boolean[blobValues.length]; + for (int i = 0; i < blobValues.length; i++) { + boolValues[i] = ValueConverter.convertBlobToBoolean(blobValues[i]); + } + return boolValues; + }; + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] blobValues = (Binary[]) sourceValues; + final int[] intValues = new int[blobValues.length]; + for (int i = 0; i < blobValues.length; i++) { + intValues[i] = ValueConverter.convertBlobToInt32(blobValues[i]); + } + return intValues; + }; + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] blobValues = (Binary[]) sourceValues; + final long[] longValues = new long[blobValues.length]; + for (int i = 0; i < blobValues.length; i++) { + longValues[i] = ValueConverter.convertBlobToInt64(blobValues[i]); + } + return longValues; + }; + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] blobValues = (Binary[]) sourceValues; + final float[] floatValues = new float[blobValues.length]; + for (int i = 0; i < blobValues.length; i++) { + floatValues[i] = ValueConverter.convertBlobToFloat(blobValues[i]); + } + return floatValues; + }; + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] blobValues = (Binary[]) sourceValues; + final double[] doubleValues = new double[blobValues.length]; + for (int i = 0; i < blobValues.length; i++) { + doubleValues[i] = ValueConverter.convertBlobToDouble(blobValues[i]); + } + return doubleValues; + }; + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] blobValues = (Binary[]) sourceValues; + final Binary[] textValues = new Binary[blobValues.length]; + for (int i = 0; i < blobValues.length; i++) { + textValues[i] = ValueConverter.convertBlobToText(blobValues[i]); + } + return textValues; + }; + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.VECTOR.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.UNKNOWN.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] blobValues = (Binary[]) sourceValues; + final long[] timestampValues = new long[blobValues.length]; + for (int i = 0; i < blobValues.length; i++) { + timestampValues[i] = ValueConverter.convertBlobToTimestamp(blobValues[i]); + } + return timestampValues; + }; + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] blobValues = (Binary[]) sourceValues; + final int[] dateValues = new int[blobValues.length]; + for (int i = 0; i < blobValues.length; i++) { + dateValues[i] = ValueConverter.convertBlobToDate(blobValues[i]); + } + return dateValues; + }; + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> sourceValues; + 
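+    // Note: identity conversions such as BLOB -> BLOB above return the source array
+    // itself rather than a copy, so callers must treat the returned array as read-only.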
CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] blobValues = (Binary[]) sourceValues; + final Binary[] stringValues = new Binary[blobValues.length]; + for (int i = 0; i < blobValues.length; i++) { + stringValues[i] = ValueConverter.convertBlobToString(blobValues[i]); + } + return stringValues; + }; + + // STRING + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] stringValues = (Binary[]) sourceValues; + final boolean[] boolValues = new boolean[stringValues.length]; + for (int i = 0; i < stringValues.length; i++) { + boolValues[i] = ValueConverter.convertStringToBoolean(stringValues[i]); + } + return boolValues; + }; + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] stringValues = (Binary[]) sourceValues; + final int[] intValues = new int[stringValues.length]; + for (int i = 0; i < stringValues.length; i++) { + intValues[i] = ValueConverter.convertStringToInt32(stringValues[i]); + } + return intValues; + }; + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] stringValues = (Binary[]) sourceValues; + final long[] longValues = new long[stringValues.length]; + for (int i = 0; i < stringValues.length; i++) { + longValues[i] = ValueConverter.convertStringToInt64(stringValues[i]); + } + return longValues; + }; + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] stringValues = (Binary[]) sourceValues; + final float[] floatValues = new float[stringValues.length]; + for (int i = 0; i < stringValues.length; i++) { + floatValues[i] = ValueConverter.convertStringToFloat(stringValues[i]); + } + return floatValues; + }; + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] stringValues = (Binary[]) sourceValues; + final double[] doubleValues = new double[stringValues.length]; + for (int i = 0; i < stringValues.length; i++) { + doubleValues[i] = ValueConverter.convertStringToDouble(stringValues[i]); + } + return doubleValues; + }; + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] stringValues = (Binary[]) sourceValues; + final Binary[] textValues = new Binary[stringValues.length]; + for (int i = 0; i < stringValues.length; i++) { + textValues[i] = ValueConverter.convertStringToText(stringValues[i]); + } + return textValues; + }; + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.VECTOR.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.UNKNOWN.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] stringValues = (Binary[]) sourceValues; + final long[] timestampValues = new long[stringValues.length]; + for (int i = 0; i < stringValues.length; i++) { + timestampValues[i] = ValueConverter.convertStringToTimestamp(stringValues[i]); + } + return timestampValues; + }; + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValues) -> { + final Binary[] stringValues = 
(Binary[]) sourceValues;
+          final int[] dateValues = new int[stringValues.length];
+          for (int i = 0; i < stringValues.length; i++) {
+            dateValues[i] = ValueConverter.convertStringToDate(stringValues[i]);
+          }
+          return dateValues;
+        };
+    CONVERTER[TSDataType.STRING.ordinal()][TSDataType.BLOB.ordinal()] =
+        (sourceDataType, targetDataType, sourceValues) -> {
+          final Binary[] stringValues = (Binary[]) sourceValues;
+          final Binary[] blobValues = new Binary[stringValues.length];
+          for (int i = 0; i < stringValues.length; i++) {
+            blobValues[i] = ValueConverter.convertStringToBlob(stringValues[i]);
+          }
+          return blobValues;
+        };
+    CONVERTER[TSDataType.STRING.ordinal()][TSDataType.STRING.ordinal()] =
+        (sourceDataType, targetDataType, sourceValues) -> sourceValues;
+  }
+
+  public static Object convert(
+      final TSDataType sourceDataType, final TSDataType targetDataType, final Object sourceValues) {
+    return sourceValues == null
+        ? null
+        : CONVERTER[sourceDataType.ordinal()][targetDataType.ordinal()].convert(
+            sourceDataType, targetDataType, sourceValues);
+  }
+
+  private ArrayConverter() {
+    // forbidden to construct
+  }
+}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/converter/ValueConverter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/converter/ValueConverter.java
new file mode 100644
index 0000000000000..09e2f265ec556
--- /dev/null
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/converter/ValueConverter.java
@@ -0,0 +1,833 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.apache.iotdb.db.pipe.receiver.transform.converter; + +import org.apache.iotdb.db.utils.DateTimeUtils; +import org.apache.iotdb.db.utils.TypeInferenceUtils; + +import org.apache.commons.lang3.StringUtils; +import org.apache.tsfile.common.conf.TSFileConfig; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.utils.Binary; +import org.apache.tsfile.utils.DateUtils; + +import java.time.Instant; +import java.time.LocalDate; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; + +public class ValueConverter { + + @FunctionalInterface + private interface Converter { + Object convert( + final TSDataType sourceDataType, final TSDataType targetDataType, final Object sourceValue); + } + + private static final Converter[][] CONVERTER = + new Converter[TSDataType.values().length][TSDataType.values().length]; + + private static final Converter DO_NOTHING_CONVERTER = + (sourceDataType, targetDataType, sourceValue) -> sourceValue; + + static { + for (final TSDataType sourceDataType : TSDataType.values()) { + for (final TSDataType targetDataType : TSDataType.values()) { + CONVERTER[sourceDataType.ordinal()][targetDataType.ordinal()] = DO_NOTHING_CONVERTER; + } + } + + // BOOLEAN + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.BOOLEAN.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertBooleanToInt32((boolean) sourceValue); + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertBooleanToInt64((boolean) sourceValue); + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertBooleanToFloat((boolean) sourceValue); + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertBooleanToDouble((boolean) sourceValue); + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertBooleanToText((boolean) sourceValue); + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertBooleanToTimestamp((boolean) sourceValue); + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertBooleanToDate((boolean) sourceValue); + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertBooleanToBlob((boolean) sourceValue); + CONVERTER[TSDataType.BOOLEAN.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertBooleanToString((boolean) sourceValue); + + // INT32 + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertInt32ToBoolean((int) sourceValue); + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.INT32.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertInt32ToInt64((int) sourceValue); + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertInt32ToFloat((int) sourceValue); + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, 
targetDataType, sourceValue) -> convertInt32ToDouble((int) sourceValue); + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertInt32ToText((int) sourceValue); + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertInt32ToTimestamp((int) sourceValue); + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertInt32ToDate((int) sourceValue); + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertInt32ToBlob((int) sourceValue); + CONVERTER[TSDataType.INT32.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertInt32ToString((int) sourceValue); + + // INT64 + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertInt64ToBoolean((long) sourceValue); + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertInt64ToInt32((long) sourceValue); + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.INT64.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertInt64ToFloat((long) sourceValue); + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertInt64ToDouble((long) sourceValue); + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertInt64ToText((long) sourceValue); + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertInt64ToTimestamp((long) sourceValue); + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertInt64ToDate((long) sourceValue); + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertInt64ToBlob((long) sourceValue); + CONVERTER[TSDataType.INT64.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertInt64ToString((long) sourceValue); + + // FLOAT + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertFloatToBoolean((float) sourceValue); + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertFloatToInt32((float) sourceValue); + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertFloatToInt64((float) sourceValue); + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.FLOAT.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertFloatToDouble((float) sourceValue); + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertFloatToText((float) sourceValue); + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertFloatToTimestamp((float) sourceValue); 
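+    // Note: the FLOAT -> INT32/INT64/TIMESTAMP conversions use plain Java narrowing
+    // casts, so NaN maps to 0 and out-of-range values saturate at the target type's
+    // minimum or maximum value.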
+ CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertFloatToDate((float) sourceValue); + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertFloatToBlob((float) sourceValue); + CONVERTER[TSDataType.FLOAT.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertFloatToString((float) sourceValue); + + // DOUBLE + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertDoubleToBoolean((double) sourceValue); + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertDoubleToInt32((double) sourceValue); + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertDoubleToInt64((double) sourceValue); + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertDoubleToFloat((double) sourceValue); + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.DOUBLE.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertDoubleToText((double) sourceValue); + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertDoubleToTimestamp((double) sourceValue); + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertDoubleToDate((double) sourceValue); + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertDoubleToBlob((double) sourceValue); + CONVERTER[TSDataType.DOUBLE.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertDoubleToString((double) sourceValue); + + // TEXT + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertTextToBoolean((Binary) sourceValue); + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertTextToInt32((Binary) sourceValue); + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertTextToInt64((Binary) sourceValue); + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertTextToFloat((Binary) sourceValue); + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertTextToDouble((Binary) sourceValue); + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.TEXT.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertTextToTimestamp((Binary) sourceValue); + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertTextToDate((Binary) sourceValue); + CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertTextToBlob((Binary) sourceValue); + 
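+    // Note: TEXT -> BLOB round-trips the payload through a java.lang.String (this
+    // assumes Binary.toString() decodes with TSFileConfig.STRING_CHARSET), so byte
+    // sequences that are invalid in that charset may not survive unchanged.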
CONVERTER[TSDataType.TEXT.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertTextToString((Binary) sourceValue); + + // TIMESTAMP + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertTimestampToBoolean((long) sourceValue); + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertTimestampToInt32((long) sourceValue); + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertTimestampToInt64((long) sourceValue); + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertTimestampToFloat((long) sourceValue); + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertTimestampToDouble((long) sourceValue); + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertTimestampToText((long) sourceValue); + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertTimestampToDate((long) sourceValue); + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertTimestampToBlob((long) sourceValue); + CONVERTER[TSDataType.TIMESTAMP.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertTimestampToString((long) sourceValue); + + // DATE + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertDateToBoolean((int) sourceValue); + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertDateToInt32((int) sourceValue); + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertDateToInt64((int) sourceValue); + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertDateToFloat((int) sourceValue); + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertDateToDouble((int) sourceValue); + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertDateToText((int) sourceValue); + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertDateToTimestamp((int) sourceValue); + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.DATE.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertDateToBlob((int) sourceValue); + CONVERTER[TSDataType.DATE.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertDateToString((int) sourceValue); + + // BLOB + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertBlobToBoolean((Binary) sourceValue); + 
CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertBlobToInt32((Binary) sourceValue); + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertBlobToInt64((Binary) sourceValue); + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertBlobToFloat((Binary) sourceValue); + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertBlobToDouble((Binary) sourceValue); + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertBlobToText((Binary) sourceValue); + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertBlobToTimestamp((Binary) sourceValue); + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertBlobToDate((Binary) sourceValue); + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.BLOB.ordinal()] = DO_NOTHING_CONVERTER; + CONVERTER[TSDataType.BLOB.ordinal()][TSDataType.STRING.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertBlobToString((Binary) sourceValue); + + // STRING + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.BOOLEAN.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertStringToBoolean((Binary) sourceValue); + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.INT32.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertStringToInt32((Binary) sourceValue); + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.INT64.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertStringToInt64((Binary) sourceValue); + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.FLOAT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertStringToFloat((Binary) sourceValue); + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.DOUBLE.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertStringToDouble((Binary) sourceValue); + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.TEXT.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertStringToText((Binary) sourceValue); + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.TIMESTAMP.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> + convertStringToTimestamp((Binary) sourceValue); + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.DATE.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertStringToDate((Binary) sourceValue); + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.BLOB.ordinal()] = + (sourceDataType, targetDataType, sourceValue) -> convertStringToBlob((Binary) sourceValue); + CONVERTER[TSDataType.STRING.ordinal()][TSDataType.STRING.ordinal()] = DO_NOTHING_CONVERTER; + } + + public static Object convert( + final TSDataType sourceDataType, final TSDataType targetDataType, final Object sourceValue) { + return sourceValue == null + ? 
null + : CONVERTER[sourceDataType.ordinal()][targetDataType.ordinal()].convert( + sourceDataType, targetDataType, sourceValue); + } + + ////////////// BOOLEAN ////////////// + + private static final Binary BINARY_TRUE = parseString(Boolean.TRUE.toString()); + private static final Binary BINARY_FALSE = parseString(Boolean.FALSE.toString()); + private static final int TRUE_DATE = DateUtils.parseDateExpressionToInt(LocalDate.of(1970, 1, 2)); + private static final int FALSE_DATE = + DateUtils.parseDateExpressionToInt(LocalDate.of(1970, 1, 1)); + private static final int DEFAULT_DATE = + DateUtils.parseDateExpressionToInt(LocalDate.of(1970, 1, 1)); + + public static int convertBooleanToInt32(final boolean value) { + return value ? 1 : 0; + } + + public static long convertBooleanToInt64(final boolean value) { + return value ? 1L : 0L; + } + + public static float convertBooleanToFloat(final boolean value) { + return value ? 1.0f : 0.0f; + } + + public static double convertBooleanToDouble(final boolean value) { + return value ? 1.0 : 0.0; + } + + public static Binary convertBooleanToText(final boolean value) { + return value ? BINARY_TRUE : BINARY_FALSE; + } + + public static long convertBooleanToTimestamp(final boolean value) { + return value ? 1L : 0L; + } + + public static int convertBooleanToDate(final boolean value) { + return value ? TRUE_DATE : FALSE_DATE; + } + + public static Binary convertBooleanToBlob(final boolean value) { + return value ? BINARY_TRUE : BINARY_FALSE; + } + + public static Binary convertBooleanToString(final boolean value) { + return value ? BINARY_TRUE : BINARY_FALSE; + } + + ///////////// INT32 ////////////// + + public static boolean convertInt32ToBoolean(final int value) { + return value != 0; + } + + public static long convertInt32ToInt64(final int value) { + return value; + } + + public static float convertInt32ToFloat(final int value) { + return value; + } + + public static double convertInt32ToDouble(final int value) { + return value; + } + + public static Binary convertInt32ToText(final int value) { + return parseText(Integer.toString(value)); + } + + public static long convertInt32ToTimestamp(final int value) { + return value; + } + + public static int convertInt32ToDate(final int value) { + try { + DateUtils.parseIntToLocalDate(value); + return value; + } catch (Exception e) { + return DEFAULT_DATE; + } + } + + public static Binary convertInt32ToBlob(final int value) { + return parseBlob(Integer.toString(value)); + } + + public static Binary convertInt32ToString(final int value) { + return parseString(Integer.toString(value)); + } + + ///////////// INT64 ////////////// + + public static boolean convertInt64ToBoolean(final long value) { + return value != 0; + } + + public static int convertInt64ToInt32(final long value) { + return (int) value; + } + + public static float convertInt64ToFloat(final long value) { + return value; + } + + public static double convertInt64ToDouble(final long value) { + return value; + } + + public static Binary convertInt64ToText(final long value) { + return parseText(Long.toString(value)); + } + + public static long convertInt64ToTimestamp(final long value) { + return value; + } + + public static int convertInt64ToDate(final long value) { + try { + int data = (int) value; + DateUtils.parseIntToLocalDate(data); + return data; + } catch (Exception e) { + return DEFAULT_DATE; + } + } + + public static Binary convertInt64ToBlob(final long value) { + return parseBlob(Long.toString(value)); + } + + public static Binary 
convertInt64ToString(final long value) { + return parseString(Long.toString(value)); + } + + ///////////// FLOAT ////////////// + + public static boolean convertFloatToBoolean(final float value) { + return value != 0; + } + + public static int convertFloatToInt32(final float value) { + return (int) value; + } + + public static long convertFloatToInt64(final float value) { + return (long) value; + } + + public static double convertFloatToDouble(final float value) { + return value; + } + + public static Binary convertFloatToText(final float value) { + return parseText(Float.toString(value)); + } + + public static long convertFloatToTimestamp(final float value) { + return (long) value; + } + + public static int convertFloatToDate(final float value) { + try { + int data = (int) value; + DateUtils.parseIntToLocalDate(data); + return data; + } catch (Exception e) { + return DEFAULT_DATE; + } + } + + public static Binary convertFloatToBlob(final float value) { + return parseBlob(Float.toString(value)); + } + + public static Binary convertFloatToString(final float value) { + return parseString(Float.toString(value)); + } + + ///////////// DOUBLE ////////////// + + public static boolean convertDoubleToBoolean(final double value) { + return value != 0; + } + + public static int convertDoubleToInt32(final double value) { + return (int) value; + } + + public static long convertDoubleToInt64(final double value) { + return (long) value; + } + + public static float convertDoubleToFloat(final double value) { + return (float) value; + } + + public static Binary convertDoubleToText(final double value) { + return parseText(Double.toString(value)); + } + + public static long convertDoubleToTimestamp(final double value) { + return (long) value; + } + + public static int convertDoubleToDate(final double value) { + try { + int data = (int) value; + DateUtils.parseIntToLocalDate(data); + return data; + } catch (Exception e) { + return DEFAULT_DATE; + } + } + + public static Binary convertDoubleToBlob(final double value) { + return parseBlob(Double.toString(value)); + } + + public static Binary convertDoubleToString(final double value) { + return parseString(Double.toString(value)); + } + + ///////////// TEXT ////////////// + + public static boolean convertTextToBoolean(final Binary value) { + return Boolean.parseBoolean(value.toString()); + } + + public static int convertTextToInt32(final Binary value) { + return (int) parseDouble(value.toString()); + } + + public static long convertTextToInt64(final Binary value) { + return (long) parseDouble(value.toString()); + } + + public static float convertTextToFloat(final Binary value) { + return parseFloat(value.toString()); + } + + public static double convertTextToDouble(final Binary value) { + return parseDouble(value.toString()); + } + + public static long convertTextToTimestamp(final Binary value) { + return parseTimestamp(value.toString()); + } + + public static int convertTextToDate(final Binary value) { + return parseDate(value.toString()); + } + + public static Binary convertTextToBlob(final Binary value) { + return parseBlob(value.toString()); + } + + public static Binary convertTextToString(final Binary value) { + return value; + } + + ///////////// TIMESTAMP ////////////// + + public static boolean convertTimestampToBoolean(final long value) { + return value != 0; + } + + public static int convertTimestampToInt32(final long value) { + return (int) value; + } + + public static long convertTimestampToInt64(final long value) { + return value; + } + + public 
static float convertTimestampToFloat(final long value) { + return value; + } + + public static double convertTimestampToDouble(final long value) { + return value; + } + + public static Binary convertTimestampToText(final long value) { + return parseText(Long.toString(value)); + } + + public static int convertTimestampToDate(final long value) { + try { + Instant instant = Instant.ofEpochMilli(value); + return DateUtils.parseDateExpressionToInt(instant.atZone(ZoneOffset.UTC).toLocalDate()); + } catch (Exception e) { + return DEFAULT_DATE; + } + } + + public static Binary convertTimestampToBlob(final long value) { + return parseBlob(Long.toString(value)); + } + + public static Binary convertTimestampToString(final long value) { + return parseString(Long.toString(value)); + } + + ///////////// DATE ////////////// + + public static boolean convertDateToBoolean(final int value) { + return value != FALSE_DATE; + } + + public static int convertDateToInt32(final int value) { + return value; + } + + public static long convertDateToInt64(final int value) { + return value; + } + + public static float convertDateToFloat(final int value) { + return value; + } + + public static double convertDateToDouble(final int value) { + return value; + } + + public static Binary convertDateToText(final int value) { + return parseText(Integer.toString(value)); + } + + public static long convertDateToTimestamp(final int value) { + try { + LocalDate date = DateUtils.parseIntToLocalDate(value); + ZonedDateTime dateTime = date.atStartOfDay(ZoneOffset.UTC); + Instant instant = dateTime.toInstant(); + return instant.toEpochMilli(); + } catch (Exception e) { + return 0L; + } + } + + public static Binary convertDateToBlob(final int value) { + return parseBlob(Integer.toString(value)); + } + + public static Binary convertDateToString(final int value) { + return parseString(Integer.toString(value)); + } + + ///////////// BLOB ////////////// + + public static boolean convertBlobToBoolean(final Binary value) { + return Boolean.parseBoolean(value.toString()); + } + + public static int convertBlobToInt32(final Binary value) { + return (int) parseDouble(value.toString()); + } + + public static long convertBlobToInt64(final Binary value) { + return (long) parseDouble(value.toString()); + } + + public static float convertBlobToFloat(final Binary value) { + return parseFloat(value.toString()); + } + + public static double convertBlobToDouble(final Binary value) { + return parseDouble(value.toString()); + } + + public static long convertBlobToTimestamp(final Binary value) { + return parseTimestamp(value.toString()); + } + + public static int convertBlobToDate(final Binary value) { + return parseDate(value.toString()); + } + + public static Binary convertBlobToString(final Binary value) { + return value; + } + + public static Binary convertBlobToText(final Binary value) { + return value; + } + + ///////////// STRING ////////////// + + public static boolean convertStringToBoolean(final Binary value) { + return Boolean.parseBoolean(value.toString()); + } + + public static int convertStringToInt32(final Binary value) { + return (int) parseDouble(value.toString()); + } + + public static long convertStringToInt64(final Binary value) { + return (long) parseDouble(value.toString()); + } + + public static float convertStringToFloat(final Binary value) { + return parseFloat(value.toString()); + } + + public static double convertStringToDouble(final Binary value) { + return parseDouble(value.toString()); + } + + public static long 
convertStringToTimestamp(final Binary value) { + return parseTimestamp(value.toString()); + } + + public static int convertStringToDate(final Binary value) { + return parseDate(value.toString()); + } + + public static Binary convertStringToBlob(final Binary value) { + return parseBlob(value.toString()); + } + + public static Binary convertStringToText(final Binary value) { + return value; + } + + ///////////// UTILS ////////////// + + public static Object parse(final String value, final TSDataType dataType) { + if (value == null) { + return null; + } + switch (dataType) { + case BOOLEAN: + return Boolean.parseBoolean(value); + case INT32: + return parseInteger(value); + case INT64: + return parseLong(value); + case FLOAT: + return parseFloat(value); + case DOUBLE: + return parseDouble(value); + case TEXT: + return parseText(value); + case TIMESTAMP: + return parseTimestamp(value); + case DATE: + return parseDate(value); + case BLOB: + return parseBlob(value); + case STRING: + return parseString(value); + default: + throw new UnsupportedOperationException("Unsupported data type: " + dataType); + } + } + + private static Binary parseBlob(final String value) { + return new Binary(value, TSFileConfig.STRING_CHARSET); + } + + private static int parseInteger(final String value) { + try { + return Integer.parseInt(value); + } catch (Exception e) { + return 0; + } + } + + private static long parseLong(final String value) { + try { + return Long.parseLong(value); + } catch (Exception e) { + return 0L; + } + } + + private static float parseFloat(final String value) { + try { + return Float.parseFloat(value); + } catch (Exception e) { + return 0.0f; + } + } + + private static double parseDouble(final String value) { + try { + return Double.parseDouble(value); + } catch (Exception e) { + return 0.0d; + } + } + + private static long parseTimestamp(final String value) { + if (value == null || value.isEmpty()) { + return 0L; + } + try { + return TypeInferenceUtils.isNumber(value) + ? Long.parseLong(value) + : DateTimeUtils.parseDateTimeExpressionToLong(StringUtils.trim(value), ZoneOffset.UTC); + } catch (final Exception e) { + return 0L; + } + } + + private static int parseDate(final String value) { + if (value == null || value.isEmpty()) { + return DEFAULT_DATE; + } + try { + if (TypeInferenceUtils.isNumber(value)) { + int date = Integer.parseInt(value); + DateUtils.parseIntToLocalDate(date); + return date; + } + return DateTimeUtils.parseDateExpressionToInt(StringUtils.trim(value)); + } catch (final Exception e) { + return DEFAULT_DATE; + } + } + + private static Binary parseString(final String value) { + return new Binary(value, TSFileConfig.STRING_CHARSET); + } + + private static Binary parseText(final String value) { + return new Binary(value, TSFileConfig.STRING_CHARSET); + } + + private ValueConverter() { + // forbidden to construct + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/statement/PipeConvertedInsertRowStatement.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/statement/PipeConvertedInsertRowStatement.java new file mode 100644 index 0000000000000..43f0a6728eb60 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/statement/PipeConvertedInsertRowStatement.java @@ -0,0 +1,141 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.pipe.receiver.transform.statement;
+
+import org.apache.iotdb.commons.conf.IoTDBConstant;
+import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.exception.metadata.PathNotExistException;
+import org.apache.iotdb.db.exception.query.QueryProcessException;
+import org.apache.iotdb.db.pipe.receiver.transform.converter.ValueConverter;
+import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertRowStatement;
+
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.time.ZoneId;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+public class PipeConvertedInsertRowStatement extends InsertRowStatement {
+
+  private static final Logger LOGGER =
+      LoggerFactory.getLogger(PipeConvertedInsertRowStatement.class);
+
+  public PipeConvertedInsertRowStatement(final InsertRowStatement insertRowStatement) {
+    super();
+    // Statement
+    isDebug = insertRowStatement.isDebug();
+    // InsertBaseStatement
+    devicePath = insertRowStatement.getDevicePath();
+    isAligned = insertRowStatement.isAligned();
+    measurementSchemas = insertRowStatement.getMeasurementSchemas();
+    measurements = insertRowStatement.getMeasurements();
+    dataTypes = insertRowStatement.getDataTypes();
+    // InsertRowStatement
+    time = insertRowStatement.getTime();
+    values = insertRowStatement.getValues();
+    isNeedInferType = insertRowStatement.isNeedInferType();
+
+    // To ensure that the measurements remain unchanged during the WAL writing process, the arrays
+    // need to be copied before the failed-measurement marks can be removed.
+    final MeasurementSchema[] measurementSchemas = insertRowStatement.getMeasurementSchemas();
+    if (measurementSchemas != null) {
+      this.measurementSchemas = Arrays.copyOf(measurementSchemas, measurementSchemas.length);
+    }
+
+    final String[] measurements = insertRowStatement.getMeasurements();
+    if (measurements != null) {
+      this.measurements = Arrays.copyOf(measurements, measurements.length);
+    }
+
+    final TSDataType[] dataTypes = insertRowStatement.getDataTypes();
+    if (dataTypes != null) {
+      this.dataTypes = Arrays.copyOf(dataTypes, dataTypes.length);
+    }
+
+    final Map<Integer, FailedMeasurementInfo> failedMeasurementIndex2Info =
+        insertRowStatement.getFailedMeasurementInfoMap();
+    if (failedMeasurementIndex2Info != null) {
+      this.failedMeasurementIndex2Info = new HashMap<>(failedMeasurementIndex2Info);
+    }
+
+    removeAllFailedMeasurementMarks();
+  }
+
+  @Override
+  protected boolean checkAndCastDataType(int columnIndex, TSDataType dataType) {
+    LOGGER.info(
+        "Pipe: Inserting row to {}.{}. 
Casting type from {} to {}.", + devicePath, + measurements[columnIndex], + dataTypes[columnIndex], + dataType); + values[columnIndex] = + ValueConverter.convert(dataTypes[columnIndex], dataType, values[columnIndex]); + dataTypes[columnIndex] = dataType; + return true; + } + + @Override + public void transferType(ZoneId zoneId) throws QueryProcessException { + for (int i = 0; i < measurementSchemas.length; i++) { + // null when time series doesn't exist + if (measurementSchemas[i] == null) { + if (!IoTDBDescriptor.getInstance().getConfig().isEnablePartialInsert()) { + throw new QueryProcessException( + new PathNotExistException( + devicePath.getFullPath() + IoTDBConstant.PATH_SEPARATOR + measurements[i])); + } else { + markFailedMeasurement( + i, + new QueryProcessException( + new PathNotExistException( + devicePath.getFullPath() + IoTDBConstant.PATH_SEPARATOR + measurements[i]))); + } + continue; + } + + // parse string value to specific type + dataTypes[i] = measurementSchemas[i].getType(); + try { + values[i] = ValueConverter.parse(values[i].toString(), dataTypes[i]); + } catch (Exception e) { + LOGGER.warn( + "data type of {}.{} is not consistent, " + + "registered type {}, inserting timestamp {}, value {}", + devicePath, + measurements[i], + dataTypes[i], + time, + values[i]); + if (!IoTDBDescriptor.getInstance().getConfig().isEnablePartialInsert()) { + throw e; + } else { + markFailedMeasurement(i, e); + } + } + } + + isNeedInferType = false; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/statement/PipeConvertedInsertTabletStatement.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/statement/PipeConvertedInsertTabletStatement.java new file mode 100644 index 0000000000000..7a0052422832f --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/statement/PipeConvertedInsertTabletStatement.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.iotdb.db.pipe.receiver.transform.statement;
+
+import org.apache.iotdb.db.pipe.receiver.transform.converter.ArrayConverter;
+import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertTabletStatement;
+
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+public class PipeConvertedInsertTabletStatement extends InsertTabletStatement {
+
+  private static final Logger LOGGER =
+      LoggerFactory.getLogger(PipeConvertedInsertTabletStatement.class);
+
+  public PipeConvertedInsertTabletStatement(
+      final InsertTabletStatement insertTabletStatement, boolean isCopyMeasurement) {
+    super();
+    // Statement
+    isDebug = insertTabletStatement.isDebug();
+    // InsertBaseStatement
+    devicePath = insertTabletStatement.getDevicePath();
+    isAligned = insertTabletStatement.isAligned();
+    // InsertTabletStatement
+    times = insertTabletStatement.getTimes();
+    bitMaps = insertTabletStatement.getBitMaps();
+    columns = insertTabletStatement.getColumns();
+    rowCount = insertTabletStatement.getRowCount();
+
+    // To ensure that the measurements remain unchanged during the WAL writing process, the arrays
+    // need to be copied before the failed-measurement marks can be removed.
+    if (isCopyMeasurement) {
+      final MeasurementSchema[] measurementSchemas = insertTabletStatement.getMeasurementSchemas();
+      if (measurementSchemas != null) {
+        this.measurementSchemas = Arrays.copyOf(measurementSchemas, measurementSchemas.length);
+      }
+
+      final String[] measurements = insertTabletStatement.getMeasurements();
+      if (measurements != null) {
+        this.measurements = Arrays.copyOf(measurements, measurements.length);
+      }
+
+      final TSDataType[] dataTypes = insertTabletStatement.getDataTypes();
+      if (dataTypes != null) {
+        this.dataTypes = Arrays.copyOf(dataTypes, dataTypes.length);
+      }
+
+      final Map<Integer, FailedMeasurementInfo> failedMeasurementIndex2Info =
+          insertTabletStatement.getFailedMeasurementInfoMap();
+      if (failedMeasurementIndex2Info != null) {
+        this.failedMeasurementIndex2Info = new HashMap<>(failedMeasurementIndex2Info);
+      }
+    } else {
+      this.measurementSchemas = insertTabletStatement.getMeasurementSchemas();
+      this.measurements = insertTabletStatement.getMeasurements();
+      this.dataTypes = insertTabletStatement.getDataTypes();
+      this.failedMeasurementIndex2Info = insertTabletStatement.getFailedMeasurementInfoMap();
+    }
+
+    removeAllFailedMeasurementMarks();
+  }
+
+  public PipeConvertedInsertTabletStatement(final InsertTabletStatement insertTabletStatement) {
+    this(insertTabletStatement, true);
+  }
+
+  @Override
+  protected boolean checkAndCastDataType(int columnIndex, TSDataType dataType) {
+    LOGGER.info(
+        "Pipe: Inserting tablet to {}.{}. Casting type from {} to {}.",
+        devicePath,
+        measurements[columnIndex],
+        dataTypes[columnIndex],
+        dataType);
+    columns[columnIndex] =
+        ArrayConverter.convert(dataTypes[columnIndex], dataType, columns[columnIndex]);
+    dataTypes[columnIndex] = dataType;
+    return true;
+  }
+
+  protected boolean originalCheckAndCastDataType(int columnIndex, TSDataType dataType) {
+    return super.checkAndCastDataType(columnIndex, dataType);
+  }
+}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipePlanToStatementVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipePlanToStatementVisitor.java
index 593e3715c88e8..89e931f7ed35f 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipePlanToStatementVisitor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipePlanToStatementVisitor.java
@@ -74,6 +74,9 @@ public Statement visitPlan(final PlanNode node, final Void context) {
 
   @Override
   public InsertRowStatement visitInsertRow(final InsertRowNode node, final Void context) {
+    // MeasurementSchemas may only be set during the type-analysis stage. However, the InsertNode
+    // has already had its MeasurementSchemas set at the sending end, so if the statement
+    // constructed here set them again, an NPE would occur.
     final InsertRowStatement statement = new InsertRowStatement();
     statement.setDevicePath(node.getDevicePath());
     statement.setTime(node.getTime());
@@ -82,12 +85,14 @@ public InsertRowStatement visitInsertRow(final InsertRowNode node, final Void co
     statement.setValues(node.getValues());
     statement.setNeedInferType(node.isNeedInferType());
     statement.setAligned(node.isAligned());
-    statement.setMeasurementSchemas(node.getMeasurementSchemas());
     return statement;
   }
 
   @Override
   public InsertTabletStatement visitInsertTablet(final InsertTabletNode node, final Void context) {
+    // MeasurementSchemas may only be set during the type-analysis stage. However, the InsertNode
+    // has already had its MeasurementSchemas set at the sending end, so if the statement
+    // constructed here set them again, an NPE would occur.
     final InsertTabletStatement statement = new InsertTabletStatement();
     statement.setDevicePath(node.getDevicePath());
     statement.setMeasurements(node.getMeasurements());
@@ -97,7 +102,6 @@ public InsertTabletStatement visitInsertTablet(final InsertTabletNode node, fina
     statement.setRowCount(node.getRowCount());
     statement.setDataTypes(node.getDataTypes());
     statement.setAligned(node.isAligned());
-    statement.setMeasurementSchemas(node.getMeasurementSchemas());
     return statement;
   }
 
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementDataTypeConvertExecutionVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementDataTypeConvertExecutionVisitor.java
new file mode 100644
index 0000000000000..caa4a399f67fb
--- /dev/null
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementDataTypeConvertExecutionVisitor.java
@@ -0,0 +1,225 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.pipe.receiver.visitor; + +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.pipe.config.PipeConfig; +import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBPipePattern; +import org.apache.iotdb.db.pipe.event.common.tsfile.container.scan.TsFileInsertionScanDataContainer; +import org.apache.iotdb.db.pipe.receiver.protocol.thrift.IoTDBDataNodeReceiver; +import org.apache.iotdb.db.pipe.receiver.transform.statement.PipeConvertedInsertRowStatement; +import org.apache.iotdb.db.pipe.receiver.transform.statement.PipeConvertedInsertTabletStatement; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTabletRawReq; +import org.apache.iotdb.db.queryengine.plan.statement.Statement; +import org.apache.iotdb.db.queryengine.plan.statement.StatementNode; +import org.apache.iotdb.db.queryengine.plan.statement.StatementVisitor; +import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertMultiTabletsStatement; +import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertRowStatement; +import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertRowsOfOneDeviceStatement; +import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertRowsStatement; +import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertTabletStatement; +import org.apache.iotdb.db.queryengine.plan.statement.crud.LoadTsFileStatement; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.apache.commons.io.FileUtils; +import org.apache.tsfile.utils.Pair; +import org.apache.tsfile.write.record.Tablet; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.util.Optional; +import java.util.stream.Collectors; + +/** + * This visitor transforms the data type of the statement when the statement is executed and an + * exception occurs. The transformed statement (if any) is returned and will be executed again. 
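+ *
+ * <p>A minimal caller-side sketch (illustrative only; the receiver wiring and the
+ * {@code executeStatement} helper below are hypothetical and not part of this patch): the
+ * visitor is constructed with a {@link StatementExecutor} and is consulted only after the
+ * original execution has produced an error status, falling back to that status when no
+ * conversion is attempted:
+ *
+ * <pre>{@code
+ * PipeStatementDataTypeConvertExecutionVisitor visitor =
+ *     new PipeStatementDataTypeConvertExecutionVisitor(stmt -> executeStatement(stmt));
+ *
+ * TSStatus status = executeStatement(statement);
+ * if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+ *   status = statement.accept(visitor, status).orElse(status);
+ * }
+ * }</pre>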
+ */
+public class PipeStatementDataTypeConvertExecutionVisitor
+    extends StatementVisitor<Optional<TSStatus>, TSStatus> {
+
+  private static final Logger LOGGER =
+      LoggerFactory.getLogger(PipeStatementDataTypeConvertExecutionVisitor.class);
+
+  @FunctionalInterface
+  public interface StatementExecutor {
+    TSStatus execute(final Statement statement);
+  }
+
+  private final StatementExecutor statementExecutor;
+
+  public PipeStatementDataTypeConvertExecutionVisitor(final StatementExecutor statementExecutor) {
+    this.statementExecutor = statementExecutor;
+  }
+
+  private Optional<TSStatus> tryExecute(final Statement statement) {
+    try {
+      return Optional.of(statementExecutor.execute(statement));
+    } catch (final Exception e) {
+      LOGGER.warn("Failed to execute statement after data type conversion.", e);
+      return Optional.empty();
+    }
+  }
+
+  @Override
+  public Optional<TSStatus> visitNode(final StatementNode statementNode, final TSStatus status) {
+    return Optional.empty();
+  }
+
+  @Override
+  public Optional<TSStatus> visitLoadFile(
+      final LoadTsFileStatement loadTsFileStatement, final TSStatus status) {
+    if (status.getCode() != TSStatusCode.LOAD_FILE_ERROR.getStatusCode()
+        // Ignore the error if it is caused by insufficient memory
+        || (status.getMessage() != null && status.getMessage().contains("memory"))
+        || !PipeConfig.getInstance().isPipeReceiverLoadConversionEnabled()) {
+      return Optional.empty();
+    }
+
+    LOGGER.warn(
+        "Data type mismatch detected (TSStatus: {}) for LoadTsFileStatement: {}. Start data type conversion.",
+        status,
+        loadTsFileStatement);
+
+    for (final File file : loadTsFileStatement.getTsFiles()) {
+      try (final TsFileInsertionScanDataContainer container =
+          new TsFileInsertionScanDataContainer(
+              file, new IoTDBPipePattern(null), Long.MIN_VALUE, Long.MAX_VALUE, null, null)) {
+        for (final Pair<Tablet, Boolean> tabletWithIsAligned : container.toTabletWithIsAligneds()) {
+          final PipeConvertedInsertTabletStatement statement =
+              new PipeConvertedInsertTabletStatement(
+                  PipeTransferTabletRawReq.toTPipeTransferRawReq(
+                          tabletWithIsAligned.getLeft(), tabletWithIsAligned.getRight())
+                      .constructStatement(),
+                  false);
+
+          TSStatus result;
+          try {
+            result =
+                statement.accept(
+                    IoTDBDataNodeReceiver.STATEMENT_STATUS_VISITOR,
+                    statementExecutor.execute(statement));
+
+            // Retry max 5 times if the write process is rejected
+            for (int i = 0;
+                i < 5
+                    && result.getCode()
+                        == TSStatusCode.PIPE_RECEIVER_TEMPORARY_UNAVAILABLE_EXCEPTION
+                            .getStatusCode();
+                i++) {
+              Thread.sleep(100L * (i + 1));
+              result =
+                  statement.accept(
+                      IoTDBDataNodeReceiver.STATEMENT_STATUS_VISITOR,
+                      statementExecutor.execute(statement));
+            }
+          } catch (final Exception e) {
+            if (e instanceof InterruptedException) {
+              Thread.currentThread().interrupt();
+            }
+            result = statement.accept(IoTDBDataNodeReceiver.STATEMENT_EXCEPTION_VISITOR, e);
+          }
+
+          if (!(result.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()
+              || result.getCode() == TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()
+              || result.getCode()
+                  == TSStatusCode.PIPE_RECEIVER_IDEMPOTENT_CONFLICT_EXCEPTION.getStatusCode())) {
+            return Optional.empty();
+          }
+        }
+      } catch (final Exception e) {
+        LOGGER.warn(
+            "Failed to convert data type for LoadTsFileStatement: {}.", loadTsFileStatement, e);
+        return Optional.empty();
+      }
+    }
+
+    if (loadTsFileStatement.isDeleteAfterLoad()) {
+      loadTsFileStatement.getTsFiles().forEach(FileUtils::deleteQuietly);
+    }
+
+    LOGGER.warn(
+        "Data type conversion for LoadTsFileStatement {} is successful.", loadTsFileStatement);
+
+    return Optional.of(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()));
+  }
+
+  @Override
+  public Optional<TSStatus> visitInsertRow(
+      final InsertRowStatement insertRowStatement, final TSStatus status) {
+    return tryExecute(new PipeConvertedInsertRowStatement(insertRowStatement));
+  }
+
+  @Override
+  public Optional<TSStatus> visitInsertRows(
+      final InsertRowsStatement insertRowsStatement, final TSStatus status) {
+    if (insertRowsStatement.getInsertRowStatementList() == null
+        || insertRowsStatement.getInsertRowStatementList().isEmpty()) {
+      return Optional.empty();
+    }
+
+    final InsertRowsStatement convertedInsertRowsStatement = new InsertRowsStatement();
+    convertedInsertRowsStatement.setInsertRowStatementList(
+        insertRowsStatement.getInsertRowStatementList().stream()
+            .map(PipeConvertedInsertRowStatement::new)
+            .collect(Collectors.toList()));
+    return tryExecute(convertedInsertRowsStatement);
+  }
+
+  @Override
+  public Optional<TSStatus> visitInsertRowsOfOneDevice(
+      final InsertRowsOfOneDeviceStatement insertRowsOfOneDeviceStatement, final TSStatus status) {
+    if (insertRowsOfOneDeviceStatement.getInsertRowStatementList() == null
+        || insertRowsOfOneDeviceStatement.getInsertRowStatementList().isEmpty()) {
+      return Optional.empty();
+    }
+
+    final InsertRowsOfOneDeviceStatement convertedInsertRowsOfOneDeviceStatement =
+        new InsertRowsOfOneDeviceStatement();
+    convertedInsertRowsOfOneDeviceStatement.setInsertRowStatementList(
+        insertRowsOfOneDeviceStatement.getInsertRowStatementList().stream()
+            .map(PipeConvertedInsertRowStatement::new)
+            .collect(Collectors.toList()));
+    return tryExecute(convertedInsertRowsOfOneDeviceStatement);
+  }
+
+  @Override
+  public Optional<TSStatus> visitInsertTablet(
+      final InsertTabletStatement insertTabletStatement, final TSStatus status) {
+    return tryExecute(new PipeConvertedInsertTabletStatement(insertTabletStatement));
+  }
+
+  @Override
+  public Optional<TSStatus> visitInsertMultiTablets(
+      final InsertMultiTabletsStatement insertMultiTabletsStatement, final TSStatus status) {
+    if (insertMultiTabletsStatement.getInsertTabletStatementList() == null
+        || insertMultiTabletsStatement.getInsertTabletStatementList().isEmpty()) {
+      return Optional.empty();
+    }
+
+    final InsertMultiTabletsStatement convertedInsertMultiTabletsStatement =
+        new InsertMultiTabletsStatement();
+    convertedInsertMultiTabletsStatement.setInsertTabletStatementList(
+        insertMultiTabletsStatement.getInsertTabletStatementList().stream()
+            .map(PipeConvertedInsertTabletStatement::new)
+            .collect(Collectors.toList()));
+    return tryExecute(convertedInsertMultiTabletsStatement);
+  }
+}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementExceptionVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementExceptionVisitor.java
index 5a65b1a5e0577..8059ee639303e 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementExceptionVisitor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementExceptionVisitor.java
@@ -22,7 +22,7 @@
 import org.apache.iotdb.common.rpc.thrift.TSStatus;
 import org.apache.iotdb.commons.exception.IoTDBException;
 import org.apache.iotdb.commons.exception.MetadataException;
-import org.apache.iotdb.db.exception.LoadRuntimeOutOfMemoryException;
+import org.apache.iotdb.db.exception.load.LoadRuntimeOutOfMemoryException;
 import org.apache.iotdb.db.exception.sql.SemanticException;
 import org.apache.iotdb.db.exception.sql.StatementAnalyzeException;
 import org.apache.iotdb.db.queryengine.plan.statement.Statement;
@@ -77,7 +77,7 @@ public TSStatus visitCreateAlignedTimeseries(
   }
 
   @Override
-  public TSStatus visitCreateMultiTimeseries(
+  public TSStatus visitCreateMultiTimeSeries(
       final CreateMultiTimeSeriesStatement statement, final Exception context) {
     return visitGeneralCreateTimeSeries(statement, context);
   }
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementPatternParseVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementPatternParseVisitor.java
index 0754a2fb55df2..75394e98cc5ff 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementPatternParseVisitor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementPatternParseVisitor.java
@@ -19,10 +19,11 @@
 package org.apache.iotdb.db.pipe.receiver.visitor;
 
-import org.apache.iotdb.commons.pipe.pattern.IoTDBPipePattern;
+import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBPipePattern;
 import org.apache.iotdb.db.queryengine.plan.statement.Statement;
 import org.apache.iotdb.db.queryengine.plan.statement.StatementNode;
 import org.apache.iotdb.db.queryengine.plan.statement.StatementVisitor;
+import org.apache.iotdb.db.queryengine.plan.statement.metadata.AlterTimeSeriesStatement;
 import org.apache.iotdb.db.queryengine.plan.statement.metadata.CreateAlignedTimeSeriesStatement;
 import org.apache.iotdb.db.queryengine.plan.statement.metadata.CreateTimeSeriesStatement;
 import org.apache.iotdb.db.queryengine.plan.statement.metadata.template.ActivateTemplateStatement;
@@ -103,6 +104,17 @@ public Optional<Statement> visitCreateAlignedTimeseries(
     return Optional.of(targetCreateAlignedTimeSeriesStatement);
   }
 
+  // For logical view with tags/attributes
+  @Override
+  public Optional<Statement> visitAlterTimeSeries(
+      final AlterTimeSeriesStatement alterTimeSeriesStatement, final IoTDBPipePattern pattern) {
+    return pattern.matchesMeasurement(
+            alterTimeSeriesStatement.getPath().getDevice(),
+            alterTimeSeriesStatement.getPath().getMeasurement())
+        ? Optional.of(alterTimeSeriesStatement)
+        : Optional.empty();
+  }
+
   @Override
   public Optional<Statement> visitActivateTemplate(
       final ActivateTemplateStatement activateTemplateStatement, final IoTDBPipePattern pattern) {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementTSStatusVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementTSStatusVisitor.java
index 03a917379ceb1..0874857c3f751 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementTSStatusVisitor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementTSStatusVisitor.java
@@ -97,7 +97,8 @@ public TSStatus visitInsertMultiTablets(
 
   private TSStatus visitInsertBase(
       final InsertBaseStatement insertBaseStatement, final TSStatus context) {
-    if (context.getCode() == TSStatusCode.SYSTEM_READ_ONLY.getStatusCode()) {
+    if (context.getCode() == TSStatusCode.SYSTEM_READ_ONLY.getStatusCode()
+        || context.getCode() == TSStatusCode.WRITE_PROCESS_REJECT.getStatusCode()) {
       return new TSStatus(
               TSStatusCode.PIPE_RECEIVER_TEMPORARY_UNAVAILABLE_EXCEPTION.getStatusCode())
           .setMessage(context.getMessage());
@@ -140,7 +141,7 @@ private TSStatus visitGeneralCreateTimeSeries(final Statement statement, final T
   }
 
   @Override
-  public TSStatus visitCreateMultiTimeseries(
+  public TSStatus visitCreateMultiTimeSeries(
       final CreateMultiTimeSeriesStatement createMultiTimeSeriesStatement, final TSStatus context) {
     return visitGeneralCreateMultiTimeseries(createMultiTimeSeriesStatement, context);
   }
@@ -183,7 +184,7 @@ private TSStatus visitGeneralCreateMultiTimeseries(
   }
 
   @Override
-  public TSStatus visitAlterTimeseries(
+  public TSStatus visitAlterTimeSeries(
       final AlterTimeSeriesStatement alterTimeSeriesStatement, final TSStatus context) {
     if (context.getCode() == TSStatusCode.METADATA_ERROR.getStatusCode()) {
       if (context.getMessage().contains("already")) {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementToBatchVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementToBatchVisitor.java
index f48a2bc002c3c..1bca479fed6b0 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementToBatchVisitor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeStatementToBatchVisitor.java
@@ -115,15 +115,16 @@ private void addNonAlignedTimeSeriesToBatchStatement(
             statement.getPath().getDevicePath(), devicePath -> new Pair<>(false, new MeasurementGroup()))
         .getRight();
-    group.addMeasurement(
+    if (group.addMeasurement(
         statement.getPath().getMeasurement(),
         statement.getDataType(),
         statement.getEncoding(),
-        statement.getCompressor());
-    group.addAttributes(statement.getAttributes());
-    group.addTags(statement.getTags());
-    group.addProps(statement.getProps());
-    group.addAlias(statement.getAlias());
+        statement.getCompressor())) {
+      group.addAttributes(statement.getAttributes());
+      group.addTags(statement.getTags());
+      group.addProps(statement.getProps());
+      group.addAlias(statement.getAlias());
+    }
   }
 
   private void addAlignedTimeSeriesToBatchStatement(
@@ -136,17 +137,18 @@ private void addAlignedTimeSeriesToBatchStatement(
             statement.getDevicePath(), devicePath -> new Pair<>(true, new MeasurementGroup()))
         .getRight();
     for (int i = 0; i < statement.getMeasurements().size(); ++i) {
-      group.addMeasurement(
+      if 
(group.addMeasurement( statement.getMeasurements().get(i), statement.getDataTypes().get(i), statement.getEncodings().get(i), - statement.getCompressors().get(i)); - group.addProps(new HashMap<>()); + statement.getCompressors().get(i))) { + group.addProps(new HashMap<>()); + // Non-null lists + group.addTags(statement.getTagsList().get(i)); + group.addAttributes(statement.getAttributesList().get(i)); + group.addAlias(statement.getAliasList().get(i)); + } } - // Non-null lists - statement.getTagsList().forEach(group::addTags); - statement.getAttributesList().forEach(group::addAttributes); - statement.getAliasList().forEach(group::addAlias); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeHardlinkOrCopiedFileDirStartupCleaner.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeHardlinkOrCopiedFileDirStartupCleaner.java index c6d4aefc705c2..b58d934988f1a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeHardlinkOrCopiedFileDirStartupCleaner.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeHardlinkOrCopiedFileDirStartupCleaner.java @@ -20,11 +20,10 @@ package org.apache.iotdb.db.pipe.resource; import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.resource.PipeSnapshotResourceManager; +import org.apache.iotdb.commons.pipe.resource.snapshot.PipeSnapshotResourceManager; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.commons.io.FileUtils; -import org.apache.commons.io.filefilter.DirectoryFileFilter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,22 +45,20 @@ public static void clean() { private static void cleanTsFileDir() { for (final String dataDir : IoTDBDescriptor.getInstance().getConfig().getDataDirs()) { - for (final File file : - FileUtils.listFilesAndDirs( - new File(dataDir), DirectoryFileFilter.INSTANCE, DirectoryFileFilter.INSTANCE)) { - if (file.isDirectory() - && file.getName().equals(PipeConfig.getInstance().getPipeHardlinkBaseDirName())) { - LOGGER.info( - "Pipe hardlink dir found, deleting it: {}, result: {}", - file, - FileUtils.deleteQuietly(file)); - } + final File pipeHardLinkDir = + new File( + dataDir + File.separator + PipeConfig.getInstance().getPipeHardlinkBaseDirName()); + if (pipeHardLinkDir.isDirectory()) { + LOGGER.info( + "Pipe hardlink dir found, deleting it: {}, result: {}", + pipeHardLinkDir, + FileUtils.deleteQuietly(pipeHardLinkDir)); } } } private static void cleanSnapshotDir() { - File pipeConsensusDir = + final File pipeConsensusDir = new File( IoTDBDescriptor.getInstance().getConfig().getConsensusDir() + File.separator diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeResourceManager.java index 83901ddcfca64..aaf9eff454b8c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeResourceManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeResourceManager.java @@ -19,42 +19,30 @@ package org.apache.iotdb.db.pipe.resource; -import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.resource.PipeSnapshotResourceManager; import org.apache.iotdb.commons.pipe.resource.log.PipeLogManager; +import 
org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager;
+import org.apache.iotdb.commons.pipe.resource.snapshot.PipeSnapshotResourceManager;
 import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager;
+import org.apache.iotdb.db.pipe.resource.ref.PipeDataNodePhantomReferenceManager;
 import org.apache.iotdb.db.pipe.resource.snapshot.PipeDataNodeSnapshotResourceManager;
+import org.apache.iotdb.db.pipe.resource.tsfile.PipeCompactionManager;
 import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileResourceManager;
-import org.apache.iotdb.db.pipe.resource.wal.PipeWALResourceManager;
-import org.apache.iotdb.db.pipe.resource.wal.hardlink.PipeWALHardlinkResourceManager;
-import org.apache.iotdb.db.pipe.resource.wal.selfhost.PipeWALSelfHostResourceManager;
-
-import java.util.concurrent.atomic.AtomicReference;
 
 public class PipeDataNodeResourceManager {
 
   private final PipeTsFileResourceManager pipeTsFileResourceManager;
-  private final AtomicReference<PipeWALResourceManager> pipeWALResourceManager;
+  private final PipeCompactionManager pipeCompactionManager;
   private final PipeSnapshotResourceManager pipeSnapshotResourceManager;
   private final PipeMemoryManager pipeMemoryManager;
   private final PipeLogManager pipeLogManager;
+  private final PipePhantomReferenceManager pipePhantomReferenceManager;
 
   public static PipeTsFileResourceManager tsfile() {
     return PipeResourceManagerHolder.INSTANCE.pipeTsFileResourceManager;
   }
 
-  public static PipeWALResourceManager wal() {
-    if (PipeResourceManagerHolder.INSTANCE.pipeWALResourceManager.get() == null) {
-      synchronized (PipeResourceManagerHolder.INSTANCE) {
-        if (PipeResourceManagerHolder.INSTANCE.pipeWALResourceManager.get() == null) {
-          PipeResourceManagerHolder.INSTANCE.pipeWALResourceManager.set(
-              PipeConfig.getInstance().getPipeHardLinkWALEnabled()
-                  ? new PipeWALHardlinkResourceManager()
-                  : new PipeWALSelfHostResourceManager());
-        }
-      }
-    }
-    return PipeResourceManagerHolder.INSTANCE.pipeWALResourceManager.get();
+  public static PipeCompactionManager compaction() {
+    return PipeResourceManagerHolder.INSTANCE.pipeCompactionManager;
   }
 
   public static PipeSnapshotResourceManager snapshot() {
@@ -69,14 +57,19 @@ public static PipeLogManager log() {
     return PipeResourceManagerHolder.INSTANCE.pipeLogManager;
   }
 
+  public static PipePhantomReferenceManager ref() {
+    return PipeResourceManagerHolder.INSTANCE.pipePhantomReferenceManager;
+  }
+
   ///////////////////////////// SINGLETON /////////////////////////////
 
   private PipeDataNodeResourceManager() {
     pipeTsFileResourceManager = new PipeTsFileResourceManager();
-    pipeWALResourceManager = new AtomicReference<>();
+    pipeCompactionManager = new PipeCompactionManager();
     pipeSnapshotResourceManager = new PipeDataNodeSnapshotResourceManager();
     pipeMemoryManager = new PipeMemoryManager();
     pipeLogManager = new PipeLogManager();
+    pipePhantomReferenceManager = new PipeDataNodePhantomReferenceManager();
   }
 
   private static class PipeResourceManagerHolder {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/InsertNodeMemoryEstimator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/InsertNodeMemoryEstimator.java
new file mode 100644
index 0000000000000..c6746f13c45a8
--- /dev/null
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/InsertNodeMemoryEstimator.java
@@ -0,0 +1,614 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.pipe.resource.memory; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.consensus.index.ProgressIndex; +import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertMultiTabletsNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowsNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowsOfOneDeviceNode; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertTabletNode; + +import org.apache.tsfile.common.constant.TsFileConstant; +import org.apache.tsfile.encoding.encoder.TSEncodingBuilder; +import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.file.metadata.enums.TSEncoding; +import org.apache.tsfile.utils.Binary; +import org.apache.tsfile.utils.BitMap; +import org.apache.tsfile.utils.RamUsageEstimator; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class InsertNodeMemoryEstimator { + + private static final Logger LOGGER = LoggerFactory.getLogger(InsertNodeMemoryEstimator.class); + + private static final String INSERT_TABLET_NODE = "InsertTabletNode"; + private static final String INSERT_ROW_NODE = "InsertRowNode"; + private static final String INSERT_ROWS_NODE = "InsertRowsNode"; + private static final String INSERT_ROWS_OF_ONE_DEVICE_NODE = "InsertRowsOfOneDeviceNode"; + private static final String INSERT_MULTI_TABLETS_NODE = "InsertMultiTabletsNode"; + private static final String RELATIONAL_INSERT_ROW_NODE = "RelationalInsertRowNode"; + + private static final long NUM_BYTES_OBJECT_REF = RamUsageEstimator.NUM_BYTES_OBJECT_REF; + private static final long NUM_BYTES_OBJECT_HEADER = RamUsageEstimator.NUM_BYTES_OBJECT_HEADER; + private static final long NUM_BYTES_ARRAY_HEADER = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER; + + private static final long TS_ENCODING_PLAIN_BUILDER_SIZE = + RamUsageEstimator.shallowSizeOf(TSEncodingBuilder.getEncodingBuilder(TSEncoding.PLAIN)); + + // =============================InsertNode================================== + + private static final long INSERT_TABLET_NODE_SIZE = + RamUsageEstimator.shallowSizeOfInstance(InsertTabletNode.class); + + private static 
final long INSERT_ROW_NODE_SIZE =
+      RamUsageEstimator.shallowSizeOfInstance(InsertRowNode.class);
+
+  private static final long INSERT_ROWS_NODE_SIZE =
+      RamUsageEstimator.shallowSizeOfInstance(InsertRowsNode.class);
+
+  private static final long INSERT_ROWS_OF_ONE_DEVICE_NODE_SIZE =
+      RamUsageEstimator.shallowSizeOfInstance(InsertRowsOfOneDeviceNode.class);
+
+  private static final long INSERT_MULTI_TABLETS_NODE_SIZE =
+      RamUsageEstimator.shallowSizeOfInstance(InsertMultiTabletsNode.class);
+
+  // ============================Device And Measurement===================================
+
+  private static final long PARTIAL_PATH_SIZE =
+      RamUsageEstimator.shallowSizeOfInstance(PartialPath.class);
+
+  private static final long MEASUREMENT_SCHEMA_SIZE =
+      RamUsageEstimator.shallowSizeOfInstance(MeasurementSchema.class);
+
+  // =============================Thrift==================================
+
+  private static final long T_REGION_REPLICA_SET_SIZE =
+      RamUsageEstimator.shallowSizeOfInstance(TRegionReplicaSet.class);
+
+  private static final long T_DATA_NODE_LOCATION_SIZE =
+      RamUsageEstimator.shallowSizeOfInstance(TDataNodeLocation.class);
+
+  private static final long TS_STATUS_SIZE =
+      RamUsageEstimator.shallowSizeOfInstance(TSStatus.class);
+
+  private static final long T_END_POINT_SIZE =
+      RamUsageEstimator.shallowSizeOfInstance(TEndPoint.class);
+
+  private static final long T_CONSENSUS_GROUP_ID_SIZE =
+      RamUsageEstimator.shallowSizeOfInstance(TConsensusGroupId.class);
+
+  // =============================BitMap==================================
+
+  private static final long BIT_MAP_SIZE = RamUsageEstimator.shallowSizeOfInstance(BitMap.class);
+
+  // ============================= Primitive Type Wrapper Classes =========
+
+  private static final long SIZE_OF_LONG =
+      RamUsageEstimator.alignObjectSize(Long.BYTES + NUM_BYTES_OBJECT_HEADER);
+  private static final long SIZE_OF_INT =
+      RamUsageEstimator.alignObjectSize(Integer.BYTES + NUM_BYTES_OBJECT_HEADER);
+  private static final long SIZE_OF_DOUBLE =
+      RamUsageEstimator.alignObjectSize(Double.BYTES + NUM_BYTES_OBJECT_HEADER);
+  private static final long SIZE_OF_FLOAT =
+      RamUsageEstimator.alignObjectSize(Float.BYTES + NUM_BYTES_OBJECT_HEADER);
+  private static final long SIZE_OF_BOOLEAN =
+      RamUsageEstimator.alignObjectSize(1 + NUM_BYTES_OBJECT_HEADER);
+  private static final long SIZE_OF_STRING = RamUsageEstimator.shallowSizeOfInstance(String.class);
+
+  // The calculated result for an InsertRowNode is magnified by a factor of 1.3 to compensate for
+  // the fields of its parent classes, which are not counted by the raw estimate.
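+  // For example, with purely illustrative numbers (not a measurement): if the raw estimate for an
+  // InsertRowNode comes to 1,000 bytes, sizeOf() below reports (long) (1000 * 1.3) = 1300 bytes.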
+  private static final double INSERT_ROW_NODE_EXPANSION_FACTOR = 1.3;
+
+  public static long sizeOf(final InsertNode insertNode) {
+    try {
+      final String className = insertNode.getClass().getSimpleName();
+      switch (className) {
+        case INSERT_TABLET_NODE:
+          return sizeOfInsertTabletNode((InsertTabletNode) insertNode);
+        case INSERT_ROW_NODE:
+          return (long)
+              (sizeOfInsertRowNode((InsertRowNode) insertNode) * INSERT_ROW_NODE_EXPANSION_FACTOR);
+        case INSERT_ROWS_NODE:
+          return sizeOfInsertRowsNode((InsertRowsNode) insertNode);
+        case INSERT_ROWS_OF_ONE_DEVICE_NODE:
+          return sizeOfInsertRowsOfOneDeviceNode((InsertRowsOfOneDeviceNode) insertNode);
+        case INSERT_MULTI_TABLETS_NODE:
+          return sizeOfInsertMultiTabletsNode((InsertMultiTabletsNode) insertNode);
+        default:
+          return 0L;
+      }
+    } catch (Exception e) {
+      LOGGER.warn("Failed to estimate size for InsertNode: {}", e.getMessage(), e);
+      return 0L;
+    }
+  }
+
+  // =============================InsertNode==================================
+
+  private static long calculateFullInsertNodeSize(final InsertNode node) {
+    long size = 0;
+    // PartialPath
+    size += sizeOfPartialPath(node.getDevicePath());
+    // MeasurementSchemas
+    size += sizeOfMeasurementSchemas(node.getMeasurementSchemas());
+    // Measurement
+    size += sizeOfStringArray(node.getMeasurements());
+    // dataTypes
+    size += RamUsageEstimator.shallowSizeOf(node.getDataTypes());
+    // deviceID
+    if (node.isDeviceIDExists()) {
+      size += sizeOfIDeviceID(node.getDeviceID());
+    }
+    // dataRegionReplicaSet
+    size += sizeOfTRegionReplicaSet(node.getRegionReplicaSet());
+    // progressIndex
+    size += sizeOfProgressIndex(node.getProgressIndex());
+    return size;
+  }
+
+  private static long calculateInsertNodeSizeExcludingSchemas(final InsertNode node) {
+    // measurementSchemas and measurements: two shallow reference arrays of the same length
+    long size = 2 * RamUsageEstimator.shallowSizeOf(node.getMeasurementSchemas());
+    // dataTypes
+    size += RamUsageEstimator.shallowSizeOf(node.getDataTypes());
+    // deviceID
+    if (node.isDeviceIDExists()) {
+      size += sizeOfIDeviceID(node.getDeviceID());
+    }
+    // dataRegionReplicaSet
+    size += sizeOfTRegionReplicaSet(node.getRegionReplicaSet());
+    // progressIndex
+    size += sizeOfProgressIndex(node.getProgressIndex());
+    return size;
+  }
+
+  private static long sizeOfInsertTabletNode(final InsertTabletNode node) {
+    long size = INSERT_TABLET_NODE_SIZE;
+    size += calculateFullInsertNodeSize(node);
+    size += RamUsageEstimator.sizeOf(node.getTimes());
+    size += sizeOfBitMapArray(node.getBitMaps());
+    size += sizeOfColumns(node.getColumns(), node.getMeasurementSchemas());
+    final List<Integer> range = node.getRange();
+    if (range != null) {
+      size += NUM_BYTES_OBJECT_HEADER + SIZE_OF_INT * range.size();
+    }
+    return size;
+  }
+
+  private static long calculateInsertTabletNodeSizeExcludingSchemas(final InsertTabletNode node) {
+    long size = INSERT_TABLET_NODE_SIZE;
+
+    size += calculateInsertNodeSizeExcludingSchemas(node);
+
+    size += RamUsageEstimator.sizeOf(node.getTimes());
+
+    size += sizeOfBitMapArray(node.getBitMaps());
+
+    size += sizeOfColumns(node.getColumns(), node.getMeasurementSchemas());
+
+    final List<Integer> range = node.getRange();
+    if (range != null) {
+      size += NUM_BYTES_OBJECT_HEADER + SIZE_OF_INT * range.size();
+    }
+    return size;
+  }
+
+  private static long sizeOfInsertRowNode(final InsertRowNode node) {
+    long size = INSERT_ROW_NODE_SIZE;
+    size += calculateFullInsertNodeSize(node);
+    size += sizeOfValues(node.getValues(), node.getMeasurementSchemas());
+    return size;
+  }
+
+  private static long calculateInsertRowNodeExcludingSchemas(final InsertRowNode node) {
+    long size = INSERT_ROW_NODE_SIZE;
+    size += calculateInsertNodeSizeExcludingSchemas(node);
+    size += sizeOfValues(node.getValues(), node.getMeasurementSchemas());
+    return size;
+  }
+
+  private static long sizeOfInsertRowsNode(final InsertRowsNode node) {
+    long size = INSERT_ROWS_NODE_SIZE;
+    size += calculateFullInsertNodeSize(node);
+    final List<InsertRowNode> rows = node.getInsertRowNodeList();
+    final List<Integer> indexList = node.getInsertRowNodeIndexList();
+    if (rows != null && !rows.isEmpty()) {
+      // InsertRowNodeList
+      size += NUM_BYTES_OBJECT_HEADER;
+      size +=
+          (calculateInsertRowNodeExcludingSchemas(rows.get(0)) + NUM_BYTES_OBJECT_REF)
+              * rows.size();
+      size += sizeOfPartialPath(rows.get(0).getDevicePath());
+      size += sizeOfMeasurementSchemas(rows.get(0).getMeasurementSchemas());
+      // InsertRowNodeIndexList
+      size += NUM_BYTES_OBJECT_HEADER;
+      size += (long) indexList.size() * (SIZE_OF_INT + NUM_BYTES_OBJECT_REF);
+    }
+    return size;
+  }
+
+  private static long sizeOfInsertRowsOfOneDeviceNode(final InsertRowsOfOneDeviceNode node) {
+    long size = INSERT_ROWS_OF_ONE_DEVICE_NODE_SIZE;
+    size += calculateFullInsertNodeSize(node);
+    final List<InsertRowNode> rows = node.getInsertRowNodeList();
+    final List<Integer> indexList = node.getInsertRowNodeIndexList();
+    if (rows != null && !rows.isEmpty()) {
+      // InsertRowNodeList
+      size += NUM_BYTES_OBJECT_HEADER;
+      size +=
+          (calculateInsertRowNodeExcludingSchemas(rows.get(0)) + NUM_BYTES_OBJECT_REF)
+              * rows.size();
+      size += sizeOfPartialPath(rows.get(0).getDevicePath());
+      size += sizeOfMeasurementSchemas(rows.get(0).getMeasurementSchemas());
+      // InsertRowNodeIndexList
+      size += NUM_BYTES_OBJECT_HEADER;
+      size += (long) indexList.size() * (SIZE_OF_INT + NUM_BYTES_OBJECT_REF);
+    }
+    // results
+    size += NUM_BYTES_OBJECT_HEADER;
+    for (Map.Entry<Integer, TSStatus> entry : node.getResults().entrySet()) {
+      size +=
+          Integer.BYTES
+              + sizeOfTSStatus(entry.getValue())
+              + RamUsageEstimator.HASHTABLE_RAM_BYTES_PER_ENTRY;
+    }
+    return size;
+  }
+
+  private static long sizeOfInsertMultiTabletsNode(final InsertMultiTabletsNode node) {
+    long size = INSERT_MULTI_TABLETS_NODE_SIZE;
+    size += calculateFullInsertNodeSize(node);
+    // dataTypes
+    size += RamUsageEstimator.shallowSizeOf(node.getDataTypes());
+
+    final List<InsertTabletNode> rows = node.getInsertTabletNodeList();
+    final List<Integer> indexList = node.getParentInsertTabletNodeIndexList();
+    if (rows != null && !rows.isEmpty()) {
+      // InsertTabletNodeList
+      size += NUM_BYTES_OBJECT_HEADER;
+      size +=
+          (calculateInsertTabletNodeSizeExcludingSchemas(rows.get(0)) + NUM_BYTES_OBJECT_REF)
+              * rows.size();
+      size += sizeOfPartialPath(rows.get(0).getDevicePath());
+      size += sizeOfMeasurementSchemas(rows.get(0).getMeasurementSchemas());
+      // ParentInsertTabletNodeIndexList
+      size += NUM_BYTES_OBJECT_HEADER;
+      size += (long) indexList.size() * (SIZE_OF_INT + NUM_BYTES_OBJECT_REF);
+    }
+    // results
+    if (node.getResults() != null) {
+      size += NUM_BYTES_OBJECT_HEADER;
+      for (Map.Entry<Integer, TSStatus> entry : node.getResults().entrySet()) {
+        size +=
+            Integer.BYTES
+                + sizeOfTSStatus(entry.getValue())
+                + RamUsageEstimator.HASHTABLE_RAM_BYTES_PER_ENTRY;
+      }
+    }
+    return size;
+  }
+
+  // ============================Device And Measurement===================================
+
+  public static long sizeOfPartialPath(final PartialPath partialPath) {
+    if (partialPath == null) {
+      return 0L;
+    }
+    long size = PARTIAL_PATH_SIZE;
+    final String[] nodes = partialPath.getNodes();
+    if (nodes != null) {
+      // Since fullPath may be lazily loaded, lazy loading is deliberately not triggered here;
+      // instead, fullPath is assumed to occupy the same memory as nodes.
+      size += sizeOfStringArray(nodes) * 2;
+      size += TsFileConstant.PATH_SEPARATOR.length() * (nodes.length - 1) + NUM_BYTES_OBJECT_HEADER;
+    }
+    return size;
+  }
+
+  public static long sizeOfMeasurementSchemas(final MeasurementSchema[] measurementSchemas) {
+    if (measurementSchemas == null) {
+      return 0L;
+    }
+    long size =
+        RamUsageEstimator.alignObjectSize(
+            NUM_BYTES_ARRAY_HEADER + NUM_BYTES_OBJECT_REF * measurementSchemas.length);
+    for (MeasurementSchema measurementSchema : measurementSchemas) {
+      size += sizeOfMeasurementSchema(measurementSchema);
+    }
+    return size;
+  }
+
+  private static long sizeOfMeasurementSchema(final MeasurementSchema measurementSchema) {
+    if (measurementSchema == null) {
+      return 0L;
+    }
+    // Header + primitive + reference
+    long size = MEASUREMENT_SCHEMA_SIZE;
+    // measurementId
+    size += RamUsageEstimator.sizeOf(measurementSchema.getMeasurementId());
+    // props
+    final Map<String, String> props = measurementSchema.getProps();
+    if (props != null) {
+      size += NUM_BYTES_OBJECT_HEADER;
+      for (Map.Entry<String, String> entry : props.entrySet()) {
+        size +=
+            RamUsageEstimator.sizeOf(entry.getKey())
+                + RamUsageEstimator.sizeOf(entry.getValue())
+                + RamUsageEstimator.HASHTABLE_RAM_BYTES_PER_ENTRY;
+      }
+    }
+    size += TS_ENCODING_PLAIN_BUILDER_SIZE;
+    return size;
+  }
+
+  private static long sizeOfIDeviceID(final IDeviceID deviceID) {
+    return Objects.nonNull(deviceID) ? deviceID.ramBytesUsed() : 0L;
+  }
+
+  // =============================Thrift==================================
+
+  private static long sizeOfTRegionReplicaSet(final TRegionReplicaSet tRegionReplicaSet) {
+    if (tRegionReplicaSet == null) {
+      return 0L;
+    }
+    // Memory alignment of basic types and reference types in structures
+    long size = T_REGION_REPLICA_SET_SIZE;
+    // Reference-typed fields cannot be measured exactly, so they are roughly estimated
+    if (tRegionReplicaSet.isSetRegionId()) {
+      size += sizeOfTConsensusGroupId();
+    }
+    if (tRegionReplicaSet.isSetDataNodeLocations()) {
+      size += NUM_BYTES_OBJECT_HEADER;
+      for (TDataNodeLocation tDataNodeLocation : tRegionReplicaSet.getDataNodeLocations()) {
+        size += sizeOfTDataNodeLocation(tDataNodeLocation);
+      }
+    }
+    return size;
+  }
+
+  private static long sizeOfTConsensusGroupId() {
+    // objectHeader + type + id
+    return T_CONSENSUS_GROUP_ID_SIZE;
+  }
+
+  private static long sizeOfTDataNodeLocation(final TDataNodeLocation tDataNodeLocation) {
+    if (tDataNodeLocation == null) {
+      return 0L;
+    }
+    long size = T_DATA_NODE_LOCATION_SIZE;
+
+    size += sizeOfTEndPoint(tDataNodeLocation.getClientRpcEndPoint());
+    size += sizeOfTEndPoint(tDataNodeLocation.getInternalEndPoint());
+    size += sizeOfTEndPoint(tDataNodeLocation.getMPPDataExchangeEndPoint());
+    size += sizeOfTEndPoint(tDataNodeLocation.getDataRegionConsensusEndPoint());
+    size += sizeOfTEndPoint(tDataNodeLocation.getSchemaRegionConsensusEndPoint());
+
+    return size;
+  }
+
+  private static long sizeOfTEndPoint(final TEndPoint tEndPoint) {
+    if (tEndPoint == null) {
+      return 0L;
+    }
+    // objectHeader + ip + port
+    long size = T_END_POINT_SIZE;
+
+    size += RamUsageEstimator.sizeOf(tEndPoint.ip);
+    return size;
+  }
+
+  private static long sizeOfTSStatus(final TSStatus tSStatus) {
+    if (tSStatus == null) {
+      return 0L;
+    }
+    long size = TS_STATUS_SIZE;
+    // message
+    if (tSStatus.isSetMessage()) {
+      size += RamUsageEstimator.sizeOf(tSStatus.message);
+    }
+    // ignore subStatus
+    // redirectNode
+    if 
(tSStatus.isSetRedirectNode()) { + size += sizeOfTEndPoint(tSStatus.redirectNode); + } + return size; + } + + // =============================ProgressIndex================================== + + private static long sizeOfProgressIndex(final ProgressIndex progressIndex) { + return Objects.nonNull(progressIndex) ? progressIndex.ramBytesUsed() : 0L; + } + + // =============================Write================================== + + private static long sizeOfBinary(final Binary binary) { + return Objects.nonNull(binary) ? binary.ramBytesUsed() : 0L; + } + + public static long sizeOfStringArray(final String[] values) { + return Objects.nonNull(values) ? RamUsageEstimator.sizeOf(values) : 0L; + } + + public static long sizeOfBitMapArray(BitMap[] bitMaps) { + if (bitMaps == null) { + return 0L; + } + long size = + RamUsageEstimator.alignObjectSize( + NUM_BYTES_ARRAY_HEADER + NUM_BYTES_OBJECT_REF * bitMaps.length); + for (BitMap bitMap : bitMaps) { + size += sizeOfBitMap(bitMap); + } + return size; + } + + private static long sizeOfBitMap(final BitMap bitMaps) { + if (bitMaps == null) { + return 0L; + } + long size = BIT_MAP_SIZE; + + size += + RamUsageEstimator.alignObjectSize(NUM_BYTES_ARRAY_HEADER + bitMaps.getByteArray().length); + return size; + } + + public static long sizeOfColumns( + final Object[] columns, final MeasurementSchema[] measurementSchemas) { + // Directly calculate if measurementSchemas are absent + if (Objects.isNull(measurementSchemas)) { + return RamUsageEstimator.shallowSizeOf(columns) + + Arrays.stream(columns) + .mapToLong(InsertNodeMemoryEstimator::getNumBytesUnknownObject) + .reduce(0L, Long::sum); + } + long size = + RamUsageEstimator.alignObjectSize( + NUM_BYTES_ARRAY_HEADER + NUM_BYTES_OBJECT_REF * columns.length); + for (int i = 0; i < columns.length; i++) { + if (measurementSchemas[i] == null || measurementSchemas[i].getType() == null) { + continue; + } + switch (measurementSchemas[i].getType()) { + case INT64: + case TIMESTAMP: + { + size += RamUsageEstimator.sizeOf((long[]) columns[i]); + break; + } + case DATE: + case INT32: + { + size += RamUsageEstimator.sizeOf((int[]) columns[i]); + break; + } + case DOUBLE: + { + size += RamUsageEstimator.sizeOf((double[]) columns[i]); + break; + } + case FLOAT: + { + size += RamUsageEstimator.sizeOf((float[]) columns[i]); + break; + } + case BOOLEAN: + { + size += RamUsageEstimator.sizeOf((boolean[]) columns[i]); + break; + } + case STRING: + case TEXT: + case BLOB: + { + size += getBinarySize((Binary[]) columns[i]); + break; + } + } + } + return size; + } + + private static long getNumBytesUnknownObject(final Object obj) { + return obj instanceof Binary[] + ? 
getBinarySize((Binary[]) obj) + : RamUsageEstimator.sizeOfObject(obj); + } + + private static long getBinarySize(final Binary[] binaries) { + if (binaries == null) { + return 0L; + } + + return RamUsageEstimator.shallowSizeOf(binaries) + + Arrays.stream(binaries) + .mapToLong(InsertNodeMemoryEstimator::sizeOfBinary) + .reduce(0L, Long::sum); + } + + public static long sizeOfValues( + final Object[] values, final MeasurementSchema[] measurementSchemas) { + // Directly calculate if measurementSchemas are absent + if (Objects.isNull(measurementSchemas)) { + return RamUsageEstimator.shallowSizeOf(values) + + Arrays.stream(values) + .mapToLong(InsertNodeMemoryEstimator::getNumBytesUnknownObject) + .reduce(0L, Long::sum); + } + long size = + RamUsageEstimator.alignObjectSize( + NUM_BYTES_ARRAY_HEADER + NUM_BYTES_OBJECT_REF * values.length); + for (int i = 0; i < values.length; i++) { + switch (measurementSchemas[i].getType()) { + case INT64: + case TIMESTAMP: + { + size += SIZE_OF_LONG; + break; + } + case DATE: + case INT32: + { + size += SIZE_OF_INT; + break; + } + case DOUBLE: + { + size += SIZE_OF_DOUBLE; + break; + } + case FLOAT: + { + size += SIZE_OF_FLOAT; + break; + } + case BOOLEAN: + { + size += SIZE_OF_BOOLEAN; + break; + } + case STRING: + case TEXT: + case BLOB: + { + final Binary binary = (Binary) values[i]; + size += sizeOfBinary(binary); + } + } + } + return size; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeDynamicMemoryBlock.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeDynamicMemoryBlock.java new file mode 100644 index 0000000000000..4e33b87182821 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeDynamicMemoryBlock.java @@ -0,0 +1,156 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.iotdb.db.pipe.resource.memory;
+
+import org.apache.tsfile.utils.Pair;
+
+import javax.validation.constraints.NotNull;
+
+import java.util.function.Consumer;
+import java.util.stream.Stream;
+
+public class PipeDynamicMemoryBlock {
+
+  private final PipeModelFixedMemoryBlock fixedMemoryBlock;
+
+  private boolean isExpandable = true;
+
+  private Consumer<PipeDynamicMemoryBlock> expand = null;
+
+  private volatile boolean released = false;
+
+  private volatile long memoryUsageInBytes;
+
+  private volatile double historyMemoryEfficiency;
+
+  private volatile double currentMemoryEfficiency;
+
+  PipeDynamicMemoryBlock(
+      final @NotNull PipeModelFixedMemoryBlock fixedMemoryBlock, final long memoryUsageInBytes) {
+    this.memoryUsageInBytes = Math.min(memoryUsageInBytes, 0);
+    this.fixedMemoryBlock = fixedMemoryBlock;
+  }
+
+  public long getMemoryUsageInBytes() {
+    return memoryUsageInBytes;
+  }
+
+  public void setMemoryUsageInBytes(final long memoryUsageInBytes) {
+    this.memoryUsageInBytes = memoryUsageInBytes;
+  }
+
+  public Pair<Double, Double> getMemoryEfficiency() {
+    synchronized (fixedMemoryBlock) {
+      return new Pair<>(historyMemoryEfficiency, currentMemoryEfficiency);
+    }
+  }
+
+  public void setExpandable(boolean expandable) {
+    isExpandable = expandable;
+  }
+
+  public void setExpand(Consumer<PipeDynamicMemoryBlock> expand) {
+    this.expand = expand;
+  }
+
+  public double getMemoryBlockUsageRatio() {
+    return (double) memoryUsageInBytes / fixedMemoryBlock.getMemoryUsageInBytes();
+  }
+
+  public double getFixedMemoryBlockUsageRatio() {
+    return (double) fixedMemoryBlock.getMemoryAllocatedInBytes()
+        / fixedMemoryBlock.getMemoryUsageInBytes();
+  }
+
+  public long canAllocateMemorySize() {
+    return fixedMemoryBlock.getMemoryUsageInBytes() - fixedMemoryBlock.getMemoryAllocatedInBytes();
+  }
+
+  public synchronized long getExpectedAverageAllocatedMemorySize() {
+    return fixedMemoryBlock.getMemoryUsageInBytes() / fixedMemoryBlock.getMemoryBlocks().size();
+  }
+
+  public void updateCurrentMemoryEfficiencyAdjustMem(double currentMemoryEfficiency) {
+    synchronized (fixedMemoryBlock) {
+      this.historyMemoryEfficiency = this.currentMemoryEfficiency;
+      if (Double.isNaN(currentMemoryEfficiency)
+          || Double.isInfinite(currentMemoryEfficiency)
+          || currentMemoryEfficiency < 0.0) {
+        currentMemoryEfficiency = 0.0;
+      }
+      this.currentMemoryEfficiency = Math.min(currentMemoryEfficiency, 1.0);
+      fixedMemoryBlock.dynamicallyAdjustMemory(this);
+    }
+  }
+
+  public long getFixedMemoryCapacity() {
+    return fixedMemoryBlock.getMemoryUsageInBytes();
+  }
+
+  public void updateMemoryEfficiency(
+      double currentMemoryEfficiency, double historyMemoryEfficiency) {
+    synchronized (fixedMemoryBlock) {
+      if (Double.isNaN(currentMemoryEfficiency)
+          || Double.isInfinite(currentMemoryEfficiency)
+          || currentMemoryEfficiency < 0.0) {
+        currentMemoryEfficiency = 0.0;
+      }
+
+      if (Double.isNaN(historyMemoryEfficiency)
+          || Double.isInfinite(historyMemoryEfficiency)
+          || historyMemoryEfficiency < 0.0) {
+        historyMemoryEfficiency = 0.0;
+      }
+
+      this.historyMemoryEfficiency = Math.min(historyMemoryEfficiency, 1.0);
+      this.currentMemoryEfficiency = Math.min(currentMemoryEfficiency, 1.0);
+    }
+  }
+
+  public Stream<PipeDynamicMemoryBlock> getMemoryBlocks() {
+    return fixedMemoryBlock.getMemoryBlocksStream();
+  }
+
+  public void applyForDynamicMemory(final long memoryUsageInBytes) {
+    fixedMemoryBlock.resetMemoryBlockSize(this, memoryUsageInBytes);
+  }
+
+  public boolean isReleased() {
+    return released;
+  }
+
+  public void close() {
+    if (released) {
+      return;
+    }
+    synchronized (fixedMemoryBlock)
{ + if (!released) { + fixedMemoryBlock.releaseMemory(this); + released = true; + } + } + } + + void doExpand() { + if (isExpandable && expand != null) { + expand.accept(this); + } + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeFixedMemoryBlock.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeFixedMemoryBlock.java new file mode 100644 index 0000000000000..5b2eaa7c09e4d --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeFixedMemoryBlock.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.pipe.resource.memory; + +import java.util.function.BiConsumer; +import java.util.function.LongUnaryOperator; + +public abstract class PipeFixedMemoryBlock extends PipeMemoryBlock { + + public PipeFixedMemoryBlock(long memoryUsageInBytes) { + super(memoryUsageInBytes); + } + + @Override + boolean shrink() { + return false; + } + + @Override + boolean expand() { + return false; + } + + @Override + public PipeMemoryBlock setShrinkMethod(LongUnaryOperator shrinkMethod) { + throw new UnsupportedOperationException( + "Shrink method is not supported in PipeFixedMemoryBlock"); + } + + @Override + public PipeMemoryBlock setShrinkCallback(BiConsumer shrinkCallback) { + throw new UnsupportedOperationException( + "Shrink callback is not supported in PipeFixedMemoryBlock"); + } + + @Override + public PipeMemoryBlock setExpandMethod(LongUnaryOperator extendMethod) { + throw new UnsupportedOperationException( + "Expand method is not supported in PipeFixedMemoryBlock"); + } + + @Override + public PipeMemoryBlock setExpandCallback(BiConsumer expandCallback) { + throw new UnsupportedOperationException( + "Expand callback is not supported in PipeFixedMemoryBlock"); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryBlock.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryBlock.java index 8ebe90b388c21..07f5b904523c8 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryBlock.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryBlock.java @@ -170,20 +170,32 @@ public String toString() { @Override public void close() { + boolean isInterrupted = false; + while (true) { try { if (lock.tryLock(50, TimeUnit.MICROSECONDS)) { try { pipeMemoryManager.release(this); + if (isInterrupted) { + LOGGER.warn("{} is released after thread interruption.", this); + } break; } finally { lock.unlock(); } } } catch (final InterruptedException e) { - Thread.currentThread().interrupt(); + // Each time the close task is run, it means 
that the interrupt status left by the previous + // tryLock does not need to be retained. Otherwise, it will lead to an infinite loop. + isInterrupted = true; LOGGER.warn("Interrupted while waiting for the lock.", e); } } + + // Restore the interrupt status of the current thread + if (isInterrupted) { + Thread.currentThread().interrupt(); + } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryBlockType.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryBlockType.java new file mode 100644 index 0000000000000..846fc7dd1ce17 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryBlockType.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.pipe.resource.memory; + +public enum PipeMemoryBlockType { + NORMAL, + TABLET, + TS_FILE, + BATCH, + WAL +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryManager.java index 8c7bc2c8ed259..8a87cf8bad0a0 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryManager.java @@ -23,8 +23,8 @@ import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; +import org.apache.iotdb.db.pipe.resource.memory.strategy.ThresholdAllocationStrategy; -import org.apache.tsfile.write.record.Tablet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -41,12 +41,6 @@ public class PipeMemoryManager { private static final boolean PIPE_MEMORY_MANAGEMENT_ENABLED = PipeConfig.getInstance().getPipeMemoryManagementEnabled(); - - private static final int MEMORY_ALLOCATE_MAX_RETRIES = - PipeConfig.getInstance().getPipeMemoryAllocateMaxRetries(); - private static final long MEMORY_ALLOCATE_RETRY_INTERVAL_IN_MS = - PipeConfig.getInstance().getPipeMemoryAllocateRetryIntervalInMs(); - private static final long TOTAL_MEMORY_SIZE_IN_BYTES = IoTDBDescriptor.getInstance().getConfig().getAllocateMemoryForPipe(); private static final long MEMORY_ALLOCATE_MIN_SIZE_IN_BYTES = @@ -54,82 +48,248 @@ public class PipeMemoryManager { private long usedMemorySizeInBytes; - // To avoid too much parsed events causing OOM. If total tablet memory size exceeds this - // threshold, allocations of memory block for tablets will be rejected. 
- private static final double TABLET_MEMORY_REJECT_THRESHOLD = - PipeConfig.getInstance().getPipeDataStructureTabletMemoryBlockAllocationRejectThreshold(); + private static final double EXCEED_PROTECT_THRESHOLD = 0.95; + private volatile long usedMemorySizeInBytesOfTablets; + private volatile long usedMemorySizeInBytesOfTsFiles; + + // Only non-zero memory blocks will be added to this set. private final Set allocatedBlocks = new HashSet<>(); public PipeMemoryManager() { PipeDataNodeAgent.runtime() .registerPeriodicalJob( "PipeMemoryManager#tryExpandAll()", - this::tryExpandAll, + this::tryExpandAllAndCheckConsistency, PipeConfig.getInstance().getPipeMemoryExpanderIntervalSeconds()); } + // NOTE: Here we unify the memory threshold judgment for tablet and tsfile memory block, because + // introducing too many heuristic rules not conducive to flexible dynamic adjustment of memory + // configuration: + // 1. Proportion of memory occupied by tablet memory block: [TABLET_MEMORY_REJECT_THRESHOLD / 2, + // TABLET_MEMORY_REJECT_THRESHOLD + TS_FILE_MEMORY_REJECT_THRESHOLD / 2] + // 2. Proportion of memory occupied by tsfile memory block: [TS_FILE_MEMORY_REJECT_THRESHOLD / 2, + // TS_FILE_MEMORY_REJECT_THRESHOLD + TABLET_MEMORY_REJECT_THRESHOLD / 2] + // 3. The sum of the memory proportion occupied by the tablet memory block and the tsfile memory + // block does not exceed TABLET_MEMORY_REJECT_THRESHOLD + TS_FILE_MEMORY_REJECT_THRESHOLD + + private static double allowedMaxMemorySizeInBytesOfTabletsAndTsFiles() { + return (PipeConfig.getInstance() + .getPipeDataStructureTabletMemoryBlockAllocationRejectThreshold() + + PipeConfig.getInstance() + .getPipeDataStructureTsFileMemoryBlockAllocationRejectThreshold()) + * getTotalNonFloatingMemorySizeInBytes(); + } + + private static double allowedMaxMemorySizeInBytesOfTablets() { + return (PipeConfig.getInstance() + .getPipeDataStructureTabletMemoryBlockAllocationRejectThreshold() + + PipeConfig.getInstance() + .getPipeDataStructureTsFileMemoryBlockAllocationRejectThreshold() + / 2) + * getTotalNonFloatingMemorySizeInBytes(); + } + + private static double allowedMaxMemorySizeInBytesOfTsTiles() { + return (PipeConfig.getInstance() + .getPipeDataStructureTsFileMemoryBlockAllocationRejectThreshold() + + PipeConfig.getInstance() + .getPipeDataStructureTabletMemoryBlockAllocationRejectThreshold() + / 2) + * getTotalNonFloatingMemorySizeInBytes(); + } + + public boolean isEnough4TabletParsing() { + return (double) usedMemorySizeInBytesOfTablets + (double) usedMemorySizeInBytesOfTsFiles + < EXCEED_PROTECT_THRESHOLD * allowedMaxMemorySizeInBytesOfTabletsAndTsFiles() + && (double) usedMemorySizeInBytesOfTablets + < EXCEED_PROTECT_THRESHOLD * allowedMaxMemorySizeInBytesOfTablets(); + } + + private boolean isHardEnough4TabletParsing() { + return (double) usedMemorySizeInBytesOfTablets + (double) usedMemorySizeInBytesOfTsFiles + < allowedMaxMemorySizeInBytesOfTabletsAndTsFiles() + && (double) usedMemorySizeInBytesOfTablets < allowedMaxMemorySizeInBytesOfTablets(); + } + + public boolean isEnough4TsFileSlicing() { + return (double) usedMemorySizeInBytesOfTablets + (double) usedMemorySizeInBytesOfTsFiles + < EXCEED_PROTECT_THRESHOLD * allowedMaxMemorySizeInBytesOfTabletsAndTsFiles() + && (double) usedMemorySizeInBytesOfTsFiles + < EXCEED_PROTECT_THRESHOLD * allowedMaxMemorySizeInBytesOfTsTiles(); + } + + private boolean isHardEnough4TsFileSlicing() { + return (double) usedMemorySizeInBytesOfTablets + (double) usedMemorySizeInBytesOfTsFiles + < 
allowedMaxMemorySizeInBytesOfTabletsAndTsFiles() + && (double) usedMemorySizeInBytesOfTsFiles < allowedMaxMemorySizeInBytesOfTsTiles(); + } + public synchronized PipeMemoryBlock forceAllocate(long sizeInBytes) throws PipeRuntimeOutOfMemoryCriticalException { - return forceAllocate(sizeInBytes, false); + if (!PIPE_MEMORY_MANAGEMENT_ENABLED) { + // No need to calculate the tablet size, skip it to save time + return new PipeMemoryBlock(0); + } + + if (sizeInBytes == 0) { + return registerMemoryBlock(0); + } + + return forceAllocateWithRetry(sizeInBytes, PipeMemoryBlockType.NORMAL); } - public PipeTabletMemoryBlock forceAllocateWithRetry(Tablet tablet) + public PipeTabletMemoryBlock forceAllocateForTabletWithRetry(long tabletSizeInBytes) throws PipeRuntimeOutOfMemoryCriticalException { if (!PIPE_MEMORY_MANAGEMENT_ENABLED) { // No need to calculate the tablet size, skip it to save time return new PipeTabletMemoryBlock(0); } - for (int i = 1; i <= MEMORY_ALLOCATE_MAX_RETRIES; i++) { - if ((double) usedMemorySizeInBytesOfTablets / TOTAL_MEMORY_SIZE_IN_BYTES - < TABLET_MEMORY_REJECT_THRESHOLD) { + if (tabletSizeInBytes == 0) { + return (PipeTabletMemoryBlock) registerMemoryBlock(0, PipeMemoryBlockType.TABLET); + } + + for (int i = 1, size = PipeConfig.getInstance().getPipeMemoryAllocateMaxRetries(); + i <= size; + i++) { + if (isHardEnough4TabletParsing()) { break; } try { - Thread.sleep(MEMORY_ALLOCATE_RETRY_INTERVAL_IN_MS); + Thread.sleep(PipeConfig.getInstance().getPipeMemoryAllocateRetryIntervalInMs()); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); LOGGER.warn("forceAllocateWithRetry: interrupted while waiting for available memory", ex); } } - if ((double) usedMemorySizeInBytesOfTablets / TOTAL_MEMORY_SIZE_IN_BYTES - >= TABLET_MEMORY_REJECT_THRESHOLD) { + if (!isHardEnough4TabletParsing()) { throw new PipeRuntimeOutOfMemoryCriticalException( String.format( "forceAllocateForTablet: failed to allocate because there's too much memory for tablets, " - + "total memory size %d bytes, used memory for tablet size %d bytes,", - TOTAL_MEMORY_SIZE_IN_BYTES, usedMemorySizeInBytesOfTablets)); + + "total memory size %d bytes, used memory for tablet size %d bytes, requested memory size %d bytes", + getTotalNonFloatingMemorySizeInBytes(), + usedMemorySizeInBytesOfTablets, + tabletSizeInBytes)); } synchronized (this) { final PipeTabletMemoryBlock block = (PipeTabletMemoryBlock) - forceAllocate(PipeMemoryWeightUtil.calculateTabletSizeInBytes(tablet), true); + forceAllocateWithRetry(tabletSizeInBytes, PipeMemoryBlockType.TABLET); usedMemorySizeInBytesOfTablets += block.getMemoryUsageInBytes(); return block; } } - private PipeMemoryBlock forceAllocate(long sizeInBytes, boolean isForTablet) + public PipeTsFileMemoryBlock forceAllocateForTsFileWithRetry(long tsFileSizeInBytes) throws PipeRuntimeOutOfMemoryCriticalException { if (!PIPE_MEMORY_MANAGEMENT_ENABLED) { - return isForTablet - ? 
new PipeTabletMemoryBlock(sizeInBytes) - : new PipeMemoryBlock(sizeInBytes); + return new PipeTsFileMemoryBlock(0); + } + + if (tsFileSizeInBytes == 0) { + return (PipeTsFileMemoryBlock) registerMemoryBlock(0, PipeMemoryBlockType.TS_FILE); + } + + for (int i = 1, size = PipeConfig.getInstance().getPipeMemoryAllocateMaxRetries(); + i <= size; + i++) { + if (isHardEnough4TsFileSlicing()) { + break; + } + + try { + Thread.sleep(PipeConfig.getInstance().getPipeMemoryAllocateRetryIntervalInMs()); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + LOGGER.warn("forceAllocateWithRetry: interrupted while waiting for available memory", ex); + } + } + + if (!isHardEnough4TsFileSlicing()) { + throw new PipeRuntimeOutOfMemoryCriticalException( + String.format( + "forceAllocateForTsFile: failed to allocate because there's too much memory for tsfiles, " + + "total memory size %d bytes, used memory for tsfile size %d bytes, requested memory size %d bytes", + getTotalNonFloatingMemorySizeInBytes(), + usedMemorySizeInBytesOfTsFiles, + tsFileSizeInBytes)); + } + + synchronized (this) { + final PipeTsFileMemoryBlock block = + (PipeTsFileMemoryBlock) + forceAllocateWithRetry(tsFileSizeInBytes, PipeMemoryBlockType.TS_FILE); + usedMemorySizeInBytesOfTsFiles += block.getMemoryUsageInBytes(); + return block; + } + } + + public PipeModelFixedMemoryBlock forceAllocateForModelFixedMemoryBlock( + long fixedSizeInBytes, PipeMemoryBlockType type) + throws PipeRuntimeOutOfMemoryCriticalException { + if (!PIPE_MEMORY_MANAGEMENT_ENABLED) { + return new PipeModelFixedMemoryBlock(Long.MAX_VALUE, new ThresholdAllocationStrategy()); + } + + if (fixedSizeInBytes == 0) { + return (PipeModelFixedMemoryBlock) registerMemoryBlock(0, type); + } + + for (int i = 1, size = PipeConfig.getInstance().getPipeMemoryAllocateMaxRetries(); + i <= size; + i++) { + if (getFreeMemorySizeInBytes() >= fixedSizeInBytes) { + break; + } + + try { + Thread.sleep(PipeConfig.getInstance().getPipeMemoryAllocateRetryIntervalInMs()); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + LOGGER.warn("forceAllocateWithRetry: interrupted while waiting for available memory", ex); + } + } + + synchronized (this) { + if (getFreeMemorySizeInBytes() < fixedSizeInBytes) { + return (PipeModelFixedMemoryBlock) forceAllocateWithRetry(getFreeMemorySizeInBytes(), type); + } + + return (PipeModelFixedMemoryBlock) forceAllocateWithRetry(fixedSizeInBytes, type); + } + } + + private PipeMemoryBlock forceAllocateWithRetry(long sizeInBytes, PipeMemoryBlockType type) + throws PipeRuntimeOutOfMemoryCriticalException { + if (!PIPE_MEMORY_MANAGEMENT_ENABLED) { + switch (type) { + case TABLET: + return new PipeTabletMemoryBlock(sizeInBytes); + case TS_FILE: + return new PipeTsFileMemoryBlock(sizeInBytes); + case BATCH: + case WAL: + return new PipeModelFixedMemoryBlock(sizeInBytes, new ThresholdAllocationStrategy()); + default: + return new PipeMemoryBlock(sizeInBytes); + } } - for (int i = 1; i <= MEMORY_ALLOCATE_MAX_RETRIES; i++) { - if (TOTAL_MEMORY_SIZE_IN_BYTES - usedMemorySizeInBytes >= sizeInBytes) { - return registerMemoryBlock(sizeInBytes, isForTablet); + final int memoryAllocateMaxRetries = PipeConfig.getInstance().getPipeMemoryAllocateMaxRetries(); + for (int i = 1; i <= memoryAllocateMaxRetries; i++) { + if (getTotalNonFloatingMemorySizeInBytes() - usedMemorySizeInBytes >= sizeInBytes) { + return registerMemoryBlock(sizeInBytes, type); } try { - tryShrink4Allocate(sizeInBytes); - 
this.wait(MEMORY_ALLOCATE_RETRY_INTERVAL_IN_MS); + tryShrinkUntilFreeMemorySatisfy(sizeInBytes); + this.wait(PipeConfig.getInstance().getPipeMemoryAllocateRetryIntervalInMs()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); LOGGER.warn("forceAllocate: interrupted while waiting for available memory", e); @@ -141,20 +301,91 @@ private PipeMemoryBlock forceAllocate(long sizeInBytes, boolean isForTablet) "forceAllocate: failed to allocate memory after %d retries, " + "total memory size %d bytes, used memory size %d bytes, " + "requested memory size %d bytes", - MEMORY_ALLOCATE_MAX_RETRIES, - TOTAL_MEMORY_SIZE_IN_BYTES, + memoryAllocateMaxRetries, + getTotalNonFloatingMemorySizeInBytes(), + usedMemorySizeInBytes, + sizeInBytes)); + } + + public synchronized void forceResize(PipeMemoryBlock block, long targetSize) { + if (block == null || block.isReleased()) { + LOGGER.warn("forceResize: cannot resize a null or released memory block"); + return; + } + + if (!PIPE_MEMORY_MANAGEMENT_ENABLED) { + block.setMemoryUsageInBytes(targetSize); + return; + } + + final long oldSize = block.getMemoryUsageInBytes(); + if (oldSize == 0) { + // If the memory block is not registered, we need to register it first. + // Otherwise, the memory usage will be inconsistent. + // See registerMemoryBlock for more details. + allocatedBlocks.add(block); + } + + if (oldSize >= targetSize) { + usedMemorySizeInBytes -= oldSize - targetSize; + if (block instanceof PipeTabletMemoryBlock) { + usedMemorySizeInBytesOfTablets -= oldSize - targetSize; + } + if (block instanceof PipeTsFileMemoryBlock) { + usedMemorySizeInBytesOfTsFiles -= oldSize - targetSize; + } + block.setMemoryUsageInBytes(targetSize); + + // If no memory is used in the block, we can remove it from the allocated blocks. + if (targetSize == 0) { + allocatedBlocks.remove(block); + } + return; + } + + long sizeInBytes = targetSize - oldSize; + final int memoryAllocateMaxRetries = PipeConfig.getInstance().getPipeMemoryAllocateMaxRetries(); + for (int i = 1; i <= memoryAllocateMaxRetries; i++) { + if (getTotalNonFloatingMemorySizeInBytes() - usedMemorySizeInBytes >= sizeInBytes) { + usedMemorySizeInBytes += sizeInBytes; + if (block instanceof PipeTabletMemoryBlock) { + usedMemorySizeInBytesOfTablets += sizeInBytes; + } + if (block instanceof PipeTsFileMemoryBlock) { + usedMemorySizeInBytesOfTsFiles += sizeInBytes; + } + block.setMemoryUsageInBytes(targetSize); + return; + } + + try { + tryShrinkUntilFreeMemorySatisfy(sizeInBytes); + this.wait(PipeConfig.getInstance().getPipeMemoryAllocateRetryIntervalInMs()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + LOGGER.warn("forceResize: interrupted while waiting for available memory", e); + } + } + + throw new PipeRuntimeOutOfMemoryCriticalException( + String.format( + "forceResize: failed to allocate memory after %d retries, " + + "total memory size %d bytes, used memory size %d bytes, " + + "requested memory size %d bytes", + memoryAllocateMaxRetries, + getTotalNonFloatingMemorySizeInBytes(), usedMemorySizeInBytes, sizeInBytes)); } /** - * Allocate a {@link PipeMemoryBlock} for pipe only if memory already used is less than the - * specified threshold. + * Allocate a {@link PipeMemoryBlock} for pipe only if memory used after allocation is less than + * the specified threshold. 
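+   * <p>The previous fallback that shrank other blocks to make room is gone: when the
+   * post-allocation usage would exceed {@code usedThreshold}, this method now returns
+   * {@code null} immediately instead of calling {@code tryShrink4Allocate} first.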
* * @param sizeInBytes size of memory needed to allocate * @param usedThreshold proportion of memory used, ranged from 0.0 to 1.0 - * @return {@code null} if the proportion of memory already used exceeds {@code usedThreshold}. - * Will return a memory block otherwise. + * @return {@code null} if the proportion of memory used after allocation exceeds {@code + * usedThreshold}. Will return a memory block otherwise. */ public synchronized PipeMemoryBlock forceAllocateIfSufficient( long sizeInBytes, float usedThreshold) { @@ -166,18 +397,15 @@ public synchronized PipeMemoryBlock forceAllocateIfSufficient( return new PipeMemoryBlock(sizeInBytes); } - if (TOTAL_MEMORY_SIZE_IN_BYTES - usedMemorySizeInBytes >= sizeInBytes - && (float) usedMemorySizeInBytes / TOTAL_MEMORY_SIZE_IN_BYTES < usedThreshold) { + if (sizeInBytes == 0) { + return registerMemoryBlock(0); + } + + if ((float) (usedMemorySizeInBytes + sizeInBytes) + <= getTotalNonFloatingMemorySizeInBytes() * usedThreshold) { return forceAllocate(sizeInBytes); - } else { - long memoryToShrink = - Math.max( - usedMemorySizeInBytes - (long) (TOTAL_MEMORY_SIZE_IN_BYTES * usedThreshold), - sizeInBytes); - if (tryShrink4Allocate(memoryToShrink)) { - return forceAllocate(sizeInBytes); - } } + return null; } @@ -191,19 +419,20 @@ public synchronized PipeMemoryBlock tryAllocate( return new PipeMemoryBlock(sizeInBytes); } - if (TOTAL_MEMORY_SIZE_IN_BYTES - usedMemorySizeInBytes >= sizeInBytes) { + if (sizeInBytes == 0 + || getTotalNonFloatingMemorySizeInBytes() - usedMemorySizeInBytes >= sizeInBytes) { return registerMemoryBlock(sizeInBytes); } long sizeToAllocateInBytes = sizeInBytes; while (sizeToAllocateInBytes > MEMORY_ALLOCATE_MIN_SIZE_IN_BYTES) { - if (TOTAL_MEMORY_SIZE_IN_BYTES - usedMemorySizeInBytes >= sizeToAllocateInBytes) { + if (getTotalNonFloatingMemorySizeInBytes() - usedMemorySizeInBytes >= sizeToAllocateInBytes) { LOGGER.info( "tryAllocate: allocated memory, " + "total memory size {} bytes, used memory size {} bytes, " - + "original requested memory size {} bytes," + + "original requested memory size {} bytes, " + "actual requested memory size {} bytes", - TOTAL_MEMORY_SIZE_IN_BYTES, + getTotalNonFloatingMemorySizeInBytes(), usedMemorySizeInBytes, sizeInBytes, sizeToAllocateInBytes); @@ -216,13 +445,13 @@ public synchronized PipeMemoryBlock tryAllocate( MEMORY_ALLOCATE_MIN_SIZE_IN_BYTES); } - if (tryShrink4Allocate(sizeToAllocateInBytes)) { + if (tryShrinkUntilFreeMemorySatisfy(sizeToAllocateInBytes)) { LOGGER.info( "tryAllocate: allocated memory, " + "total memory size {} bytes, used memory size {} bytes, " - + "original requested memory size {} bytes," + + "original requested memory size {} bytes, " + "actual requested memory size {} bytes", - TOTAL_MEMORY_SIZE_IN_BYTES, + getTotalNonFloatingMemorySizeInBytes(), usedMemorySizeInBytes, sizeInBytes, sizeToAllocateInBytes); @@ -232,7 +461,7 @@ public synchronized PipeMemoryBlock tryAllocate( "tryAllocate: failed to allocate memory, " + "total memory size {} bytes, used memory size {} bytes, " + "requested memory size {} bytes", - TOTAL_MEMORY_SIZE_IN_BYTES, + getTotalNonFloatingMemorySizeInBytes(), usedMemorySizeInBytes, sizeInBytes); return registerMemoryBlock(0); @@ -245,11 +474,15 @@ public synchronized boolean tryAllocate( return false; } - if (TOTAL_MEMORY_SIZE_IN_BYTES - usedMemorySizeInBytes >= memoryInBytesNeededToBeAllocated) { + if (getTotalNonFloatingMemorySizeInBytes() - usedMemorySizeInBytes + >= memoryInBytesNeededToBeAllocated) { usedMemorySizeInBytes += 
memoryInBytesNeededToBeAllocated; if (block instanceof PipeTabletMemoryBlock) { usedMemorySizeInBytesOfTablets += memoryInBytesNeededToBeAllocated; } + if (block instanceof PipeTsFileMemoryBlock) { + usedMemorySizeInBytesOfTsFiles += memoryInBytesNeededToBeAllocated; + } block.setMemoryUsageInBytes(block.getMemoryUsageInBytes() + memoryInBytesNeededToBeAllocated); return true; } @@ -258,19 +491,40 @@ public synchronized boolean tryAllocate( } private PipeMemoryBlock registerMemoryBlock(long sizeInBytes) { - return registerMemoryBlock(sizeInBytes, false); + return registerMemoryBlock(sizeInBytes, PipeMemoryBlockType.NORMAL); } - private PipeMemoryBlock registerMemoryBlock(long sizeInBytes, boolean isForTablet) { - usedMemorySizeInBytes += sizeInBytes; + private PipeMemoryBlock registerMemoryBlock(long sizeInBytes, PipeMemoryBlockType type) { + final PipeMemoryBlock returnedMemoryBlock; + switch (type) { + case TABLET: + returnedMemoryBlock = new PipeTabletMemoryBlock(sizeInBytes); + break; + case TS_FILE: + returnedMemoryBlock = new PipeTsFileMemoryBlock(sizeInBytes); + break; + case BATCH: + case WAL: + returnedMemoryBlock = + new PipeModelFixedMemoryBlock(sizeInBytes, new ThresholdAllocationStrategy()); + break; + default: + returnedMemoryBlock = new PipeMemoryBlock(sizeInBytes); + break; + } + + // For memory block whose size is 0, we do not need to add it to the allocated blocks now. + // It's good for performance and will not trigger concurrent issues. + // If forceResize is called on it, we will add it to the allocated blocks. + if (sizeInBytes > 0) { + usedMemorySizeInBytes += sizeInBytes; + allocatedBlocks.add(returnedMemoryBlock); + } - final PipeMemoryBlock returnedMemoryBlock = - isForTablet ? new PipeTabletMemoryBlock(sizeInBytes) : new PipeMemoryBlock(sizeInBytes); - allocatedBlocks.add(returnedMemoryBlock); return returnedMemoryBlock; } - private boolean tryShrink4Allocate(long sizeInBytes) { + private boolean tryShrinkUntilFreeMemorySatisfy(long sizeInBytes) { final List shuffledBlocks = new ArrayList<>(allocatedBlocks); Collections.shuffle(shuffledBlocks); @@ -279,7 +533,7 @@ private boolean tryShrink4Allocate(long sizeInBytes) { for (final PipeMemoryBlock block : shuffledBlocks) { if (block.shrink()) { hasAtLeastOneBlockShrinkable = true; - if (TOTAL_MEMORY_SIZE_IN_BYTES - usedMemorySizeInBytes >= sizeInBytes) { + if (getTotalNonFloatingMemorySizeInBytes() - usedMemorySizeInBytes >= sizeInBytes) { return true; } } @@ -290,8 +544,44 @@ private boolean tryShrink4Allocate(long sizeInBytes) { } } - public synchronized void tryExpandAll() { + public synchronized void tryExpandAllAndCheckConsistency() { allocatedBlocks.forEach(PipeMemoryBlock::expand); + + long blockSum = + allocatedBlocks.stream().mapToLong(PipeMemoryBlock::getMemoryUsageInBytes).sum(); + if (blockSum != usedMemorySizeInBytes) { + LOGGER.warn( + "tryExpandAllAndCheckConsistency: memory usage is not consistent with allocated blocks," + + " usedMemorySizeInBytes is {} but sum of all blocks is {}", + usedMemorySizeInBytes, + blockSum); + } + + long tabletBlockSum = + allocatedBlocks.stream() + .filter(PipeTabletMemoryBlock.class::isInstance) + .mapToLong(PipeMemoryBlock::getMemoryUsageInBytes) + .sum(); + if (tabletBlockSum != usedMemorySizeInBytesOfTablets) { + LOGGER.warn( + "tryExpandAllAndCheckConsistency: memory usage of tablets is not consistent with allocated blocks," + + " usedMemorySizeInBytesOfTablets is {} but sum of all tablet blocks is {}", + usedMemorySizeInBytesOfTablets, + tabletBlockSum); + } + + 
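+    // Apply the same invariant to the tsfile counter: usedMemorySizeInBytesOfTsFiles must
+    // equal the sum of all PipeTsFileMemoryBlock sizes; a mismatch indicates an accounting leak.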
long tsFileBlockSum = + allocatedBlocks.stream() + .filter(PipeTsFileMemoryBlock.class::isInstance) + .mapToLong(PipeMemoryBlock::getMemoryUsageInBytes) + .sum(); + if (tsFileBlockSum != usedMemorySizeInBytesOfTsFiles) { + LOGGER.warn( + "tryExpandAllAndCheckConsistency: memory usage of tsfiles is not consistent with allocated blocks," + + " usedMemorySizeInBytesOfTsFiles is {} but sum of all tsfile blocks is {}", + usedMemorySizeInBytesOfTsFiles, + tsFileBlockSum); + } } public synchronized void release(PipeMemoryBlock block) { @@ -304,6 +594,9 @@ public synchronized void release(PipeMemoryBlock block) { if (block instanceof PipeTabletMemoryBlock) { usedMemorySizeInBytesOfTablets -= block.getMemoryUsageInBytes(); } + if (block instanceof PipeTsFileMemoryBlock) { + usedMemorySizeInBytesOfTsFiles -= block.getMemoryUsageInBytes(); + } block.markAsReleased(); this.notifyAll(); @@ -318,6 +611,9 @@ public synchronized boolean release(PipeMemoryBlock block, long sizeInBytes) { if (block instanceof PipeTabletMemoryBlock) { usedMemorySizeInBytesOfTablets -= sizeInBytes; } + if (block instanceof PipeTsFileMemoryBlock) { + usedMemorySizeInBytesOfTsFiles -= sizeInBytes; + } block.setMemoryUsageInBytes(block.getMemoryUsageInBytes() - sizeInBytes); this.notifyAll(); @@ -329,7 +625,37 @@ public long getUsedMemorySizeInBytes() { return usedMemorySizeInBytes; } - public long getTotalMemorySizeInBytes() { + public long getUsedMemorySizeInBytesOfTablets() { + return usedMemorySizeInBytesOfTablets; + } + + public long getUsedMemorySizeInBytesOfTsFiles() { + return usedMemorySizeInBytesOfTsFiles; + } + + public long getAllocatedMemorySizeInBytesOfBatch() { + return (long) + (PipeConfig.getInstance().getPipeDataStructureBatchMemoryProportion() + * getTotalNonFloatingMemorySizeInBytes()); + } + + public long getFreeMemorySizeInBytes() { + return TOTAL_MEMORY_SIZE_IN_BYTES - usedMemorySizeInBytes; + } + + public static long getTotalNonFloatingMemorySizeInBytes() { + return (long) + (TOTAL_MEMORY_SIZE_IN_BYTES + * (1 - PipeConfig.getInstance().getPipeTotalFloatingMemoryProportion())); + } + + public static long getTotalFloatingMemorySizeInBytes() { + return (long) + (TOTAL_MEMORY_SIZE_IN_BYTES + * PipeConfig.getInstance().getPipeTotalFloatingMemoryProportion()); + } + + public static long getTotalMemorySizeInBytes() { return TOTAL_MEMORY_SIZE_IN_BYTES; } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryWeightUtil.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryWeightUtil.java index f93d8f33fbf15..d82c26144d6c6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryWeightUtil.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryWeightUtil.java @@ -19,11 +19,18 @@ package org.apache.iotdb.db.pipe.resource.memory; +import org.apache.iotdb.commons.pipe.config.PipeConfig; +import org.apache.iotdb.db.pipe.event.common.row.PipeRow; import org.apache.iotdb.db.utils.MemUtils; import org.apache.tsfile.enums.TSDataType; import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.read.common.BatchData; +import org.apache.tsfile.read.common.Field; +import org.apache.tsfile.read.common.RowRecord; import org.apache.tsfile.utils.Binary; +import org.apache.tsfile.utils.Pair; +import org.apache.tsfile.utils.TsPrimitiveType; import org.apache.tsfile.write.record.Tablet; import org.apache.tsfile.write.schema.MeasurementSchema; @@ -62,6 
+69,128 @@ public static long memoryOfIDeviceID2StrList(Map> map) { return usageInBytes + 16L; // add the overhead of map } + /** + * Given a row of a tablet, calculate the row count and memory cost of the pipe tablet that will + * be constructed according to config. + * + * @return left is the row count of tablet, right is the memory cost of tablet in bytes + */ + public static Pair calculateTabletRowCountAndMemory(RowRecord row) { + int totalSizeInBytes = 0; + + // timestamp + totalSizeInBytes += 8L; + + // values + final List fields = row.getFields(); + int schemaCount = 0; + if (fields != null) { + schemaCount = fields.size(); + for (final Field field : fields) { + if (field == null) { + continue; + } + + final TSDataType tsDataType = field.getDataType(); + if (tsDataType == null) { + continue; + } + + if (tsDataType.isBinary()) { + final Binary binary = field.getBinaryV(); + totalSizeInBytes += binary == null ? 8 : binary.ramBytesUsed(); + } else { + totalSizeInBytes += + roundUpToMultiple(TsPrimitiveType.getByType(tsDataType).getSize() + 8, 8); + } + } + } + + return calculateTabletRowCountAndMemoryBySize(totalSizeInBytes, schemaCount); + } + + /** + * Given a BatchData, calculate the row count and memory cost of the pipe tablet that will be + * constructed according to config. + * + * @return left is the row count of tablet, right is the memory cost of tablet in bytes + */ + public static Pair calculateTabletRowCountAndMemory(BatchData batchData) { + int totalSizeInBytes = 0; + int schemaCount = 0; + + // timestamp + totalSizeInBytes += 8L; + + // values + final TSDataType type = batchData.getDataType(); + if (type != null) { + if (type == TSDataType.VECTOR && batchData.getVector() != null) { + schemaCount = batchData.getVector().length; + for (int i = 0; i < schemaCount; ++i) { + final TsPrimitiveType primitiveType = batchData.getVector()[i]; + if (primitiveType == null || primitiveType.getDataType() == null) { + continue; + } + + if (primitiveType.getDataType().isBinary()) { + final Binary binary = primitiveType.getBinary(); + totalSizeInBytes += binary == null ? 8 : binary.ramBytesUsed(); + } else { + totalSizeInBytes += roundUpToMultiple(primitiveType.getSize() + 8, 8); + } + } + } else { + schemaCount = 1; + if (type.isBinary()) { + final Binary binary = batchData.getBinary(); + totalSizeInBytes += binary == null ? 8 : binary.ramBytesUsed(); + } else { + totalSizeInBytes += roundUpToMultiple(TsPrimitiveType.getByType(type).getSize() + 8, 8); + } + } + } + + return calculateTabletRowCountAndMemoryBySize(totalSizeInBytes, schemaCount); + } + + /** + * Given a row of a tablet, calculate the row count and memory cost of the pipe tablet that will + * be constructed according to config. + * + * @return left is the row count of tablet, right is the memory cost of tablet in bytes + */ + public static Pair calculateTabletRowCountAndMemory(PipeRow row) { + return calculateTabletRowCountAndMemoryBySize(row.getCurrentRowSize(), row.size()); + } + + private static Pair calculateTabletRowCountAndMemoryBySize( + int rowSize, int schemaCount) { + if (rowSize <= 0) { + return new Pair<>(1, 0); + } + + // Calculate row number according to the max size of a pipe tablet. + // "-100" is the estimated size of other data structures in a pipe tablet. + // "*8" converts bytes to bits, because the bitmap size is 1 bit per schema. 
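+    // Worked example with assumed values: a 64 KiB tablet budget, 32-byte rows and 4 columns
+    // give rowNumber = 8 * (65536 - 100) / (8 * 32 + 4) = 523488 / 260 = 2013 rows. The
+    // schemaCount term in the denominator accounts for the bitmap bit each column adds per row.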
+ int rowNumber = + 8 + * (PipeConfig.getInstance().getPipeDataStructureTabletSizeInBytes() - 100) + / (8 * rowSize + schemaCount); + rowNumber = Math.max(1, rowNumber); + + if ( // This means the row number is larger than the max row count of a pipe tablet + rowNumber > PipeConfig.getInstance().getPipeDataStructureTabletRowSize()) { + // Bound the row number, the memory cost is rowSize * rowNumber + return new Pair<>( + PipeConfig.getInstance().getPipeDataStructureTabletRowSize(), + rowSize * PipeConfig.getInstance().getPipeDataStructureTabletRowSize()); + } else { + return new Pair<>( + rowNumber, PipeConfig.getInstance().getPipeDataStructureTabletSizeInBytes()); + } + } + public static long calculateTabletSizeInBytes(Tablet tablet) { long totalSizeInBytes = 0; @@ -97,11 +226,10 @@ public static long calculateTabletSizeInBytes(Tablet tablet) { continue; } for (Binary value : values) { - totalSizeInBytes += - value == null ? 0 : (value.getLength() == -1 ? 0 : value.getLength()); + totalSizeInBytes += value == null ? 8 : value.ramBytesUsed(); } } else { - totalSizeInBytes += (long) tablet.timestamps.length * tsDataType.getDataTypeSize(); + totalSizeInBytes += (long) tablet.getMaxRowNumber() * tsDataType.getDataTypeSize(); } } } @@ -118,4 +246,52 @@ public static long calculateTabletSizeInBytes(Tablet tablet) { return totalSizeInBytes; } + + public static int calculateBatchDataRamBytesUsed(BatchData batchData) { + int totalSizeInBytes = 0; + + // timestamp + totalSizeInBytes += 8; + + // values + final TSDataType type = batchData.getDataType(); + if (type != null) { + if (type == TSDataType.VECTOR && batchData.getVector() != null) { + for (int i = 0; i < batchData.getVector().length; ++i) { + final TsPrimitiveType primitiveType = batchData.getVector()[i]; + if (primitiveType == null || primitiveType.getDataType() == null) { + continue; + } + // consider variable references (plus 8) and memory alignment (round up to 8) + totalSizeInBytes += roundUpToMultiple(primitiveType.getSize() + 8, 8); + } + } else { + if (type.isBinary()) { + final Binary binary = batchData.getBinary(); + // refer to org.apache.tsfile.utils.TsPrimitiveType.TsBinary.getSize + totalSizeInBytes += + roundUpToMultiple((binary == null ? 8 : binary.getLength() + 8) + 8, 8); + } else { + totalSizeInBytes += roundUpToMultiple(TsPrimitiveType.getByType(type).getSize() + 8, 8); + } + } + } + + return batchData.length() * totalSizeInBytes; + } + + /** + * Rounds up the given integer num to the nearest multiple of n. + * + * @param num The integer to be rounded up. + * @param n The specified multiple. + * @return The nearest multiple of n greater than or equal to num. + */ + private static int roundUpToMultiple(int num, int n) { + if (n == 0) { + throw new IllegalArgumentException("The multiple n must be greater than 0"); + } + // Calculate the rounded up value to the nearest multiple of n + return ((num + n - 1) / n) * n; + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeModelFixedMemoryBlock.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeModelFixedMemoryBlock.java new file mode 100644 index 0000000000000..647fb81a4b91c --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeModelFixedMemoryBlock.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.pipe.resource.memory;
+
+import org.apache.iotdb.db.pipe.resource.memory.strategy.DynamicMemoryAllocationStrategy;
+
+import java.util.Collections;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.stream.Stream;
+
+public class PipeModelFixedMemoryBlock extends PipeFixedMemoryBlock {
+
+  private final Set<PipeDynamicMemoryBlock> memoryBlocks =
+      Collections.newSetFromMap(new ConcurrentHashMap<>());
+
+  private final DynamicMemoryAllocationStrategy allocationStrategy;
+
+  private volatile long memoryAllocatedInBytes;
+
+  public PipeModelFixedMemoryBlock(
+      final long memoryUsageInBytes, final DynamicMemoryAllocationStrategy allocationStrategy) {
+    super(memoryUsageInBytes);
+    this.memoryAllocatedInBytes = 0;
+    this.allocationStrategy = allocationStrategy;
+  }
+
+  public synchronized PipeDynamicMemoryBlock registerPipeBatchMemoryBlock(
+      final long memorySizeInBytes) {
+    final PipeDynamicMemoryBlock memoryBlock = new PipeDynamicMemoryBlock(this, 0);
+    memoryBlocks.add(memoryBlock);
+    if (memorySizeInBytes != 0) {
+      resetMemoryBlockSize(memoryBlock, memorySizeInBytes);
+      double e = (double) getMemoryUsageInBytes() / memorySizeInBytes;
+      memoryBlock.updateMemoryEfficiency(e, e);
+      return memoryBlock;
+    }
+
+    memoryBlock.updateMemoryEfficiency(0.0, 0.0);
+    return memoryBlock;
+  }
+
+  @Override
+  public synchronized boolean expand() {
+    // Ensure that the memory block that gets most of the memory is released first, which can
+    // reduce the jitter of memory allocation. If a memory block is not expanded, it will not be
+    // expanded again. This function not only completes the expansion but also the reduction.
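+    // The fixed block itself never grows: this override only redistributes the fixed budget
+    // among the registered dynamic children (largest first), which is why it returns false.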
+ memoryBlocks.stream() + .sorted((a, b) -> Long.compare(b.getMemoryUsageInBytes(), a.getMemoryUsageInBytes())) + .forEach(PipeDynamicMemoryBlock::doExpand); + return false; + } + + public long getMemoryAllocatedInBytes() { + return memoryAllocatedInBytes; + } + + public synchronized Set getMemoryBlocks() { + return memoryBlocks; + } + + synchronized void releaseMemory(final PipeDynamicMemoryBlock memoryBlock) { + resetMemoryBlockSize(memoryBlock, 0); + memoryBlocks.remove(memoryBlock); + } + + synchronized void dynamicallyAdjustMemory(final PipeDynamicMemoryBlock block) { + if (this.isReleased() || block.isReleased() || !memoryBlocks.contains(block)) { + throw new IllegalStateException("The memory block has been released"); + } + allocationStrategy.dynamicallyAdjustMemory(block); + } + + synchronized void resetMemoryBlockSize( + final PipeDynamicMemoryBlock block, final long memorySizeInBytes) { + if (this.isReleased() || block.isReleased() || !memoryBlocks.contains(block)) { + throw new IllegalStateException("The memory block has been released"); + } + + final long diff = memorySizeInBytes - block.getMemoryUsageInBytes(); + + // If the capacity is expanded, determine whether it will exceed the maximum value of the fixed + // module + if (getMemoryUsageInBytes() - memoryAllocatedInBytes < diff) { + // Pay attention to the order of calls, otherwise it will cause resource leakage + block.setMemoryUsageInBytes( + block.getMemoryUsageInBytes() + getMemoryUsageInBytes() - memoryAllocatedInBytes); + memoryAllocatedInBytes = getMemoryUsageInBytes(); + return; + } + + memoryAllocatedInBytes = memoryAllocatedInBytes + diff; + block.setMemoryUsageInBytes(memorySizeInBytes); + } + + Stream getMemoryBlocksStream() { + if (isReleased()) { + throw new IllegalStateException("The memory block has been released"); + } + return memoryBlocks.stream(); + } + + @Override + public synchronized void close() { + memoryBlocks.forEach(PipeDynamicMemoryBlock::close); + super.close(); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeTabletMemoryBlock.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeTabletMemoryBlock.java index ed23215874a72..529a2e1ac5c56 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeTabletMemoryBlock.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeTabletMemoryBlock.java @@ -19,46 +19,9 @@ package org.apache.iotdb.db.pipe.resource.memory; -import java.util.function.BiConsumer; -import java.util.function.LongUnaryOperator; - -public class PipeTabletMemoryBlock extends PipeMemoryBlock { +public class PipeTabletMemoryBlock extends PipeFixedMemoryBlock { public PipeTabletMemoryBlock(long memoryUsageInBytes) { super(memoryUsageInBytes); } - - @Override - boolean shrink() { - return false; - } - - @Override - boolean expand() { - return false; - } - - @Override - public PipeMemoryBlock setShrinkMethod(LongUnaryOperator shrinkMethod) { - throw new UnsupportedOperationException( - "Shrink method is not supported in PipeTabletMemoryBlock"); - } - - @Override - public PipeMemoryBlock setShrinkCallback(BiConsumer shrinkCallback) { - throw new UnsupportedOperationException( - "Shrink callback is not supported in PipeTabletMemoryBlock"); - } - - @Override - public PipeMemoryBlock setExpandMethod(LongUnaryOperator extendMethod) { - throw new UnsupportedOperationException( - "Expand method is not supported in PipeTabletMemoryBlock"); - } - - 
@Override - public PipeMemoryBlock setExpandCallback(BiConsumer expandCallback) { - throw new UnsupportedOperationException( - "Expand callback is not supported in PipeTabletMemoryBlock"); - } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeTsFileMemoryBlock.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeTsFileMemoryBlock.java new file mode 100644 index 0000000000000..268388d080009 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeTsFileMemoryBlock.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.pipe.resource.memory; + +public class PipeTsFileMemoryBlock extends PipeFixedMemoryBlock { + + public PipeTsFileMemoryBlock(long memoryUsageInBytes) { + super(memoryUsageInBytes); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/strategy/DynamicMemoryAllocationStrategy.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/strategy/DynamicMemoryAllocationStrategy.java new file mode 100644 index 0000000000000..8e5ba9af05335 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/strategy/DynamicMemoryAllocationStrategy.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.pipe.resource.memory.strategy; + +import org.apache.iotdb.db.pipe.resource.memory.PipeDynamicMemoryBlock; + +// Now let's define the operation memory behavior: Producers produce memory, consumers consume +// memory, and in order to ensure that consumers do not encounter back pressure, the memory that +// consumers need to use is allocated in advance. Consumer instances obtain their expected memory +// through allocation strategies, and the total memory of all consumer instances must not be greater +// than the pre-allocated memory. 
The memory allocation algorithm is to adjust the memory of
+// consumers so that the consumption rate reaches its optimum.
+public interface DynamicMemoryAllocationStrategy {
+
+  void dynamicallyAdjustMemory(PipeDynamicMemoryBlock dynamicMemoryBlock);
+}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/strategy/ThresholdAllocationStrategy.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/strategy/ThresholdAllocationStrategy.java
new file mode 100644
index 0000000000000..72a390f799e1c
--- /dev/null
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/strategy/ThresholdAllocationStrategy.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.pipe.resource.memory.strategy;
+
+import org.apache.iotdb.commons.pipe.config.PipeConfig;
+import org.apache.iotdb.db.pipe.resource.memory.PipeDynamicMemoryBlock;
+
+import org.apache.tsfile.utils.Pair;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+// The algorithm is optimized for different scenarios; the following describes its behavior in
+// each:
+// 1. When the memory is large enough, it will try to allocate memory to the memory block
+// 2. 
When the memory is insufficient, the algorithm will try to ensure that the memory with a +// relatively large memory share is released +public class ThresholdAllocationStrategy implements DynamicMemoryAllocationStrategy { + + private static final PipeConfig PIPE_CONFIG = PipeConfig.getInstance(); + + @Override + public void dynamicallyAdjustMemory(final PipeDynamicMemoryBlock dynamicMemoryBlock) { + final double deficitRatio = calculateDeficitRatio(dynamicMemoryBlock); + final long oldMemoryUsageInBytes = dynamicMemoryBlock.getMemoryUsageInBytes(); + final long expectedMemory = (long) (oldMemoryUsageInBytes / deficitRatio); + final double memoryBlockUsageRatio = dynamicMemoryBlock.getMemoryBlockUsageRatio(); + final long maximumMemoryIncrease = + (long) + (dynamicMemoryBlock.getFixedMemoryCapacity() + * PIPE_CONFIG.getPipeThresholdAllocationStrategyMaximumMemoryIncrementRatio()); + + // Avoid overflow and infinite values + if (deficitRatio <= 0.0 || oldMemoryUsageInBytes == 0 || expectedMemory == 0) { + dynamicMemoryBlock.applyForDynamicMemory(maximumMemoryIncrease); + final double efficiencyRatio = + (double) dynamicMemoryBlock.getMemoryUsageInBytes() / maximumMemoryIncrease; + dynamicMemoryBlock.updateMemoryEfficiency(efficiencyRatio, efficiencyRatio); + return; + } + + // No matter what, give priority to applying for memory use, and adjust the memory size when the + // memory is insufficient + final double lowUsageThreshold = + PIPE_CONFIG.getPipeThresholdAllocationStrategyLowUsageThreshold(); + if (dynamicMemoryBlock.getFixedMemoryBlockUsageRatio() + < PIPE_CONFIG.getPipeThresholdAllocationStrategyFixedMemoryHighUsageThreshold()) { + if (deficitRatio >= 1.0) { + return; + } + + final long maxAvailableMemory = + Math.min(expectedMemory, dynamicMemoryBlock.canAllocateMemorySize()); + long newMemoryRequest; + + // Need to ensure that you get memory in smaller chunks and get more memory faster + if (memoryBlockUsageRatio > lowUsageThreshold) { + newMemoryRequest = + Math.min(oldMemoryUsageInBytes + oldMemoryUsageInBytes / 2, maxAvailableMemory); + } else { + newMemoryRequest = Math.min(oldMemoryUsageInBytes * 2, maxAvailableMemory); + } + + dynamicMemoryBlock.applyForDynamicMemory(newMemoryRequest); + final double efficiencyRatio = + dynamicMemoryBlock.getMemoryUsageInBytes() / (double) expectedMemory; + dynamicMemoryBlock.updateMemoryEfficiency(efficiencyRatio, efficiencyRatio); + return; + } + + // Entering this logic means that the memory is insufficient and the memory allocation needs to + // be adjusted + final AtomicBoolean isMemoryNotEnough = new AtomicBoolean(false); + final double averageDeficitRatio = + dynamicMemoryBlock + .getMemoryBlocks() + .mapToDouble( + block -> { + double ratio = calculateDeficitRatio(block); + if (block.getMemoryUsageInBytes() == 0 || ratio == 0.0) { + isMemoryNotEnough.set(true); + } + return ratio; + }) + .average() + .orElse(1.0); + + final double adjustmentThreshold = PIPE_CONFIG.getPipeDynamicMemoryAdjustmentThreshold(); + // When memory is insufficient, try to ensure that smaller memory blocks apply for less memory, + // and larger memory blocks release more memory. + final double diff = + isMemoryNotEnough.get() && averageDeficitRatio > 2 * adjustmentThreshold + ? 
averageDeficitRatio - deficitRatio - adjustmentThreshold + : averageDeficitRatio - deficitRatio; + + if (Math.abs(diff) > PIPE_CONFIG.getPipeDynamicMemoryAdjustmentThreshold()) { + final long mem = (long) ((dynamicMemoryBlock.getMemoryUsageInBytes() / deficitRatio) * diff); + dynamicMemoryBlock.applyForDynamicMemory(dynamicMemoryBlock.getMemoryUsageInBytes() + mem); + if (oldMemoryUsageInBytes != dynamicMemoryBlock.getMemoryUsageInBytes()) { + final double efficiencyRatio = + dynamicMemoryBlock.getMemoryUsageInBytes() / (double) expectedMemory; + dynamicMemoryBlock.updateMemoryEfficiency(efficiencyRatio, efficiencyRatio); + } + } else if (memoryBlockUsageRatio > lowUsageThreshold + && memoryBlockUsageRatio > dynamicMemoryBlock.getExpectedAverageAllocatedMemorySize()) { + // If there is insufficient memory, some memory must be released + dynamicMemoryBlock.applyForDynamicMemory(oldMemoryUsageInBytes / 2); + dynamicMemoryBlock.updateMemoryEfficiency(deficitRatio / 2, deficitRatio / 2); + } + } + + private double calculateDeficitRatio(final PipeDynamicMemoryBlock block) { + final Pair memoryEfficiency = block.getMemoryEfficiency(); + double pipeDynamicMemoryHistoryWeight = PIPE_CONFIG.getPipeDynamicMemoryHistoryWeight(); + return (1 - pipeDynamicMemoryHistoryWeight) * memoryEfficiency.getRight() + + pipeDynamicMemoryHistoryWeight * memoryEfficiency.getLeft(); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/ref/PipeDataNodePhantomReferenceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/ref/PipeDataNodePhantomReferenceManager.java new file mode 100644 index 0000000000000..991cfebe8fac4 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/ref/PipeDataNodePhantomReferenceManager.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
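As a reading aid, here is a minimal, self-contained sketch of the threshold idea implemented by ThresholdAllocationStrategy above: efficiency is blended with a history weight to form a deficit ratio, and a busier block grows in half-steps while a low-usage one may double. All names and constants (SimpleBlock, HISTORY_WEIGHT, LOW_USAGE_THRESHOLD) are illustrative stand-ins, not the actual PipeDynamicMemoryBlock or PipeConfig API.

```java
public class ThresholdSketch {

  /** Illustrative stand-in for a dynamic memory block; not the IoTDB API. */
  static final class SimpleBlock {
    long usageInBytes;
    double historicalEfficiency; // "left" of the efficiency pair
    double currentEfficiency; // "right" of the efficiency pair

    SimpleBlock(long usageInBytes, double historical, double current) {
      this.usageInBytes = usageInBytes;
      this.historicalEfficiency = historical;
      this.currentEfficiency = current;
    }
  }

  // Assumed constants, standing in for the configurable thresholds.
  static final double HISTORY_WEIGHT = 0.5;
  static final double LOW_USAGE_THRESHOLD = 0.2;

  /** Weighted blend of historical and current efficiency, as in calculateDeficitRatio. */
  static double deficitRatio(SimpleBlock b) {
    return (1 - HISTORY_WEIGHT) * b.currentEfficiency + HISTORY_WEIGHT * b.historicalEfficiency;
  }

  /** Growth policy when memory is plentiful: half-steps for busy blocks, doubling otherwise. */
  static long nextRequest(SimpleBlock b, double usageRatio, long maxAvailable) {
    long target =
        usageRatio > LOW_USAGE_THRESHOLD
            ? b.usageInBytes + b.usageInBytes / 2 // grow by 50%
            : b.usageInBytes * 2; // double
    return Math.min(target, maxAvailable);
  }

  public static void main(String[] args) {
    SimpleBlock block = new SimpleBlock(1024, 0.9, 0.6);
    System.out.printf("deficit ratio = %.2f%n", deficitRatio(block)); // 0.75
    System.out.println("next request (busy block) = " + nextRequest(block, 0.5, 8192)); // 1536
    System.out.println("next request (idle block) = " + nextRequest(block, 0.1, 8192)); // 2048
  }
}
```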
+ */ + +package org.apache.iotdb.db.pipe.resource.ref; + +import org.apache.iotdb.commons.pipe.config.PipeConfig; +import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager; +import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; + +public class PipeDataNodePhantomReferenceManager extends PipePhantomReferenceManager { + + public PipeDataNodePhantomReferenceManager() { + super(); + + PipeDataNodeAgent.runtime() + .registerPhantomReferenceCleanJob( + "PipePhantomReferenceManager#gcHook()", + // NOTE: lambda CAN NOT be replaced with method reference + () -> super.gcHook(), + PipeConfig.getInstance().getPipeEventReferenceEliminateIntervalSeconds()); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/snapshot/PipeDataNodeSnapshotResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/snapshot/PipeDataNodeSnapshotResourceManager.java index 8e286a9a5693b..8e197d31a9bde 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/snapshot/PipeDataNodeSnapshotResourceManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/snapshot/PipeDataNodeSnapshotResourceManager.java @@ -20,7 +20,7 @@ package org.apache.iotdb.db.pipe.resource.snapshot; import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.pipe.resource.PipeSnapshotResourceManager; +import org.apache.iotdb.commons.pipe.resource.snapshot.PipeSnapshotResourceManager; import java.util.Arrays; import java.util.HashSet; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeCompactionManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeCompactionManager.java new file mode 100644 index 0000000000000..0a4f7cb97af0d --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeCompactionManager.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
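PipeDataNodePhantomReferenceManager above only wires a periodic gcHook() into the runtime agent; the mechanism underneath is the standard phantom-reference cleanup pattern. A rough, generic sketch of that pattern, assuming a hypothetical Owner type and an explicit cleanup action (this is not the actual PipePhantomReferenceManager implementation):

```java
import java.lang.ref.PhantomReference;
import java.lang.ref.ReferenceQueue;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

/** Generic sketch: release a resource once its owner becomes unreachable. */
public class PhantomCleanupSketch {

  static final class Owner {} // stands in for a pipe event holding a resource

  /** Phantom reference that remembers how to clean up after its referent is GC'ed. */
  static final class CleanupRef extends PhantomReference<Owner> {
    final Runnable cleanup;

    CleanupRef(Owner owner, ReferenceQueue<Owner> queue, Runnable cleanup) {
      super(owner, queue);
      this.cleanup = cleanup;
    }
  }

  private final ReferenceQueue<Owner> queue = new ReferenceQueue<>();
  // Keep the references themselves strongly reachable, or they would be collected too.
  private final Set<CleanupRef> pending = ConcurrentHashMap.newKeySet();

  void track(Owner owner, Runnable cleanup) {
    pending.add(new CleanupRef(owner, queue, cleanup));
  }

  /** The equivalent of a periodically scheduled gcHook(): drain the queue and clean up. */
  void gcHook() {
    CleanupRef ref;
    while ((ref = (CleanupRef) queue.poll()) != null) {
      pending.remove(ref);
      ref.cleanup.run();
    }
  }

  public static void main(String[] args) throws InterruptedException {
    PhantomCleanupSketch manager = new PhantomCleanupSketch();
    manager.track(new Owner(), () -> System.out.println("released leaked resource"));
    System.gc(); // best-effort; real code just polls on a schedule
    Thread.sleep(100);
    manager.gcHook();
  }
}
```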
+ */ + +package org.apache.iotdb.db.pipe.resource.tsfile; + +import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue; +import org.apache.iotdb.db.pipe.agent.task.subtask.sink.PipeRealtimePriorityBlockingQueue; +import org.apache.iotdb.db.pipe.agent.task.subtask.sink.PipeSinkSubtaskLifeCycle; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.pipe.api.event.Event; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArraySet; + +public class PipeCompactionManager { + + private final Set<PipeSinkSubtaskLifeCycle> pipeSinkSubtaskLifeCycles = + new CopyOnWriteArraySet<>(); + + public void registerPipeConnectorSubtaskLifeCycle( + final PipeSinkSubtaskLifeCycle pipeSinkSubtaskLifeCycle) { + pipeSinkSubtaskLifeCycles.add(pipeSinkSubtaskLifeCycle); + } + + public void deregisterPipeConnectorSubtaskLifeCycle( + final PipeSinkSubtaskLifeCycle pipeSinkSubtaskLifeCycle) { + pipeSinkSubtaskLifeCycles.remove(pipeSinkSubtaskLifeCycle); + } + + public void emitResult( + final String storageGroupName, + final String dataRegionId, + final long timePartition, + final List<TsFileResource> seqFileResources, + final List<TsFileResource> unseqFileResources, + final List<TsFileResource> targetFileResources) { + final Set<TsFileResource> sourceFileResources = new HashSet<>(seqFileResources); + sourceFileResources.addAll(unseqFileResources); + + for (final PipeSinkSubtaskLifeCycle lifeCycle : pipeSinkSubtaskLifeCycles) { + final UnboundedBlockingPendingQueue<Event> pendingQueue = lifeCycle.getPendingQueue(); + if (pendingQueue instanceof PipeRealtimePriorityBlockingQueue) { + final PipeRealtimePriorityBlockingQueue realtimePriorityBlockingQueue = + (PipeRealtimePriorityBlockingQueue) pendingQueue; + realtimePriorityBlockingQueue.replace( + dataRegionId, sourceFileResources, targetFileResources); + } + } + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFilePublicResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFilePublicResource.java new file mode 100644 index 0000000000000..47134fe117c95 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFilePublicResource.java @@ -0,0 +1,228 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
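PipeCompactionManager is essentially a listener registry: compaction tasks fan results out to every registered sink lifecycle, which then swaps stale file events in its pending queue. A stripped-down sketch of that pattern, with plain strings standing in for TsFileResource and a hypothetical CompactionListener interface in place of the real queue types:

```java
import java.util.List;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;

/** Generic sketch of the broadcast pattern: compaction results fan out to registered queues. */
public class CompactionBroadcastSketch {

  interface CompactionListener {
    void replace(String dataRegionId, Set<String> sourceFiles, List<String> targetFiles);
  }

  // CopyOnWriteArraySet: registration is rare, iteration on every compaction is frequent.
  private final Set<CompactionListener> listeners = new CopyOnWriteArraySet<>();

  void register(CompactionListener l) {
    listeners.add(l);
  }

  void deregister(CompactionListener l) {
    listeners.remove(l);
  }

  void emitResult(String dataRegionId, Set<String> sources, List<String> targets) {
    for (CompactionListener l : listeners) {
      l.replace(dataRegionId, sources, targets); // each queue swaps its stale file events
    }
  }

  public static void main(String[] args) {
    CompactionBroadcastSketch sketch = new CompactionBroadcastSketch();
    sketch.register(
        (region, src, dst) -> System.out.println("region " + region + ": " + src + " -> " + dst));
    sketch.emitResult("1", Set.of("a.tsfile", "b.tsfile"), List.of("merged.tsfile"));
  }
}
```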
+ */ + +package org.apache.iotdb.db.pipe.resource.tsfile; + +import org.apache.iotdb.commons.pipe.config.PipeConfig; +import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; +import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock; +import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil; + +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.read.TsFileDeviceIterator; +import org.apache.tsfile.read.TsFileSequenceReader; +import org.apache.tsfile.utils.Pair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class PipeTsFilePublicResource extends PipeTsFileResource { + private static final Logger LOGGER = LoggerFactory.getLogger(PipeTsFilePublicResource.class); + public static final float MEMORY_SUFFICIENT_THRESHOLD = 0.7f; + private PipeMemoryBlock allocatedMemoryBlock; + private Map<IDeviceID, List<String>> deviceMeasurementsMap = null; + private Map<IDeviceID, Boolean> deviceIsAlignedMap = null; + private Map<String, TSDataType> measurementDataTypeMap = null; + + public PipeTsFilePublicResource(File hardlinkOrCopiedFile) { + super(hardlinkOrCopiedFile); + } + + @Override + public void close() { + super.close(); + + if (deviceMeasurementsMap != null) { + deviceMeasurementsMap = null; + } + + if (deviceIsAlignedMap != null) { + deviceIsAlignedMap = null; + } + + if (measurementDataTypeMap != null) { + measurementDataTypeMap = null; + } + + if (allocatedMemoryBlock != null) { + allocatedMemoryBlock.close(); + allocatedMemoryBlock = null; + } + } + + //////////////////////////// Cache Getter //////////////////////////// + + public synchronized Map<IDeviceID, List<String>> tryGetDeviceMeasurementsMap(final File tsFile) + throws IOException { + if (deviceMeasurementsMap == null) { + cacheObjectsIfAbsent(tsFile); + } + return deviceMeasurementsMap; + } + + public synchronized Map<IDeviceID, Boolean> tryGetDeviceIsAlignedMap( + final boolean cacheOtherMetadata, final File tsFile) throws IOException { + if (deviceIsAlignedMap == null) { + if (cacheOtherMetadata) { + cacheObjectsIfAbsent(tsFile); + } else { + cacheDeviceIsAlignedMapIfAbsent(tsFile); + } + } + return deviceIsAlignedMap; + } + + public synchronized Map<String, TSDataType> tryGetMeasurementDataTypeMap(final File tsFile) + throws IOException { + if (measurementDataTypeMap == null) { + cacheObjectsIfAbsent(tsFile); + } + return measurementDataTypeMap; + } + + synchronized boolean cacheDeviceIsAlignedMapIfAbsent(final File tsFile) throws IOException { + + if (allocatedMemoryBlock != null) { + // This means objects are already cached. + return true; + } + + // See if pipe memory is sufficient to be allocated for TsFileSequenceReader. + // Only allocate when pipe memory used is less than 70% (MEMORY_SUFFICIENT_THRESHOLD), + // because memory here is hard to shrink and may consume too much memory.
+ allocatedMemoryBlock = + PipeDataNodeResourceManager.memory() + .forceAllocateIfSufficient( + PipeConfig.getInstance().getPipeMemoryAllocateForTsFileSequenceReaderInBytes(), + MEMORY_SUFFICIENT_THRESHOLD); + if (allocatedMemoryBlock == null) { + LOGGER.info( + "Failed to cacheDeviceIsAlignedMapIfAbsent for tsfile {}, because memory usage is high", + tsFile.getPath()); + return false; + } + + long memoryRequiredInBytes = 0L; + try (TsFileSequenceReader sequenceReader = + new TsFileSequenceReader(tsFile.getPath(), true, false)) { + deviceIsAlignedMap = new HashMap<>(); + final TsFileDeviceIterator deviceIsAlignedIterator = + sequenceReader.getAllDevicesIteratorWithIsAligned(); + while (deviceIsAlignedIterator.hasNext()) { + final Pair<IDeviceID, Boolean> deviceIsAlignedPair = deviceIsAlignedIterator.next(); + deviceIsAlignedMap.put(deviceIsAlignedPair.getLeft(), deviceIsAlignedPair.getRight()); + } + memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfIDeviceId2Bool(deviceIsAlignedMap); + } + // Release memory of TsFileSequenceReader. + allocatedMemoryBlock.close(); + allocatedMemoryBlock = null; + + // Allocate again for the cached objects. + allocatedMemoryBlock = + PipeDataNodeResourceManager.memory() + .forceAllocateIfSufficient(memoryRequiredInBytes, MEMORY_SUFFICIENT_THRESHOLD); + if (allocatedMemoryBlock == null) { + LOGGER.info( + "PipeTsFileResource: Failed to cache objects for tsfile {} in cache, because memory usage is high", + tsFile.getPath()); + deviceIsAlignedMap = null; + return false; + } + + LOGGER.info("PipeTsFileResource: Cached deviceIsAlignedMap for tsfile {}.", tsFile.getPath()); + return true; + } + + synchronized boolean cacheObjectsIfAbsent(final File tsFile) throws IOException { + if (allocatedMemoryBlock != null) { + if (deviceMeasurementsMap != null) { + return true; + } else { + // Recalculate, because only the deviceIsAligned map is cached + allocatedMemoryBlock.close(); + allocatedMemoryBlock = null; + } + } + + // See if pipe memory is sufficient to be allocated for TsFileSequenceReader. + // Only allocate when pipe memory used is less than 70% (MEMORY_SUFFICIENT_THRESHOLD), + // because memory here is hard to shrink and may consume too much memory.
+ allocatedMemoryBlock = + PipeDataNodeResourceManager.memory() + .forceAllocateIfSufficient( + PipeConfig.getInstance().getPipeMemoryAllocateForTsFileSequenceReaderInBytes(), + MEMORY_SUFFICIENT_THRESHOLD); + if (allocatedMemoryBlock == null) { + LOGGER.info( + "Failed to cacheObjectsIfAbsent for tsfile {}, because memory usage is high", + tsFile.getPath()); + return false; + } + + long memoryRequiredInBytes = 0L; + try (TsFileSequenceReader sequenceReader = + new TsFileSequenceReader(tsFile.getPath(), true, true)) { + deviceMeasurementsMap = sequenceReader.getDeviceMeasurementsMap(); + memoryRequiredInBytes += + PipeMemoryWeightUtil.memoryOfIDeviceID2StrList(deviceMeasurementsMap); + + if (Objects.isNull(deviceIsAlignedMap)) { + deviceIsAlignedMap = new HashMap<>(); + final TsFileDeviceIterator deviceIsAlignedIterator = + sequenceReader.getAllDevicesIteratorWithIsAligned(); + while (deviceIsAlignedIterator.hasNext()) { + final Pair<IDeviceID, Boolean> deviceIsAlignedPair = deviceIsAlignedIterator.next(); + deviceIsAlignedMap.put(deviceIsAlignedPair.getLeft(), deviceIsAlignedPair.getRight()); + } + } + memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfIDeviceId2Bool(deviceIsAlignedMap); + + measurementDataTypeMap = sequenceReader.getFullPathDataTypeMap(); + memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfStr2TSDataType(measurementDataTypeMap); + } + // Release memory of TsFileSequenceReader. + allocatedMemoryBlock.close(); + allocatedMemoryBlock = null; + + // Allocate again for the cached objects. + allocatedMemoryBlock = + PipeDataNodeResourceManager.memory() + .forceAllocateIfSufficient(memoryRequiredInBytes, MEMORY_SUFFICIENT_THRESHOLD); + if (allocatedMemoryBlock == null) { + LOGGER.info( + "PipeTsFileResource: Failed to cache objects for tsfile {} in cache, because memory usage is high", + tsFile.getPath()); + deviceIsAlignedMap = null; + deviceMeasurementsMap = null; + measurementDataTypeMap = null; + return false; + } + + LOGGER.info("PipeTsFileResource: Cached objects for tsfile {}.", tsFile.getPath()); + return true; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResource.java index 7bb67e781f2b3..8b37f87709447 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResource.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResource.java @@ -19,73 +19,33 @@ package org.apache.iotdb.db.pipe.resource.tsfile; -import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; -import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock; -import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil; -import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; -import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResourceStatus; - -import org.apache.tsfile.enums.TSDataType; -import org.apache.tsfile.file.metadata.IDeviceID; -import org.apache.tsfile.read.TsFileDeviceIterator; -import org.apache.tsfile.read.TsFileSequenceReader; -import org.apache.tsfile.utils.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; -import java.io.IOException; import java.nio.file.Files; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; import java.util.concurrent.atomic.AtomicInteger; -import
java.util.concurrent.atomic.AtomicLong; public class PipeTsFileResource implements AutoCloseable { private static final Logger LOGGER = LoggerFactory.getLogger(PipeTsFileResource.class); - public static final long TSFILE_MIN_TIME_TO_LIVE_IN_MS = 1000L * 20; - private static final float MEMORY_SUFFICIENT_THRESHOLD = 0.5f; - private final File hardlinkOrCopiedFile; - private final boolean isTsFile; - - /** this TsFileResource is used to track the {@link TsFileResourceStatus} of original TsFile. * */ - private final TsFileResource tsFileResource; private volatile long fileSize = -1L; private final AtomicInteger referenceCount; - private final AtomicLong lastUnpinToZeroTime; - private PipeMemoryBlock allocatedMemoryBlock; - private Map> deviceMeasurementsMap = null; - private Map deviceIsAlignedMap = null; - private Map measurementDataTypeMap = null; - public PipeTsFileResource( - final File hardlinkOrCopiedFile, - final boolean isTsFile, - final TsFileResource tsFileResource) { + public PipeTsFileResource(final File hardlinkOrCopiedFile) { this.hardlinkOrCopiedFile = hardlinkOrCopiedFile; - this.isTsFile = isTsFile; - this.tsFileResource = tsFileResource; referenceCount = new AtomicInteger(1); - lastUnpinToZeroTime = new AtomicLong(Long.MAX_VALUE); } public File getFile() { return hardlinkOrCopiedFile; } - public boolean isOriginalTsFileDeleted() { - return isTsFile && Objects.nonNull(tsFileResource) && tsFileResource.isDeleted(); - } - public long getFileSize() { if (fileSize == -1L) { synchronized (this) { @@ -103,216 +63,37 @@ public int getReferenceCount() { return referenceCount.get(); } - public int increaseAndGetReference() { - return referenceCount.addAndGet(1); + public void increaseReferenceCount() { + referenceCount.addAndGet(1); } - public int decreaseAndGetReference() { + public boolean decreaseReferenceCount() { final int finalReferenceCount = referenceCount.addAndGet(-1); if (finalReferenceCount == 0) { - lastUnpinToZeroTime.set(System.currentTimeMillis()); + close(); + return true; } if (finalReferenceCount < 0) { LOGGER.warn("PipeTsFileResource's reference count is decreased to below 0."); } - return finalReferenceCount; - } - - public synchronized boolean closeIfOutOfTimeToLive() throws IOException { - if (referenceCount.get() <= 0 - && (deviceMeasurementsMap == null // Not cached yet. 
- || System.currentTimeMillis() - lastUnpinToZeroTime.get() - > TSFILE_MIN_TIME_TO_LIVE_IN_MS)) { - close(); - return true; - } else { - return false; - } + return false; } @Override - public synchronized void close() throws IOException { - if (deviceMeasurementsMap != null) { - deviceMeasurementsMap = null; - } - - if (deviceIsAlignedMap != null) { - deviceIsAlignedMap = null; - } - - if (measurementDataTypeMap != null) { - measurementDataTypeMap = null; - } - - if (allocatedMemoryBlock != null) { - allocatedMemoryBlock.close(); - allocatedMemoryBlock = null; - } - - Files.deleteIfExists(hardlinkOrCopiedFile.toPath()); - - LOGGER.info("PipeTsFileResource: Closed tsfile {} and cleaned up.", hardlinkOrCopiedFile); - } - - //////////////////////////// Cache Getter //////////////////////////// - - public synchronized Map> tryGetDeviceMeasurementsMap() - throws IOException { - if (deviceMeasurementsMap == null && isTsFile) { - cacheObjectsIfAbsent(); - } - return deviceMeasurementsMap; - } - - public synchronized Map tryGetDeviceIsAlignedMap( - final boolean cacheOtherMetadata) throws IOException { - if (deviceIsAlignedMap == null && isTsFile) { - if (cacheOtherMetadata) { - cacheObjectsIfAbsent(); - } else { - cacheDeviceIsAlignedMapIfAbsent(); - } - } - return deviceIsAlignedMap; - } - - public synchronized Map tryGetMeasurementDataTypeMap() throws IOException { - if (measurementDataTypeMap == null && isTsFile) { - cacheObjectsIfAbsent(); - } - return measurementDataTypeMap; - } - - synchronized boolean cacheDeviceIsAlignedMapIfAbsent() throws IOException { - if (!isTsFile) { - return false; - } - - if (allocatedMemoryBlock != null) { - // This means objects are already cached. - return true; - } - - // See if pipe memory is sufficient to be allocated for TsFileSequenceReader. - // Only allocate when pipe memory used is less than 50%, because memory here - // is hard to shrink and may consume too much memory. - allocatedMemoryBlock = - PipeDataNodeResourceManager.memory() - .forceAllocateIfSufficient( - PipeConfig.getInstance().getPipeMemoryAllocateForTsFileSequenceReaderInBytes(), - MEMORY_SUFFICIENT_THRESHOLD); - if (allocatedMemoryBlock == null) { - LOGGER.info( - "PipeTsFileResource: Failed to create TsFileSequenceReader for tsfile {} in cache, because memory usage is high", - hardlinkOrCopiedFile.getPath()); - return false; - } - - long memoryRequiredInBytes = 0L; - try (TsFileSequenceReader sequenceReader = - new TsFileSequenceReader(hardlinkOrCopiedFile.getPath(), true, false)) { - deviceIsAlignedMap = new HashMap<>(); - final TsFileDeviceIterator deviceIsAlignedIterator = - sequenceReader.getAllDevicesIteratorWithIsAligned(); - while (deviceIsAlignedIterator.hasNext()) { - final Pair deviceIsAlignedPair = deviceIsAlignedIterator.next(); - deviceIsAlignedMap.put(deviceIsAlignedPair.getLeft(), deviceIsAlignedPair.getRight()); - } - memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfIDeviceId2Bool(deviceIsAlignedMap); - } - // Release memory of TsFileSequenceReader. - allocatedMemoryBlock.close(); - allocatedMemoryBlock = null; - - // Allocate again for the cached objects. 
- allocatedMemoryBlock = - PipeDataNodeResourceManager.memory() - .forceAllocateIfSufficient(memoryRequiredInBytes, MEMORY_SUFFICIENT_THRESHOLD); - if (allocatedMemoryBlock == null) { - LOGGER.info( - "PipeTsFileResource: Failed to cache objects for tsfile {} in cache, because memory usage is high", - hardlinkOrCopiedFile.getPath()); - deviceIsAlignedMap = null; - return false; - } - - LOGGER.info( - "PipeTsFileResource: Cached deviceIsAlignedMap for tsfile {}.", - hardlinkOrCopiedFile.getPath()); - return true; - } - - synchronized boolean cacheObjectsIfAbsent() throws IOException { - if (!isTsFile) { - return false; - } - - if (allocatedMemoryBlock != null) { - if (deviceMeasurementsMap != null) { - return true; - } else { - // Recalculate it again because only deviceIsAligned map is cached - allocatedMemoryBlock.close(); - allocatedMemoryBlock = null; - } + public synchronized void close() { + boolean successful = false; + try { + successful = Files.deleteIfExists(hardlinkOrCopiedFile.toPath()); + } catch (final Exception e) { + LOGGER.error( + "PipeTsFileResource: Failed to delete tsfile {} when closing, because {}. Please MANUALLY delete it.", + hardlinkOrCopiedFile, + e.getMessage(), + e); } - // See if pipe memory is sufficient to be allocated for TsFileSequenceReader. - // Only allocate when pipe memory used is less than 50%, because memory here - // is hard to shrink and may consume too much memory. - allocatedMemoryBlock = - PipeDataNodeResourceManager.memory() - .forceAllocateIfSufficient( - PipeConfig.getInstance().getPipeMemoryAllocateForTsFileSequenceReaderInBytes(), - MEMORY_SUFFICIENT_THRESHOLD); - if (allocatedMemoryBlock == null) { - LOGGER.info( - "PipeTsFileResource: Failed to create TsFileSequenceReader for tsfile {} in cache, because memory usage is high", - hardlinkOrCopiedFile.getPath()); - return false; + if (successful) { + LOGGER.info("PipeTsFileResource: Closed tsfile {} and cleaned up.", hardlinkOrCopiedFile); } - - long memoryRequiredInBytes = 0L; - try (TsFileSequenceReader sequenceReader = - new TsFileSequenceReader(hardlinkOrCopiedFile.getPath(), true, true)) { - deviceMeasurementsMap = sequenceReader.getDeviceMeasurementsMap(); - memoryRequiredInBytes += - PipeMemoryWeightUtil.memoryOfIDeviceID2StrList(deviceMeasurementsMap); - - if (Objects.isNull(deviceIsAlignedMap)) { - deviceIsAlignedMap = new HashMap<>(); - final TsFileDeviceIterator deviceIsAlignedIterator = - sequenceReader.getAllDevicesIteratorWithIsAligned(); - while (deviceIsAlignedIterator.hasNext()) { - final Pair deviceIsAlignedPair = deviceIsAlignedIterator.next(); - deviceIsAlignedMap.put(deviceIsAlignedPair.getLeft(), deviceIsAlignedPair.getRight()); - } - } - memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfIDeviceId2Bool(deviceIsAlignedMap); - - measurementDataTypeMap = sequenceReader.getFullPathDataTypeMap(); - memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfStr2TSDataType(measurementDataTypeMap); - } - // Release memory of TsFileSequenceReader. - allocatedMemoryBlock.close(); - allocatedMemoryBlock = null; - - // Allocate again for the cached objects. 
- allocatedMemoryBlock = - PipeDataNodeResourceManager.memory() - .forceAllocateIfSufficient(memoryRequiredInBytes, MEMORY_SUFFICIENT_THRESHOLD); - if (allocatedMemoryBlock == null) { - LOGGER.info( - "PipeTsFileResource: Failed to cache objects for tsfile {} in cache, because memory usage is high", - hardlinkOrCopiedFile.getPath()); - deviceIsAlignedMap = null; - deviceMeasurementsMap = null; - measurementDataTypeMap = null; - return false; - } - - LOGGER.info( - "PipeTsFileResource: Cached objects for tsfile {}.", hardlinkOrCopiedFile.getPath()); - return true; } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceManager.java index e736795612675..2bc3ebf12dc59 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceManager.java @@ -22,9 +22,7 @@ import org.apache.iotdb.commons.conf.IoTDBConstant; import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.commons.utils.FileUtils; -import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; -import org.apache.iotdb.db.pipe.agent.runtime.PipePeriodicalJobExecutor; -import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; +import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.db.storageengine.dataregion.modification.ModificationFile; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; @@ -33,79 +31,34 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + import java.io.File; import java.io.IOException; -import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Optional; +import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.ReentrantLock; public class PipeTsFileResourceManager { private static final Logger LOGGER = LoggerFactory.getLogger(PipeTsFileResourceManager.class); - private final Map hardlinkOrCopiedFileToPipeTsFileResourceMap = - new ConcurrentHashMap<>(); - private final ReentrantLock lock = new ReentrantLock(); - - public PipeTsFileResourceManager() { - PipeDataNodeAgent.runtime() - .registerPeriodicalJob( - "PipeTsFileResourceManager#ttlCheck()", - this::tryTtlCheck, - Math.max(PipeTsFileResource.TSFILE_MIN_TIME_TO_LIVE_IN_MS / 1000, 1)); - } + // This is used to hold the assigner pinned tsFiles. + // Also, it is used to provide metadata cache of the tsFile, and is shared by all the pipe's + // tsFiles. 
+ private final Map<String, PipeTsFilePublicResource> + hardlinkOrCopiedFileToTsFilePublicResourceMap = new ConcurrentHashMap<>(); - private void tryTtlCheck() { - try { - final long timeout = PipePeriodicalJobExecutor.getMinIntervalSeconds() >> 1; - if (lock.tryLock(timeout, TimeUnit.SECONDS)) { - try { - ttlCheck(); - } finally { - lock.unlock(); - } - } else { - LOGGER.warn("failed to try lock when checking TTL because of timeout ({}s)", timeout); - } - } catch (final InterruptedException e) { - Thread.currentThread().interrupt(); - LOGGER.warn("failed to try lock when checking TTL because of interruption", e); - } - } + // PipeName -> TsFilePath -> PipeTsFileResource + private final Map<String, Map<String, PipeTsFileResource>> + hardlinkOrCopiedFileToPipeTsFileResourceMap = new ConcurrentHashMap<>(); + private final PipeTsFileResourceSegmentLock segmentLock = new PipeTsFileResourceSegmentLock(); - private void ttlCheck() { - final Iterator<Map.Entry<String, PipeTsFileResource>> iterator = - hardlinkOrCopiedFileToPipeTsFileResourceMap.entrySet().iterator(); - final Optional<Logger> logger = - PipeDataNodeResourceManager.log() - .schedule( - PipeTsFileResourceManager.class, - PipeConfig.getInstance().getPipeTsFilePinMaxLogNumPerRound(), - PipeConfig.getInstance().getPipeTsFilePinMaxLogIntervalRounds(), - hardlinkOrCopiedFileToPipeTsFileResourceMap.size()); - - while (iterator.hasNext()) { - final Map.Entry<String, PipeTsFileResource> entry = iterator.next(); - - try { - if (entry.getValue().closeIfOutOfTimeToLive()) { - iterator.remove(); - } else { - logger.ifPresent( - l -> - l.info( - "Pipe file (file name: {}) is still referenced {} times", - entry.getKey(), - entry.getValue().getReferenceCount())); - } - } catch (final IOException e) { - LOGGER.warn("failed to close PipeTsFileResource when checking TTL: ", e); - } - } + public File increaseFileReference( + final File file, final boolean isTsFile, final @Nullable String pipeName) throws IOException { + return increaseFileReference(file, isTsFile, pipeName, null); } /** @@ -124,60 +77,96 @@ private void ttlCheck() { * @param file tsfile, resource file or mod file. can be original file or hardlink/copy of * original file * @param isTsFile {@code true} to create hardlink, {@code false} to copy file - * @param tsFileResource the TsFileResource of original TsFile. Ignored if {@param isTsFile} is - * {@code false}. + * @param pipeName Nonnull if the pipe is from historical or assigner -> extractors, null if it + * is dataRegion -> assigner + * @param sourceFile for internal use; the historical extractor uses this to create a hardlink + * from pipe tsFile -> common tsFile * @return the hardlink or copied file * @throws IOException when create hardlink or copy file failed */ - public File increaseFileReference( - final File file, final boolean isTsFile, final TsFileResource tsFileResource) + private File increaseFileReference( + final File file, + final boolean isTsFile, + final @Nullable String pipeName, + final @Nullable File sourceFile) throws IOException { - lock.lock(); - try { - // If the file is already a hardlink or copied file, - // just increase reference count and return it - if (increaseReferenceIfExists(file.getPath())) { - return file; - } + // If the file is already a hardlink or copied file, + // just increase reference count and return it + if (increaseReferenceIfExists(file, pipeName, isTsFile)) { + return file; + } - // If the file is not a hardlink or copied file, check if there is a related hardlink or - // copied file in pipe dir.
if so, increase reference count and return it - final File hardlinkOrCopiedFile = getHardlinkOrCopiedFileInPipeDir(file); - if (increaseReferenceIfExists(hardlinkOrCopiedFile.getPath())) { - return hardlinkOrCopiedFileToPipeTsFileResourceMap - .get(hardlinkOrCopiedFile.getPath()) - .getFile(); - } + // If the file is not a hardlink or copied file, check if there is a related hardlink or + // copied file in pipe dir. if so, increase reference count and return it + final File hardlinkOrCopiedFile = + Objects.isNull(sourceFile) ? getHardlinkOrCopiedFileInPipeDir(file, pipeName) : file; - // If the file is a tsfile, create a hardlink in pipe dir and will return it. - // otherwise, copy the file (.mod or .resource) to pipe dir and will return it. - final File resultFile = + if (increaseReferenceIfExists(hardlinkOrCopiedFile, pipeName, isTsFile)) { + return getResourceMap(pipeName).get(hardlinkOrCopiedFile.getPath()).getFile(); + } + + // If the file is a tsfile, create a hardlink in pipe dir and will return it. + // otherwise, copy the file (.mod or .resource) to pipe dir and will return it. + final File source = Objects.isNull(sourceFile) ? file : sourceFile; + final File resultFile; + + segmentLock.lock(hardlinkOrCopiedFile); + try { + resultFile = isTsFile - ? FileUtils.createHardLink(file, hardlinkOrCopiedFile) - : FileUtils.copyFile(file, hardlinkOrCopiedFile); + ? FileUtils.createHardLink(source, hardlinkOrCopiedFile) + : FileUtils.copyFile(source, hardlinkOrCopiedFile); + // If the file is not a hardlink or copied file, and there is no related hardlink or copied // file in pipe dir, create a hardlink or copy it to pipe dir, maintain a reference count for // the hardlink or copied file, and return the hardlink or copied file. - hardlinkOrCopiedFileToPipeTsFileResourceMap.put( - resultFile.getPath(), new PipeTsFileResource(resultFile, isTsFile, tsFileResource)); - return resultFile; + if (Objects.nonNull(pipeName)) { + hardlinkOrCopiedFileToPipeTsFileResourceMap + .computeIfAbsent(pipeName, k -> new ConcurrentHashMap<>()) + .put(resultFile.getPath(), new PipeTsFileResource(resultFile)); + } else { + hardlinkOrCopiedFileToTsFilePublicResourceMap.put( + resultFile.getPath(), new PipeTsFilePublicResource(resultFile)); + } + } finally { + segmentLock.unlock(hardlinkOrCopiedFile); + } + increasePublicReference(resultFile, pipeName, isTsFile); + return resultFile; + } + + private boolean increaseReferenceIfExists( + final File file, final @Nullable String pipeName, final boolean isTsFile) throws IOException { + segmentLock.lock(file); + try { + final String path = file.getPath(); + final PipeTsFileResource resource = getResourceMap(pipeName).get(path); + if (resource != null) { + resource.increaseReferenceCount(); + } else { + return false; + } } finally { - lock.unlock(); + segmentLock.unlock(file); } + increasePublicReference(file, pipeName, isTsFile); + return true; } - private boolean increaseReferenceIfExists(final String path) { - final PipeTsFileResource resource = hardlinkOrCopiedFileToPipeTsFileResourceMap.get(path); - if (resource != null) { - resource.increaseAndGetReference(); - return true; + private void increasePublicReference( + final File file, final String pipeName, final boolean isTsFile) throws IOException { + if (Objects.isNull(pipeName)) { + return; } - return false; + // Increase the assigner's file to avoid hard-link or memory cache cleaning + // Note that it does not exist for historical files + increaseFileReference(new File(getCommonFilePath(file)), isTsFile, null, 
file); } - public static File getHardlinkOrCopiedFileInPipeDir(final File file) throws IOException { + public static File getHardlinkOrCopiedFileInPipeDir( + final File file, final @Nullable String pipeName) throws IOException { try { - return new File(getPipeTsFileDirPath(file), getRelativeFilePath(file)); + return new File(getPipeTsFileDirPath(file, pipeName), getRelativeFilePath(file)); } catch (final Exception e) { throw new IOException( String.format( @@ -188,22 +177,28 @@ public static File getHardlinkOrCopiedFileInPipeDir(final File file) throws IOEx } } - private static String getPipeTsFileDirPath(File file) throws IOException { + private static String getPipeTsFileDirPath(File file, final @Nullable String pipeName) + throws IOException { while (!file.getName().equals(IoTDBConstant.SEQUENCE_FOLDER_NAME) - && !file.getName().equals(IoTDBConstant.UNSEQUENCE_FOLDER_NAME)) { + && !file.getName().equals(IoTDBConstant.UNSEQUENCE_FOLDER_NAME) + && !file.getName().equals(PipeConfig.getInstance().getPipeHardlinkBaseDirName())) { file = file.getParentFile(); } return file.getParentFile().getCanonicalPath() + File.separator + PipeConfig.getInstance().getPipeHardlinkBaseDirName() + File.separator - + PipeConfig.getInstance().getPipeHardlinkTsFileDirName(); + + PipeConfig.getInstance().getPipeHardlinkTsFileDirName() + + (Objects.nonNull(pipeName) ? File.separator + pipeName : ""); } private static String getRelativeFilePath(File file) { StringBuilder builder = new StringBuilder(file.getName()); while (!file.getName().equals(IoTDBConstant.SEQUENCE_FOLDER_NAME) - && !file.getName().equals(IoTDBConstant.UNSEQUENCE_FOLDER_NAME)) { + && !file.getName().equals(IoTDBConstant.UNSEQUENCE_FOLDER_NAME) + && !file.getParentFile() + .getName() + .equals(PipeConfig.getInstance().getPipeHardlinkTsFileDirName())) { file = file.getParentFile(); builder = new StringBuilder(file.getName()) @@ -217,19 +212,43 @@ private static String getRelativeFilePath(File file) { * Given a hardlink or copied file, decrease its reference count, if the reference count is 0, * delete the file. if the given file is not a hardlink or copied file, do nothing. 
* - * @param hardlinkOrCopiedFile the copied or hardlinked file + * @param hardlinkOrCopiedFile the copied or hard-linked file */ - public void decreaseFileReference(final File hardlinkOrCopiedFile) { - lock.lock(); + public void decreaseFileReference( + final File hardlinkOrCopiedFile, final @Nullable String pipeName) { + segmentLock.lock(hardlinkOrCopiedFile); try { final String filePath = hardlinkOrCopiedFile.getPath(); - final PipeTsFileResource resource = hardlinkOrCopiedFileToPipeTsFileResourceMap.get(filePath); - if (resource != null) { - resource.decreaseAndGetReference(); + final PipeTsFileResource resource = getResourceMap(pipeName).get(filePath); + if (resource != null && resource.decreaseReferenceCount()) { + getResourceMap(pipeName).remove(filePath); } } finally { - lock.unlock(); + segmentLock.unlock(hardlinkOrCopiedFile); + } + + // Decrease the assigner's file to clear hard-link and memory cache + // Note that it does not exist for historical files + decreasePublicReferenceIfExists(hardlinkOrCopiedFile, pipeName); + } + + private void decreasePublicReferenceIfExists(final File file, final @Nullable String pipeName) { + if (Objects.isNull(pipeName)) { + return; } + // Decrease the assigner's file so that its hard link and memory cache can be cleaned + // Note that it does not exist for historical files + decreaseFileReference(new File(getCommonFilePath(file)), null); + } + + // Warning: Shall not be called by the assigner + private String getCommonFilePath(final @Nonnull File file) { + // If the parent or grandparent is null then this is a testing scenario + // Skip the "pipeName" of this file + return Objects.isNull(file.getParentFile()) + || Objects.isNull(file.getParentFile().getParentFile()) + ? file.getPath() + : file.getParentFile().getParent() + File.separator + file.getName(); } /** @@ -238,14 +257,21 @@ public void decreaseFileReference(final File hardlinkOrCopiedFile) { * @param hardlinkOrCopiedFile the copied or hardlinked file * @return the reference count of the file */ - public int getFileReferenceCount(final File hardlinkOrCopiedFile) { - lock.lock(); + @TestOnly + public int getFileReferenceCount( + final File hardlinkOrCopiedFile, final @Nullable String pipeName) { + segmentLock.lock(hardlinkOrCopiedFile); try { - final String filePath = hardlinkOrCopiedFile.getPath(); - final PipeTsFileResource resource = hardlinkOrCopiedFileToPipeTsFileResourceMap.get(filePath); + final PipeTsFileResource resource = + Objects.nonNull(pipeName) + ? hardlinkOrCopiedFileToPipeTsFileResourceMap + .computeIfAbsent(pipeName, pipe -> new ConcurrentHashMap<>()) + .get(hardlinkOrCopiedFile.getPath()) + : hardlinkOrCopiedFileToTsFilePublicResourceMap.get( + getCommonFilePath(hardlinkOrCopiedFile)); return resource != null ? resource.getReferenceCount() : 0; } finally { - lock.unlock(); + segmentLock.unlock(hardlinkOrCopiedFile); } } @@ -256,110 +282,114 @@ public int getFileReferenceCount(final File hardlinkOrCopiedFile) { * false} if they can not be cached.
*/ public boolean cacheObjectsIfAbsent(final File hardlinkOrCopiedTsFile) throws IOException { - lock.lock(); + segmentLock.lock(hardlinkOrCopiedTsFile); try { - final PipeTsFileResource resource = - hardlinkOrCopiedFileToPipeTsFileResourceMap.get(hardlinkOrCopiedTsFile.getPath()); - return resource != null && resource.cacheObjectsIfAbsent(); + if (hardlinkOrCopiedTsFile.getParentFile() == null + || hardlinkOrCopiedTsFile.getParentFile().getParentFile() == null) { + return false; + } + final PipeTsFilePublicResource resource = + hardlinkOrCopiedFileToTsFilePublicResourceMap.get( + getCommonFilePath(hardlinkOrCopiedTsFile)); + return resource != null && resource.cacheObjectsIfAbsent(hardlinkOrCopiedTsFile); } finally { - lock.unlock(); + segmentLock.unlock(hardlinkOrCopiedTsFile); } } public Map> getDeviceMeasurementsMapFromCache( final File hardlinkOrCopiedTsFile) throws IOException { - lock.lock(); + segmentLock.lock(hardlinkOrCopiedTsFile); try { - final PipeTsFileResource resource = - hardlinkOrCopiedFileToPipeTsFileResourceMap.get(hardlinkOrCopiedTsFile.getPath()); - return resource == null ? null : resource.tryGetDeviceMeasurementsMap(); + final PipeTsFilePublicResource resource = + hardlinkOrCopiedFileToTsFilePublicResourceMap.get( + getCommonFilePath(hardlinkOrCopiedTsFile)); + return resource == null ? null : resource.tryGetDeviceMeasurementsMap(hardlinkOrCopiedTsFile); } finally { - lock.unlock(); + segmentLock.unlock(hardlinkOrCopiedTsFile); } } public Map getDeviceIsAlignedMapFromCache( final File hardlinkOrCopiedTsFile, final boolean cacheOtherMetadata) throws IOException { - lock.lock(); + segmentLock.lock(hardlinkOrCopiedTsFile); try { - final PipeTsFileResource resource = - hardlinkOrCopiedFileToPipeTsFileResourceMap.get(hardlinkOrCopiedTsFile.getPath()); - return resource == null ? null : resource.tryGetDeviceIsAlignedMap(cacheOtherMetadata); + final PipeTsFilePublicResource resource = + hardlinkOrCopiedFileToTsFilePublicResourceMap.get( + getCommonFilePath(hardlinkOrCopiedTsFile)); + return resource == null + ? null + : resource.tryGetDeviceIsAlignedMap(cacheOtherMetadata, hardlinkOrCopiedTsFile); } finally { - lock.unlock(); + segmentLock.unlock(hardlinkOrCopiedTsFile); } } public Map getMeasurementDataTypeMapFromCache( final File hardlinkOrCopiedTsFile) throws IOException { - lock.lock(); + segmentLock.lock(hardlinkOrCopiedTsFile); try { - final PipeTsFileResource resource = - hardlinkOrCopiedFileToPipeTsFileResourceMap.get(hardlinkOrCopiedTsFile.getPath()); - return resource == null ? null : resource.tryGetMeasurementDataTypeMap(); + final PipeTsFilePublicResource resource = + hardlinkOrCopiedFileToTsFilePublicResourceMap.get( + getCommonFilePath(hardlinkOrCopiedTsFile)); + return resource == null + ? null + : resource.tryGetMeasurementDataTypeMap(hardlinkOrCopiedTsFile); } finally { - lock.unlock(); + segmentLock.unlock(hardlinkOrCopiedTsFile); } } - public void pinTsFileResource(final TsFileResource resource, final boolean withMods) + public Map getResourceMap(final @Nullable String pipeName) { + return Objects.nonNull(pipeName) + ? 
hardlinkOrCopiedFileToPipeTsFileResourceMap.computeIfAbsent( + pipeName, k -> new ConcurrentHashMap<>()) + : hardlinkOrCopiedFileToTsFilePublicResourceMap; + } + + public void pinTsFileResource( + final TsFileResource resource, final boolean withMods, final @Nullable String pipeName) throws IOException { - lock.lock(); - try { - increaseFileReference(resource.getTsFile(), true, resource); - if (withMods && resource.getModFile().exists()) { - increaseFileReference(new File(resource.getModFile().getFilePath()), false, null); + increaseFileReference(resource.getTsFile(), true, pipeName); + if (withMods && resource.getModFile().exists()) { + // Avoid mod compaction + synchronized (resource.getModFile()) { + increaseFileReference(new File(resource.getModFile().getFilePath()), false, pipeName); } - } finally { - lock.unlock(); } } - public void unpinTsFileResource(final TsFileResource resource) throws IOException { - lock.lock(); - try { - final File pinnedFile = getHardlinkOrCopiedFileInPipeDir(resource.getTsFile()); - decreaseFileReference(pinnedFile); + public void unpinTsFileResource(final TsFileResource resource, final @Nullable String pipeName) + throws IOException { + final File pinnedFile = getHardlinkOrCopiedFileInPipeDir(resource.getTsFile(), pipeName); + decreaseFileReference(pinnedFile, pipeName); - final File modFile = new File(pinnedFile + ModificationFile.FILE_SUFFIX); - if (modFile.exists()) { - decreaseFileReference(modFile); - } - } finally { - lock.unlock(); + final File modFile = new File(pinnedFile + ModificationFile.FILE_SUFFIX); + if (modFile.exists()) { + decreaseFileReference(modFile, pipeName); } } - public int getLinkedTsfileCount() { - lock.lock(); - try { - return hardlinkOrCopiedFileToPipeTsFileResourceMap.size(); - } finally { - lock.unlock(); - } + public int getLinkedTsFileCount(final @Nonnull String pipeName) { + return hardlinkOrCopiedFileToPipeTsFileResourceMap + .computeIfAbsent(pipeName, pipe -> new ConcurrentHashMap<>()) + .size(); } - /** - * Get the total size of linked TsFiles whose original TsFile is deleted (by compaction or else) - */ - public long getTotalLinkedButDeletedTsfileSize() { - lock.lock(); - try { - return hardlinkOrCopiedFileToPipeTsFileResourceMap.values().parallelStream() - .filter(PipeTsFileResource::isOriginalTsFileDeleted) - .mapToLong( - resource -> { - try { - return resource.getFileSize(); - } catch (Exception e) { - LOGGER.warn( - "failed to get file size of linked but deleted TsFile {}: ", resource, e); - return 0; - } - }) - .sum(); - } finally { - lock.unlock(); - } + public long getTotalLinkedTsFileSize(final @Nonnull String pipeName) { + return hardlinkOrCopiedFileToPipeTsFileResourceMap + .computeIfAbsent(pipeName, pipe -> new ConcurrentHashMap<>()) + .values() + .stream() + .mapToLong( + resource -> { + try { + return resource.getFileSize(); + } catch (Exception e) { + LOGGER.warn("failed to get file size of linked TsFile {}: ", resource, e); + return 0; + } + }) + .sum(); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceSegmentLock.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceSegmentLock.java new file mode 100644 index 0000000000000..d1bf7fef2ad3d --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceSegmentLock.java @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
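The net effect of the resource manager changes above is a two-level reference count: each per-pipe hardlink is pinned under its pipe's own map, and every per-pipe pin also pins a shared, assigner-owned copy that backs the metadata cache. A toy model of just that bookkeeping, with plain counters instead of PipeTsFileResource, printlns in place of file deletion, and made-up paths:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

/** Sketch of the two-level bookkeeping: per-pipe counts backed by a shared "public" count. */
public class RefCountSketch {

  private final Map<String, AtomicInteger> publicCounts = new ConcurrentHashMap<>();
  private final Map<String, Map<String, AtomicInteger>> perPipeCounts = new ConcurrentHashMap<>();

  /** pipeName == null addresses the shared (assigner-owned) copy directly. */
  void increase(String path, String pipeName) {
    if (pipeName == null) {
      publicCounts.computeIfAbsent(path, p -> new AtomicInteger()).incrementAndGet();
      return;
    }
    perPipeCounts
        .computeIfAbsent(pipeName, p -> new ConcurrentHashMap<>())
        .computeIfAbsent(path, p -> new AtomicInteger())
        .incrementAndGet();
    increase(path, null); // every per-pipe pin also pins the shared copy
  }

  void decrease(String path, String pipeName) {
    if (pipeName == null) {
      AtomicInteger c = publicCounts.get(path);
      if (c != null && c.decrementAndGet() == 0) {
        publicCounts.remove(path);
        System.out.println("release shared hardlink/cache for " + path);
      }
      return;
    }
    Map<String, AtomicInteger> counts = perPipeCounts.getOrDefault(pipeName, Map.of());
    AtomicInteger c = counts.get(path);
    if (c != null && c.decrementAndGet() == 0) {
      counts.remove(path);
      System.out.println("delete " + pipeName + "'s hardlink of " + path);
    }
    decrease(path, null); // mirror the decrease on the shared copy
  }

  public static void main(String[] args) {
    RefCountSketch m = new RefCountSketch();
    m.increase("seq/100/0/a.tsfile", "pipeA");
    m.increase("seq/100/0/a.tsfile", "pipeB");
    m.decrease("seq/100/0/a.tsfile", "pipeA");
    m.decrease("seq/100/0/a.tsfile", "pipeB"); // shared copy released here
  }
}
```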
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.pipe.resource.tsfile; + +import org.apache.iotdb.db.storageengine.StorageEngine; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantLock; + +public class PipeTsFileResourceSegmentLock { + + private static final Logger LOGGER = LoggerFactory.getLogger(PipeTsFileResourceSegmentLock.class); + + private static final int SEGMENT_LOCK_MIN_SIZE = 32; + private static final int SEGMENT_LOCK_MAX_SIZE = 128; + + private volatile ReentrantLock[] locks; + + private void initIfNecessary() { + if (locks == null) { + synchronized (this) { + if (locks == null) { + int lockSegmentSize = SEGMENT_LOCK_MIN_SIZE; + try { + lockSegmentSize = StorageEngine.getInstance().getAllDataRegionIds().size(); + } catch (final Exception e) { + LOGGER.warn( + "Cannot get data region ids, use default lock segment size: {}", lockSegmentSize); + } + lockSegmentSize = Math.min(SEGMENT_LOCK_MAX_SIZE, lockSegmentSize); + lockSegmentSize = Math.max(SEGMENT_LOCK_MIN_SIZE, lockSegmentSize); + + final ReentrantLock[] tmpLocks = new ReentrantLock[lockSegmentSize]; + for (int i = 0; i < tmpLocks.length; i++) { + tmpLocks[i] = new ReentrantLock(); + } + + // publish this variable + locks = tmpLocks; + } + } + } + } + + public void lock(final File file) { + initIfNecessary(); + locks[Math.abs(file.hashCode()) % locks.length].lock(); + } + + public boolean tryLock(final File file, final long timeout, final TimeUnit timeUnit) + throws InterruptedException { + initIfNecessary(); + return locks[Math.abs(file.hashCode()) % locks.length].tryLock(timeout, timeUnit); + } + + public boolean tryLockAll(final long timeout, final TimeUnit timeUnit) + throws InterruptedException { + initIfNecessary(); + int alreadyLocked = 0; + for (final ReentrantLock lock : locks) { + if (lock.tryLock(timeout, timeUnit)) { + alreadyLocked++; + } else { + break; + } + } + + if (alreadyLocked == locks.length) { + return true; + } else { + unlockUntil(alreadyLocked); + return false; + } + } + + private void unlockUntil(final int index) { + for (int i = 0; i < index; i++) { + locks[i].unlock(); + } + } + + public void unlock(final File file) { + initIfNecessary(); + locks[Math.abs(file.hashCode()) % locks.length].unlock(); + } + + public void unlockAll() { + initIfNecessary(); + unlockUntil(locks.length); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResource.java deleted file mode 100644 index 9d1e530a19d17..0000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResource.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the Apache 
Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.db.pipe.resource.wal; - -import org.apache.iotdb.commons.exception.pipe.PipeRuntimeCriticalException; -import org.apache.iotdb.commons.exception.pipe.PipeRuntimeNonCriticalException; -import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; - -public abstract class PipeWALResource implements Closeable { - - private static final Logger LOGGER = LoggerFactory.getLogger(PipeWALResource.class); - - protected final WALEntryHandler walEntryHandler; - - private final AtomicInteger referenceCount; - - public static final long WAL_MIN_TIME_TO_LIVE_IN_MS = 1000L * 20; - private final AtomicLong lastLogicalPinTime; - private final AtomicBoolean isPhysicallyPinned; - - protected PipeWALResource(WALEntryHandler walEntryHandler) { - this.walEntryHandler = walEntryHandler; - - referenceCount = new AtomicInteger(0); - - lastLogicalPinTime = new AtomicLong(0); - isPhysicallyPinned = new AtomicBoolean(false); - } - - public final void pin() throws PipeRuntimeNonCriticalException { - if (referenceCount.get() == 0) { - if (!isPhysicallyPinned.get()) { - try { - pinInternal(); - } catch (MemTablePinException e) { - throw new PipeRuntimeNonCriticalException( - String.format( - "failed to pin wal %d, because %s", - walEntryHandler.getMemTableId(), e.getMessage())); - } - isPhysicallyPinned.set(true); - LOGGER.info("wal {} is pinned by pipe engine", walEntryHandler.getMemTableId()); - } // else means the wal is already pinned, do nothing - - // no matter the wal is pinned or not, update the last pin time - lastLogicalPinTime.set(System.currentTimeMillis()); - } - - referenceCount.incrementAndGet(); - } - - protected abstract void pinInternal() - throws MemTablePinException, PipeRuntimeNonCriticalException; - - public final void unpin() throws PipeRuntimeNonCriticalException { - final int finalReferenceCount = referenceCount.get(); - - if (finalReferenceCount == 1) { - unpinPhysicallyIfOutOfTimeToLive(); - } else if (finalReferenceCount < 1) { - throw new PipeRuntimeCriticalException( - String.format( - "wal %d is unpinned more than pinned, this should not happen", - walEntryHandler.getMemTableId())); - } - - referenceCount.decrementAndGet(); - } - - protected abstract void unpinInternal() - throws MemTablePinException, PipeRuntimeNonCriticalException; - - /** - * Invalidate the wal if it is unpinned and out of time to live. 
- * - * @return true if the wal is invalidated, false otherwise - */ - public final boolean invalidateIfPossible() { - if (referenceCount.get() > 0) { - return false; - } - - // referenceCount.get() == 0 - return unpinPhysicallyIfOutOfTimeToLive(); - } - - /** - * Unpin the wal if it is out of time to live. - * - * @return true if the wal is unpinned physically (then it can be invalidated), false otherwise - * @throws PipeRuntimeNonCriticalException if failed to unpin WAL of memtable. - */ - private boolean unpinPhysicallyIfOutOfTimeToLive() { - if (isPhysicallyPinned.get()) { - if (System.currentTimeMillis() - lastLogicalPinTime.get() > WAL_MIN_TIME_TO_LIVE_IN_MS) { - try { - unpinInternal(); - } catch (MemTablePinException e) { - throw new PipeRuntimeNonCriticalException( - String.format( - "failed to unpin wal %d, because %s", - walEntryHandler.getMemTableId(), e.getMessage())); - } - isPhysicallyPinned.set(false); - LOGGER.info( - "wal {} is unpinned by pipe engine when checking time to live", - walEntryHandler.getMemTableId()); - return true; - } else { - return false; - } - } else { - LOGGER.info( - "wal {} is not pinned physically when checking time to live", - walEntryHandler.getMemTableId()); - return true; - } - } - - @Override - public final void close() { - if (isPhysicallyPinned.get()) { - try { - unpinInternal(); - } catch (MemTablePinException e) { - LOGGER.error( - "failed to unpin wal {} when closing pipe wal resource, because {}", - walEntryHandler.getMemTableId(), - e.getMessage()); - } - isPhysicallyPinned.set(false); - LOGGER.info( - "wal {} is unpinned by pipe engine when closing pipe wal resource", - walEntryHandler.getMemTableId()); - } - - referenceCount.set(0); - } - - public int getReferenceCount() { - return referenceCount.get(); - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResourceManager.java deleted file mode 100644 index 9bc3a4a030aaf..0000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResourceManager.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.db.pipe.resource.wal; - -import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; -import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ConcurrentModificationException; -import java.util.Iterator; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.locks.ReentrantLock; - -public abstract class PipeWALResourceManager { - - private static final Logger LOGGER = LoggerFactory.getLogger(PipeWALResourceManager.class); - - protected final Map memtableIdToPipeWALResourceMap; - - private static final int SEGMENT_LOCK_COUNT = 32; - private final ReentrantLock[] memTableIdSegmentLocks; - - protected PipeWALResourceManager() { - // memTableIdToPipeWALResourceMap can be concurrently accessed by multiple threads - memtableIdToPipeWALResourceMap = new ConcurrentHashMap<>(); - - memTableIdSegmentLocks = new ReentrantLock[SEGMENT_LOCK_COUNT]; - for (int i = 0; i < SEGMENT_LOCK_COUNT; i++) { - memTableIdSegmentLocks[i] = new ReentrantLock(); - } - - PipeDataNodeAgent.runtime() - .registerPeriodicalJob( - "PipeWALResourceManager#ttlCheck()", - this::ttlCheck, - Math.max(PipeWALResource.WAL_MIN_TIME_TO_LIVE_IN_MS / 1000, 1)); - } - - @SuppressWarnings("java:S2222") - private void ttlCheck() { - final Iterator> iterator = - memtableIdToPipeWALResourceMap.entrySet().iterator(); - final Optional logger = - PipeDataNodeResourceManager.log() - .schedule( - PipeWALResourceManager.class, - PipeConfig.getInstance().getPipeWalPinMaxLogNumPerRound(), - PipeConfig.getInstance().getPipeWalPinMaxLogIntervalRounds(), - memtableIdToPipeWALResourceMap.size()); - - try { - while (iterator.hasNext()) { - final Map.Entry entry = iterator.next(); - final ReentrantLock lock = - memTableIdSegmentLocks[(int) (entry.getKey() % SEGMENT_LOCK_COUNT)]; - - lock.lock(); - try { - if (entry.getValue().invalidateIfPossible()) { - iterator.remove(); - } else { - logger.ifPresent( - l -> - l.info( - "WAL (memtableId {}) is still referenced {} times", - entry.getKey(), - entry.getValue().getReferenceCount())); - } - } finally { - lock.unlock(); - } - } - } catch (final ConcurrentModificationException e) { - LOGGER.error( - "Concurrent modification issues happened, skipping the WAL in this round of ttl check", - e); - } - } - - public final void pin(final WALEntryHandler walEntryHandler) throws IOException { - final long memTableId = walEntryHandler.getMemTableId(); - final ReentrantLock lock = memTableIdSegmentLocks[(int) (memTableId % SEGMENT_LOCK_COUNT)]; - - lock.lock(); - try { - pinInternal(memTableId, walEntryHandler); - } finally { - lock.unlock(); - } - } - - protected abstract void pinInternal(final long memTableId, final WALEntryHandler walEntryHandler) - throws IOException; - - public final void unpin(final WALEntryHandler walEntryHandler) throws IOException { - final long memTableId = walEntryHandler.getMemTableId(); - final ReentrantLock lock = memTableIdSegmentLocks[(int) (memTableId % SEGMENT_LOCK_COUNT)]; - - lock.lock(); - try { - unpinInternal(memTableId, walEntryHandler); - } finally { - lock.unlock(); - } - } - - protected abstract void unpinInternal( - final long memTableId, final WALEntryHandler walEntryHandler) throws 
IOException; - - public int getPinnedWalCount() { - return Objects.nonNull(memtableIdToPipeWALResourceMap) - ? memtableIdToPipeWALResourceMap.size() - : 0; - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResource.java deleted file mode 100644 index f1ad513ccc4c9..0000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResource.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.db.pipe.resource.wal.hardlink; - -import org.apache.iotdb.db.pipe.resource.wal.PipeWALResource; -import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; - -public class PipeWALHardlinkResource extends PipeWALResource { - - private final PipeWALHardlinkResourceManager resourceManager; - - protected PipeWALHardlinkResource( - WALEntryHandler walEntryHandler, PipeWALHardlinkResourceManager resourceManager) { - super(walEntryHandler); - this.resourceManager = resourceManager; - } - - @Override - protected void pinInternal() throws MemTablePinException { - // TODO: hardlink - walEntryHandler.pinMemTable(); - } - - @Override - protected void unpinInternal() throws MemTablePinException { - // TODO: hardlink - walEntryHandler.unpinMemTable(); - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResourceManager.java deleted file mode 100644 index 7570b83fc73c0..0000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResourceManager.java +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
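The removed PipeWALResourceManager above pushes pin/unpin and the periodic TTL sweep through a fixed array of 32 locks indexed by memtable id (striped locking), so unrelated memtables never contend on one global lock. A self-contained sketch of just that locking shape, with illustrative names:

```java
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Supplier;

class SegmentLocks {

  private static final int SEGMENT_LOCK_COUNT = 32;
  private final ReentrantLock[] locks = new ReentrantLock[SEGMENT_LOCK_COUNT];

  SegmentLocks() {
    for (int i = 0; i < SEGMENT_LOCK_COUNT; i++) {
      locks[i] = new ReentrantLock();
    }
  }

  <T> T withLock(long memTableId, Supplier<T> action) {
    // floorMod keeps the index non-negative even for negative ids
    final ReentrantLock lock =
        locks[(int) Math.floorMod(memTableId, (long) SEGMENT_LOCK_COUNT)];
    lock.lock();
    try {
      return action.get();
    } finally {
      lock.unlock();
    }
  }
}
```

Two memtables whose ids differ modulo 32 can be pinned concurrently, while the TTL sweep takes each entry's stripe before invalidating it, which is the same ordering the manager relies on.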
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.db.pipe.resource.wal.hardlink; - -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.db.pipe.resource.wal.PipeWALResourceManager; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; - -import java.io.File; -import java.io.IOException; -import java.nio.file.FileSystems; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.HashMap; -import java.util.Map; - -public class PipeWALHardlinkResourceManager extends PipeWALResourceManager { - - @Override - protected void pinInternal(final long memTableId, final WALEntryHandler walEntryHandler) { - memtableIdToPipeWALResourceMap - .computeIfAbsent(memTableId, id -> new PipeWALHardlinkResource(walEntryHandler, this)) - .pin(); - } - - @Override - protected void unpinInternal(final long memTableId, final WALEntryHandler walEntryHandler) { - memtableIdToPipeWALResourceMap.get(memTableId).unpin(); - } - - //////////////////////////// hardlink related //////////////////////////// - - private final Map hardlinkToReferenceMap = new HashMap<>(); - - /** - * given a file, create a hardlink, maintain a reference count for the hardlink, and return the - * hardlink. - * - *
<p>
if the given file is already a hardlink, increase its reference count and return it. - * - *
<p>
if the given file is a wal, create a hardlink in pipe dir, increase the reference count of - * the hardlink and return it. - * - * @param file wal file. can be original file or the hardlink of original file - * @return the hardlink - * @throws IOException when create hardlink failed - */ - public synchronized File increaseFileReference(final File file) throws IOException { - // if the file is already a hardlink, just increase reference count and return it - if (increaseReferenceIfExists(file.getPath())) { - return file; - } - - // if the file is not a hardlink, check if there is a related hardlink in pipe dir. if so, - // increase reference count and return it. - final File hardlink = getHardlinkInPipeWALDir(file); - if (increaseReferenceIfExists(hardlink.getPath())) { - return hardlink; - } - - // if the file is a wal, and there is no related hardlink in pipe dir, create a hardlink to pipe - // dir, maintain a reference count for the hardlink, and return the hardlink. - hardlinkToReferenceMap.put(hardlink.getPath(), 1); - return createHardlink(file, hardlink); - } - - private boolean increaseReferenceIfExists(final String path) { - hardlinkToReferenceMap.computeIfPresent(path, (k, v) -> v + 1); - return hardlinkToReferenceMap.containsKey(path); - } - - // TODO: Check me! Make sure the file is not a hardlink. - // TODO: IF user specify a wal by config, will the method work? - private static File getHardlinkInPipeWALDir(final File file) throws IOException { - try { - return new File(getPipeWALDirPath(file), getRelativeFilePath(file)); - } catch (final Exception e) { - throw new IOException( - String.format( - "failed to get hardlink in pipe dir " + "for file %s, it is not a wal", - file.getPath()), - e); - } - } - - private static String getPipeWALDirPath(File file) throws IOException { - while (!file.getName().equals(IoTDBConstant.WAL_FOLDER_NAME)) { - file = file.getParentFile(); - } - - return file.getParentFile().getCanonicalPath() - + File.separator - + IoTDBConstant.DATA_FOLDER_NAME - + File.separator - + PipeConfig.getInstance().getPipeHardlinkBaseDirName() - + File.separator - + PipeConfig.getInstance().getPipeHardlinkWALDirName(); - } - - private static String getRelativeFilePath(File file) { - StringBuilder builder = new StringBuilder(file.getName()); - while (!file.getParentFile().getName().equals(IoTDBConstant.WAL_FOLDER_NAME)) { - file = file.getParentFile(); - builder = - new StringBuilder(file.getName()) - .append(IoTDBConstant.FILE_NAME_SEPARATOR) - .append(builder); - } - return builder.toString(); - } - - private static File createHardlink(final File sourceFile, final File hardlink) - throws IOException { - if (!hardlink.getParentFile().exists() && !hardlink.getParentFile().mkdirs()) { - throw new IOException( - String.format( - "failed to create hardlink %s for file %s: failed to create parent dir %s", - hardlink.getPath(), sourceFile.getPath(), hardlink.getParentFile().getPath())); - } - - final Path sourcePath = FileSystems.getDefault().getPath(sourceFile.getAbsolutePath()); - final Path linkPath = FileSystems.getDefault().getPath(hardlink.getAbsolutePath()); - Files.createLink(linkPath, sourcePath); - return hardlink; - } - - /** - * given a hardlink, decrease its reference count, if the reference count is 0, delete the file. - * if the given file is not a hardlink, do nothing. 
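increaseFileReference and decreaseFileReference above keep one hardlink per WAL file plus a shared reference count per link path, deleting the link only when the last reference is released. The core bookkeeping, reduced to a hedged sketch (the wal-specific path resolution and error wrapping are omitted; names are mine):

```java
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.HashMap;
import java.util.Map;

class HardlinkRefCounter {

  private final Map<String, Integer> refCounts = new HashMap<>();

  /** Create the hardlink on first use; afterwards just bump its count. */
  synchronized File increase(File source, File link) throws IOException {
    final Integer count = refCounts.get(link.getPath());
    if (count == null) {
      Files.createDirectories(link.getParentFile().toPath()); // assumes link has a parent dir
      Files.createLink(link.toPath(), source.toPath());
      refCounts.put(link.getPath(), 1);
    } else {
      refCounts.put(link.getPath(), count + 1);
    }
    return link;
  }

  /** Drop one reference; the link file is deleted when the count reaches zero. */
  synchronized void decrease(File link) throws IOException {
    final Integer left = refCounts.computeIfPresent(link.getPath(), (p, c) -> c - 1);
    if (left != null && left == 0) {
      refCounts.remove(link.getPath());
      // deleting the link leaves the data intact while any other hardlink exists
      Files.deleteIfExists(link.toPath());
    }
  }
}
```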
- * - * @param hardlink the hardlinked file - * @throws IOException when delete file failed - */ - public synchronized void decreaseFileReference(final File hardlink) throws IOException { - final Integer updatedReference = - hardlinkToReferenceMap.computeIfPresent( - hardlink.getPath(), (file, reference) -> reference - 1); - - if (updatedReference != null && updatedReference == 0) { - Files.deleteIfExists(hardlink.toPath()); - hardlinkToReferenceMap.remove(hardlink.getPath()); - } - } - - @TestOnly - public synchronized int getFileReferenceCount(final File hardlink) { - return hardlinkToReferenceMap.getOrDefault(hardlink.getPath(), 0); - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/client/IoTDBDataNodeAsyncClientManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeAsyncClientManager.java similarity index 52% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/client/IoTDBDataNodeAsyncClientManager.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeAsyncClientManager.java index eff84ab5a5a61..e6a481d7155ac 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/client/IoTDBDataNodeAsyncClientManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeAsyncClientManager.java @@ -17,40 +17,47 @@ * under the License. */ -package org.apache.iotdb.db.pipe.connector.client; +package org.apache.iotdb.db.pipe.sink.client; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.commons.client.ClientPoolFactory; import org.apache.iotdb.commons.client.IClientManager; import org.apache.iotdb.commons.client.async.AsyncPipeDataTransferServiceClient; +import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; +import org.apache.iotdb.commons.concurrent.ThreadName; import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.connector.client.IoTDBClientManager; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.common.PipeTransferHandshakeConstant; +import org.apache.iotdb.commons.pipe.sink.client.IoTDBClientManager; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.common.PipeTransferHandshakeConstant; import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferDataNodeHandshakeV1Req; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferDataNodeHandshakeV2Req; -import org.apache.iotdb.db.pipe.connector.protocol.thrift.async.IoTDBDataRegionAsyncConnector; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferDataNodeHandshakeV1Req; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferDataNodeHandshakeV2Req; import org.apache.iotdb.pipe.api.exception.PipeConnectionException; import org.apache.iotdb.pipe.api.exception.PipeException; import org.apache.iotdb.rpc.TSStatusCode; import org.apache.iotdb.service.rpc.thrift.TPipeTransferResp; +import org.apache.thrift.TException; import org.apache.thrift.async.AsyncMethodCallback; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Base64; import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import 
java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_LOAD_BALANCE_PRIORITY_STRATEGY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_LOAD_BALANCE_RANDOM_STRATEGY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_LOAD_BALANCE_ROUND_ROBIN_STRATEGY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_LOAD_BALANCE_PRIORITY_STRATEGY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_LOAD_BALANCE_RANDOM_STRATEGY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_LOAD_BALANCE_ROUND_ROBIN_STRATEGY; public class IoTDBDataNodeAsyncClientManager extends IoTDBClientManager implements IoTDBDataNodeCacheLeaderClientManager { @@ -60,30 +67,87 @@ public class IoTDBDataNodeAsyncClientManager extends IoTDBClientManager private final Set endPointSet; - private static final AtomicReference< - IClientManager> - ASYNC_PIPE_DATA_TRANSFER_CLIENT_MANAGER_HOLDER = new AtomicReference<>(); + private static final Map RECEIVER_ATTRIBUTES_REF_COUNT = + new ConcurrentHashMap<>(); + private final String receiverAttributes; + + // receiverAttributes -> IClientManager + private static final Map> + ASYNC_PIPE_DATA_TRANSFER_CLIENT_MANAGER_HOLDER = new ConcurrentHashMap<>(); + private static final Map TS_FILE_ASYNC_EXECUTOR_HOLDER = + new ConcurrentHashMap<>(); + private static final AtomicInteger id = new AtomicInteger(0); + private final IClientManager endPoint2Client; + private ExecutorService executor; private final LoadBalancer loadBalancer; + private volatile boolean isClosed = false; + + private final Map unhealthyEndPointMap = new ConcurrentHashMap<>(); + public IoTDBDataNodeAsyncClientManager( - List endPoints, boolean useLeaderCache, String loadBalanceStrategy) { - super(endPoints, useLeaderCache); + final List endPoints, + /* The following parameters are used locally. */ + final boolean useLeaderCache, + final String loadBalanceStrategy, + /* The following parameters are used to handshake with the receiver. 
*/ + final String username, + final String password, + final boolean shouldReceiverConvertOnTypeMismatch, + final String loadTsFileStrategy, + final boolean validateTsFile, + final boolean shouldMarkAsPipeRequest, + final boolean isTSFileUsed) { + super( + endPoints, + username, + password, + shouldReceiverConvertOnTypeMismatch, + loadTsFileStrategy, + useLeaderCache, + validateTsFile, + shouldMarkAsPipeRequest); endPointSet = new HashSet<>(endPoints); - if (ASYNC_PIPE_DATA_TRANSFER_CLIENT_MANAGER_HOLDER.get() == null) { - synchronized (IoTDBDataRegionAsyncConnector.class) { - if (ASYNC_PIPE_DATA_TRANSFER_CLIENT_MANAGER_HOLDER.get() == null) { - ASYNC_PIPE_DATA_TRANSFER_CLIENT_MANAGER_HOLDER.set( - new IClientManager.Factory() - .createClientManager( - new ClientPoolFactory.AsyncPipeDataTransferServiceClientPoolFactory())); + receiverAttributes = + String.format( + "%s-%s-%s-%s-%s-%s", + Base64.getEncoder().encodeToString((username + ":" + password).getBytes()), + shouldReceiverConvertOnTypeMismatch, + loadTsFileStrategy, + validateTsFile, + shouldMarkAsPipeRequest, + isTSFileUsed); + synchronized (IoTDBDataNodeAsyncClientManager.class) { + if (!ASYNC_PIPE_DATA_TRANSFER_CLIENT_MANAGER_HOLDER.containsKey(receiverAttributes)) { + ASYNC_PIPE_DATA_TRANSFER_CLIENT_MANAGER_HOLDER.putIfAbsent( + receiverAttributes, + new IClientManager.Factory() + .createClientManager( + isTSFileUsed + ? new ClientPoolFactory + .AsyncPipeTsFileDataTransferServiceClientPoolFactory() + : new ClientPoolFactory.AsyncPipeDataTransferServiceClientPoolFactory())); + } + endPoint2Client = ASYNC_PIPE_DATA_TRANSFER_CLIENT_MANAGER_HOLDER.get(receiverAttributes); + + if (isTSFileUsed) { + if (!TS_FILE_ASYNC_EXECUTOR_HOLDER.containsKey(receiverAttributes)) { + TS_FILE_ASYNC_EXECUTOR_HOLDER.putIfAbsent( + receiverAttributes, + IoTDBThreadPoolFactory.newFixedThreadPool( + PipeConfig.getInstance().getPipeRealTimeQueueMaxWaitingTsFileSize(), + ThreadName.PIPE_TSFILE_ASYNC_SEND_POOL.getName() + "-" + id.getAndIncrement())); } + executor = TS_FILE_ASYNC_EXECUTOR_HOLDER.get(receiverAttributes); } + + RECEIVER_ATTRIBUTES_REF_COUNT.compute( + receiverAttributes, (attributes, refCount) -> refCount == null ? 
1 : refCount + 1); } - endPoint2Client = ASYNC_PIPE_DATA_TRANSFER_CLIENT_MANAGER_HOLDER.get(); switch (loadBalanceStrategy) { case CONNECTOR_LOAD_BALANCE_ROUND_ROBIN_STRATEGY: @@ -107,7 +171,7 @@ public AsyncPipeDataTransferServiceClient borrowClient() throws Exception { return loadBalancer.borrowClient(); } - public AsyncPipeDataTransferServiceClient borrowClient(String deviceId) throws Exception { + public AsyncPipeDataTransferServiceClient borrowClient(final String deviceId) throws Exception { if (!useLeaderCache || Objects.isNull(deviceId)) { return borrowClient(); } @@ -115,8 +179,9 @@ public AsyncPipeDataTransferServiceClient borrowClient(String deviceId) throws E return borrowClient(LEADER_CACHE_MANAGER.getLeaderEndPoint(deviceId)); } - public AsyncPipeDataTransferServiceClient borrowClient(TEndPoint endPoint) throws Exception { - if (!useLeaderCache || Objects.isNull(endPoint)) { + public AsyncPipeDataTransferServiceClient borrowClient(final TEndPoint endPoint) + throws Exception { + if (!useLeaderCache || Objects.isNull(endPoint) || isUnhealthy(endPoint)) { return borrowClient(); } @@ -125,7 +190,7 @@ public AsyncPipeDataTransferServiceClient borrowClient(TEndPoint endPoint) throw if (handshakeIfNecessary(endPoint, client)) { return client; } - } catch (Exception e) { + } catch (final Exception e) { LOGGER.warn( "failed to borrow client {}:{} for cached leader.", endPoint.getIp(), @@ -145,7 +210,8 @@ public AsyncPipeDataTransferServiceClient borrowClient(TEndPoint endPoint) throw * @throws Exception if an error occurs. */ private boolean handshakeIfNecessary( - TEndPoint targetNodeUrl, AsyncPipeDataTransferServiceClient client) throws Exception { + final TEndPoint targetNodeUrl, final AsyncPipeDataTransferServiceClient client) + throws Exception { if (client.isHandshakeFinished()) { return true; } @@ -157,7 +223,7 @@ private boolean handshakeIfNecessary( final AsyncMethodCallback callback = new AsyncMethodCallback() { @Override - public void onComplete(TPipeTransferResp response) { + public void onComplete(final TPipeTransferResp response) { resp.set(response); if (response.getStatus().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { @@ -187,7 +253,7 @@ public void onComplete(TPipeTransferResp response) { } @Override - public void onError(Exception e) { + public void onError(final Exception e) { LOGGER.warn( "Handshake error with receiver {}:{}.", targetNodeUrl.getIp(), @@ -209,6 +275,19 @@ public void onError(Exception e) { params.put( PipeTransferHandshakeConstant.HANDSHAKE_KEY_TIME_PRECISION, CommonDescriptor.getInstance().getConfig().getTimestampPrecision()); + params.put( + PipeTransferHandshakeConstant.HANDSHAKE_KEY_CONVERT_ON_TYPE_MISMATCH, + Boolean.toString(shouldReceiverConvertOnTypeMismatch)); + params.put( + PipeTransferHandshakeConstant.HANDSHAKE_KEY_LOAD_TSFILE_STRATEGY, loadTsFileStrategy); + params.put(PipeTransferHandshakeConstant.HANDSHAKE_KEY_USERNAME, username); + params.put(PipeTransferHandshakeConstant.HANDSHAKE_KEY_PASSWORD, password); + params.put( + PipeTransferHandshakeConstant.HANDSHAKE_KEY_VALIDATE_TSFILE, + Boolean.toString(validateTsFile)); + params.put( + PipeTransferHandshakeConstant.HANDSHAKE_KEY_MARK_AS_PIPE_REQUEST, + Boolean.toString(shouldMarkAsPipeRequest)); client.setTimeoutDynamically(PipeConfig.getInstance().getPipeConnectorHandshakeTimeoutMs()); client.pipeTransfer(PipeTransferDataNodeHandshakeV2Req.toTPipeTransferReq(params), callback); @@ -217,7 +296,7 @@ public void onError(Exception e) { // Retry to handshake by 
PipeTransferHandshakeV1Req. if (resp.get() != null && resp.get().getStatus().getCode() == TSStatusCode.PIPE_TYPE_ERROR.getStatusCode()) { - LOGGER.info( + LOGGER.warn( "Handshake error by PipeTransferHandshakeV2Req with receiver {}:{} " + "retry to handshake by PipeTransferHandshakeV1Req.", targetNodeUrl.getIp(), @@ -236,9 +315,28 @@ public void onError(Exception e) { waitHandshakeFinished(isHandshakeFinished); } if (exception.get() != null) { + markUnhealthy(targetNodeUrl); throw new PipeConnectionException("Failed to handshake.", exception.get()); + } else { + markHealthy(targetNodeUrl); } + } catch (TException e) { + client.resetMethodStateIfStopped(); + markUnhealthy(targetNodeUrl); + throw e; } finally { + if (isClosed) { + try { + client.close(); + client.invalidateAll(); + } catch (final Exception e) { + LOGGER.warn( + "Failed to close client {}:{} after handshake failure when the manager is closed.", + targetNodeUrl.getIp(), + targetNodeUrl.getPort(), + e); + } + } client.setShouldReturnSelf(true); client.returnSelf(); } @@ -246,18 +344,24 @@ public void onError(Exception e) { return false; } - private void waitHandshakeFinished(AtomicBoolean isHandshakeFinished) { + private void waitHandshakeFinished(final AtomicBoolean isHandshakeFinished) { try { + final long startTime = System.currentTimeMillis(); while (!isHandshakeFinished.get()) { + if (isClosed + || System.currentTimeMillis() - startTime + > PipeConfig.getInstance().getPipeConnectorHandshakeTimeoutMs() * 2L) { + throw new PipeConnectionException("Timed out when waiting for client handshake finish."); + } Thread.sleep(10); } - } catch (InterruptedException e) { + } catch (final InterruptedException e) { Thread.currentThread().interrupt(); throw new PipeException("Interrupted while waiting for handshake response.", e); } } - public void updateLeaderCache(String deviceId, TEndPoint endPoint) { + public void updateLeaderCache(final String deviceId, final TEndPoint endPoint) { if (!useLeaderCache || deviceId == null || endPoint == null) { return; } @@ -270,6 +374,51 @@ public void updateLeaderCache(String deviceId, TEndPoint endPoint) { LEADER_CACHE_MANAGER.updateLeaderEndPoint(deviceId, endPoint); } + public ExecutorService getExecutor() { + return executor; + } + + public void close() { + isClosed = true; + synchronized (IoTDBDataNodeAsyncClientManager.class) { + RECEIVER_ATTRIBUTES_REF_COUNT.computeIfPresent( + receiverAttributes, + (attributes, refCount) -> { + if (refCount <= 1) { + final IClientManager clientManager = + ASYNC_PIPE_DATA_TRANSFER_CLIENT_MANAGER_HOLDER.remove(receiverAttributes); + if (clientManager != null) { + try { + clientManager.close(); + LOGGER.info( + "Closed AsyncPipeDataTransferServiceClientManager for receiver attributes: {}", + receiverAttributes); + } catch (final Exception e) { + LOGGER.warn( + "Failed to close AsyncPipeDataTransferServiceClientManager for receiver attributes: {}", + receiverAttributes, + e); + } + } + + final ExecutorService executor = + TS_FILE_ASYNC_EXECUTOR_HOLDER.remove(receiverAttributes); + if (executor != null) { + try { + executor.shutdown(); + LOGGER.info("Successfully shutdown executor {}.", executor); + } catch (final Exception e) { + LOGGER.warn("Failed to shutdown executor {}.", executor); + } + } + + return null; + } + return refCount - 1; + }); + } + } + /////////////////////// Strategies for load balance ////////////////////////// private interface LoadBalancer { @@ -280,8 +429,14 @@ private class RoundRobinLoadBalancer implements LoadBalancer { @Override public 
AsyncPipeDataTransferServiceClient borrowClient() throws Exception { final int clientSize = endPointList.size(); + long n = 0; while (true) { final TEndPoint targetNodeUrl = endPointList.get((int) (currentClientIndex++ % clientSize)); + if (isUnhealthy(targetNodeUrl) && n < clientSize) { + n++; + continue; + } + final AsyncPipeDataTransferServiceClient client = endPoint2Client.borrowClient(targetNodeUrl); if (handshakeIfNecessary(targetNodeUrl, client)) { @@ -295,8 +450,15 @@ private class RandomLoadBalancer implements LoadBalancer { @Override public AsyncPipeDataTransferServiceClient borrowClient() throws Exception { final int clientSize = endPointList.size(); + long n = 0; + while (true) { final TEndPoint targetNodeUrl = endPointList.get((int) (Math.random() * clientSize)); + if (isUnhealthy(targetNodeUrl) && n <= clientSize) { + n++; + continue; + } + final AsyncPipeDataTransferServiceClient client = endPoint2Client.borrowClient(targetNodeUrl); if (handshakeIfNecessary(targetNodeUrl, client)) { @@ -309,8 +471,15 @@ public AsyncPipeDataTransferServiceClient borrowClient() throws Exception { private class PriorityLoadBalancer implements LoadBalancer { @Override public AsyncPipeDataTransferServiceClient borrowClient() throws Exception { + final int clientSize = endPointList.size(); + long n = 0; while (true) { for (final TEndPoint targetNodeUrl : endPointList) { + if (isUnhealthy(targetNodeUrl) && n <= clientSize) { + n++; + continue; + } + final AsyncPipeDataTransferServiceClient client = endPoint2Client.borrowClient(targetNodeUrl); if (handshakeIfNecessary(targetNodeUrl, client)) { @@ -320,4 +489,25 @@ public AsyncPipeDataTransferServiceClient borrowClient() throws Exception { } } } + + private boolean isUnhealthy(TEndPoint endPoint) { + Long downTime = unhealthyEndPointMap.get(endPoint); + if (downTime == null) { + return false; + } + if (System.currentTimeMillis() - downTime + > PipeConfig.getInstance().getPipeCheckAllSyncClientLiveTimeIntervalMs()) { + markHealthy(endPoint); + return false; + } + return true; + } + + private void markUnhealthy(TEndPoint endPoint) { + unhealthyEndPointMap.put(endPoint, System.currentTimeMillis()); + } + + private void markHealthy(TEndPoint endPoint) { + unhealthyEndPointMap.remove(endPoint); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/client/IoTDBDataNodeCacheLeaderClientManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeCacheLeaderClientManager.java similarity index 89% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/client/IoTDBDataNodeCacheLeaderClientManager.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeCacheLeaderClientManager.java index e45954e7e4de2..dc24295f936d2 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/client/IoTDBDataNodeCacheLeaderClientManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeCacheLeaderClientManager.java @@ -17,7 +17,7 @@ * under the License. 
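The new unhealthy-endpoint handling above timestamps receivers that fail a handshake, has every load balancer skip them, and retries them automatically once a cool-down interval passes. An illustrative standalone version (the cool-down source and the class names are mine, not PipeConfig's):

```java
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class EndpointHealthTracker<E> {

  private final long cooldownMs;
  private final Map<E, Long> downSince = new ConcurrentHashMap<>();

  EndpointHealthTracker(long cooldownMs) {
    this.cooldownMs = cooldownMs;
  }

  void markUnhealthy(E endpoint) {
    downSince.put(endpoint, System.currentTimeMillis());
  }

  void markHealthy(E endpoint) {
    downSince.remove(endpoint);
  }

  boolean isUnhealthy(E endpoint) {
    final Long downTime = downSince.get(endpoint);
    if (downTime == null) {
      return false;
    }
    if (System.currentTimeMillis() - downTime > cooldownMs) {
      markHealthy(endpoint); // cool-down elapsed: give the endpoint another chance
      return false;
    }
    return true;
  }

  /** Round-robin pick over a non-empty list, skipping unhealthy endpoints. */
  E pick(List<E> endpoints, int startIndex) {
    for (int i = 0; i < endpoints.size(); i++) {
      final E candidate = endpoints.get((startIndex + i) % endpoints.size());
      if (!isUnhealthy(candidate)) {
        return candidate;
      }
    }
    // every endpoint is marked down: try one anyway rather than failing fast
    return endpoints.get(startIndex % endpoints.size());
  }
}
```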
*/ -package org.apache.iotdb.db.pipe.connector.client; +package org.apache.iotdb.db.pipe.sink.client; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.commons.pipe.config.PipeConfig; @@ -51,11 +51,7 @@ class LeaderCacheManager { public LeaderCacheManager() { final long initMemorySizeInBytes = - PipeDataNodeResourceManager.memory().getTotalMemorySizeInBytes() / 10; - final long maxMemorySizeInBytes = - (long) - (PipeDataNodeResourceManager.memory().getTotalMemorySizeInBytes() - * CONFIG.getPipeLeaderCacheMemoryUsagePercentage()); + PipeDataNodeResourceManager.memory().getTotalNonFloatingMemorySizeInBytes() / 10; // properties required by pipe memory control framework final PipeMemoryBlock allocatedMemoryBlock = @@ -72,7 +68,13 @@ public LeaderCacheManager() { newMemory); }) .setExpandMethod( - oldMemory -> Math.min(Math.max(oldMemory, 1) * 2, maxMemorySizeInBytes)) + oldMemory -> + Math.min( + Math.max(oldMemory, 1) * 2, + (long) + (PipeDataNodeResourceManager.memory() + .getTotalNonFloatingMemorySizeInBytes() + * CONFIG.getPipeLeaderCacheMemoryUsagePercentage()))) .setExpandCallback( (oldMemory, newMemory) -> { memoryUsageCheatFactor.updateAndGet( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/client/IoTDBDataNodeSyncClientManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeSyncClientManager.java similarity index 66% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/client/IoTDBDataNodeSyncClientManager.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeSyncClientManager.java index 2c23cba454212..9177eb04531ff 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/client/IoTDBDataNodeSyncClientManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeSyncClientManager.java @@ -17,16 +17,16 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.connector.client; +package org.apache.iotdb.db.pipe.sink.client; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.commons.conf.CommonDescriptor; -import org.apache.iotdb.commons.pipe.connector.client.IoTDBSyncClient; -import org.apache.iotdb.commons.pipe.connector.client.IoTDBSyncClientManager; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferHandshakeV2Req; +import org.apache.iotdb.commons.pipe.sink.client.IoTDBSyncClient; +import org.apache.iotdb.commons.pipe.sink.client.IoTDBSyncClientManager; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferHandshakeV2Req; import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferDataNodeHandshakeV1Req; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferDataNodeHandshakeV2Req; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferDataNodeHandshakeV1Req; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferDataNodeHandshakeV2Req; import org.apache.tsfile.utils.Pair; import org.slf4j.Logger; @@ -43,13 +43,31 @@ public class IoTDBDataNodeSyncClientManager extends IoTDBSyncClientManager LoggerFactory.getLogger(IoTDBDataNodeSyncClientManager.class); public IoTDBDataNodeSyncClientManager( - List endPoints, - boolean useSSL, - String trustStorePath, - String trustStorePwd, - boolean useLeaderCache, - String loadBalanceStrategy) { - super(endPoints, useSSL, trustStorePath, trustStorePwd, useLeaderCache, loadBalanceStrategy); + final List endPoints, + final String username, + final String password, + final boolean useSSL, + final String trustStorePath, + final String trustStorePwd, + final boolean useLeaderCache, + final String loadBalanceStrategy, + final boolean shouldReceiverConvertOnTypeMismatch, + final String loadTsFileStrategy, + final boolean validateTsFile, + final boolean shouldMarkAsPipeRequest) { + super( + endPoints, + username, + password, + useSSL, + trustStorePath, + trustStorePwd, + useLeaderCache, + loadBalanceStrategy, + shouldReceiverConvertOnTypeMismatch, + loadTsFileStrategy, + validateTsFile, + shouldMarkAsPipeRequest); } @Override @@ -59,7 +77,7 @@ protected PipeTransferDataNodeHandshakeV1Req buildHandshakeV1Req() throws IOExce } @Override - protected PipeTransferHandshakeV2Req buildHandshakeV2Req(Map params) + protected PipeTransferHandshakeV2Req buildHandshakeV2Req(final Map params) throws IOException { return PipeTransferDataNodeHandshakeV2Req.toTPipeTransferReq(params); } @@ -69,7 +87,7 @@ protected String getClusterId() { return IoTDBDescriptor.getInstance().getConfig().getClusterId(); } - public Pair getClient(String deviceId) { + public Pair getClient(final String deviceId) { final TEndPoint endPoint = LEADER_CACHE_MANAGER.getLeaderEndPoint(deviceId); return useLeaderCache && endPoint != null @@ -79,7 +97,7 @@ public Pair getClient(String deviceId) { : getClient(); } - public Pair getClient(TEndPoint endPoint) { + public Pair getClient(final TEndPoint endPoint) { return useLeaderCache && endPoint != null && endPoint2ClientAndStatus.containsKey(endPoint) @@ -88,7 +106,7 @@ public Pair getClient(TEndPoint endPoint) { : getClient(); } - public void updateLeaderCache(String deviceId, TEndPoint endPoint) { + public void updateLeaderCache(final String deviceId, final TEndPoint endPoint) { if (!useLeaderCache || deviceId == null || endPoint == null) { return; } @@ 
-101,7 +119,7 @@ public void updateLeaderCache(String deviceId, TEndPoint endPoint) { } LEADER_CACHE_MANAGER.updateLeaderEndPoint(deviceId, endPoint); - } catch (Exception e) { + } catch (final Exception e) { LOGGER.warn( "Failed to update leader cache for device {} with endpoint {}:{}.", deviceId, diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventBatch.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTabletEventBatch.java similarity index 59% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventBatch.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTabletEventBatch.java index db5ba85d7ba86..0e13feb8ac4bc 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventBatch.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTabletEventBatch.java @@ -17,14 +17,16 @@ * under the License. */ -package org.apache.iotdb.db.pipe.connector.payload.evolvable.batch; +package org.apache.iotdb.db.pipe.sink.payload.evolvable.batch; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; +import org.apache.iotdb.db.pipe.sink.protocol.thrift.async.IoTDBDataRegionAsyncSink; import org.apache.iotdb.db.storageengine.dataregion.wal.exception.WALPipeException; import org.apache.iotdb.pipe.api.event.Event; import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; -import org.apache.tsfile.exception.write.WriteProcessException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; @@ -33,7 +35,11 @@ public abstract class PipeTabletEventBatch implements AutoCloseable { + private static final Logger LOGGER = LoggerFactory.getLogger(PipeTabletEventBatch.class); + private final long maxBatchSizeInBytes; + protected final List events = new ArrayList<>(); + protected final TriLongConsumer recordMetric; private final int maxDelayInMs; private long firstEventProcessingTime = Long.MIN_VALUE; @@ -42,8 +48,22 @@ public abstract class PipeTabletEventBatch implements AutoCloseable { protected volatile boolean isClosed = false; - protected PipeTabletEventBatch(final int maxDelayInMs) { + protected PipeTabletEventBatch( + final int maxDelayInMs, + final long requestMaxBatchSizeInBytes, + final TriLongConsumer recordMetric) { this.maxDelayInMs = maxDelayInMs; + + // limit in buffer size + this.maxBatchSizeInBytes = requestMaxBatchSizeInBytes; + if (recordMetric != null) { + this.recordMetric = recordMetric; + } else { + this.recordMetric = + (timeInterval, bufferSize, events) -> { + // do nothing + }; + } } /** @@ -53,7 +73,7 @@ protected PipeTabletEventBatch(final int maxDelayInMs) { * @return {@code true} if the batch can be transferred */ public synchronized boolean onEvent(final TabletInsertionEvent event) - throws WALPipeException, IOException, WriteProcessException { + throws WALPipeException, IOException { if (isClosed || !(event instanceof EnrichedEvent)) { return false; } @@ -65,16 +85,23 @@ public synchronized boolean onEvent(final TabletInsertionEvent event) if (((EnrichedEvent) event) .increaseReferenceCount(PipeTransferBatchReqBuilder.class.getName())) { - if (constructBatch(event)) { - events.add((EnrichedEvent) event); + try { + if (constructBatch(event)) { + events.add((EnrichedEvent) 
event); + } + } catch (final Exception e) { + // If the event is not added to the batch, we need to decrease the reference count. + ((EnrichedEvent) event) + .decreaseReferenceCount(PipeTransferBatchReqBuilder.class.getName(), false); + // Will cause a retry + throw e; } if (firstEventProcessingTime == Long.MIN_VALUE) { firstEventProcessingTime = System.currentTimeMillis(); } } else { - ((EnrichedEvent) event) - .decreaseReferenceCount(PipeTransferBatchReqBuilder.class.getName(), false); + LOGGER.warn("Cannot increase reference count for event: {}, ignore it in batch.", event); } } @@ -90,15 +117,17 @@ public synchronized boolean onEvent(final TabletInsertionEvent event) * exceptions and do not return {@code false} here. */ protected abstract boolean constructBatch(final TabletInsertionEvent event) - throws WALPipeException, IOException, WriteProcessException; + throws WALPipeException, IOException; public boolean shouldEmit() { - return totalBufferSize >= getMaxBatchSizeInBytes() - || System.currentTimeMillis() - firstEventProcessingTime >= maxDelayInMs; + final long diff = System.currentTimeMillis() - firstEventProcessingTime; + if (totalBufferSize >= maxBatchSizeInBytes || diff >= maxDelayInMs) { + recordMetric.accept(diff, totalBufferSize, events.size()); + return true; + } + return false; } - protected abstract long getMaxBatchSizeInBytes(); - public synchronized void onSuccess() { events.clear(); @@ -115,7 +144,23 @@ public synchronized void close() { events.clear(); } - public void decreaseEventsReferenceCount(final String holderMessage, final boolean shouldReport) { + /** + * Discard all events of the given pipe. This method only clears the reference count of the events + * and discard them, but do not modify other objects (such as buffers) for simplicity. + */ + public synchronized void discardEventsOfPipe(final String pipeNameToDrop, final int regionId) { + events.removeIf( + event -> { + if (pipeNameToDrop.equals(event.getPipeName()) && regionId == event.getRegionId()) { + event.clearReferenceCount(IoTDBDataRegionAsyncSink.class.getName()); + return true; + } + return false; + }); + } + + public synchronized void decreaseEventsReferenceCount( + final String holderMessage, final boolean shouldReport) { events.forEach(event -> event.decreaseReferenceCount(holderMessage, shouldReport)); } @@ -127,7 +172,12 @@ public List deepCopyEvents() { return new ArrayList<>(events); } - boolean isEmpty() { + public boolean isEmpty() { return events.isEmpty(); } + + @FunctionalInterface + public interface TriLongConsumer { + void accept(long l1, long l2, long l3); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventPlainBatch.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTabletEventPlainBatch.java similarity index 72% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventPlainBatch.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTabletEventPlainBatch.java index a2d7315ae0a53..d450be8200c84 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventPlainBatch.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTabletEventPlainBatch.java @@ -17,14 +17,12 @@ * under the License. 
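The reworked PipeTabletEventBatch above emits on either a byte threshold or a delay threshold and, at the moment of emission, feeds (elapsedMs, bufferBytes, eventCount) to the new TriLongConsumer metric hook in a single call. A standalone sketch of that emit path; field and class names are mine, not the batch's API:

```java
import java.util.ArrayList;
import java.util.List;

class BatchEmitSketch<E> {

  @FunctionalInterface
  interface TriLongConsumer {
    void accept(long l1, long l2, long l3);
  }

  private final long maxBytes;
  private final long maxDelayMs;
  private final TriLongConsumer recordMetric;
  private final List<E> events = new ArrayList<>();
  private long bufferBytes = 0;
  private long firstEventTimeMs = Long.MIN_VALUE;

  BatchEmitSketch(long maxBytes, long maxDelayMs, TriLongConsumer recordMetric) {
    this.maxBytes = maxBytes;
    this.maxDelayMs = maxDelayMs;
    // null means "no metrics": fall back to a no-op, as the batch above does
    this.recordMetric = recordMetric != null ? recordMetric : (t, b, n) -> {};
  }

  synchronized void add(E event, long sizeInBytes) {
    events.add(event);
    bufferBytes += sizeInBytes;
    if (firstEventTimeMs == Long.MIN_VALUE) {
      firstEventTimeMs = System.currentTimeMillis();
    }
  }

  synchronized boolean shouldEmit() {
    if (events.isEmpty()) {
      return false;
    }
    final long elapsed = System.currentTimeMillis() - firstEventTimeMs;
    if (bufferBytes >= maxBytes || elapsed >= maxDelayMs) {
      // the metric is recorded exactly once, at emission time
      recordMetric.accept(elapsed, bufferBytes, events.size());
      return true;
    }
    return false;
  }
}
```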
*/ -package org.apache.iotdb.db.pipe.connector.payload.evolvable.batch; +package org.apache.iotdb.db.pipe.sink.payload.evolvable.batch; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletBatchReq; import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; -import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; -import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTabletBatchReq; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; import org.apache.iotdb.db.storageengine.dataregion.wal.exception.WALPipeException; import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; @@ -46,42 +44,24 @@ public class PipeTabletEventPlainBatch extends PipeTabletEventBatch { - private static final Logger LOGGER = LoggerFactory.getLogger(PipeTabletEventPlainBatch.class); + private static final Logger LOGGER = LoggerFactory.getLogger(PipeTabletEventBatch.class); private final List binaryBuffers = new ArrayList<>(); private final List insertNodeBuffers = new ArrayList<>(); private final List tabletBuffers = new ArrayList<>(); - // limit in buffer size - private final PipeMemoryBlock allocatedMemoryBlock; - // Used to rate limit when transferring data private final Map, Long> pipe2BytesAccumulated = new HashMap<>(); PipeTabletEventPlainBatch(final int maxDelayInMs, final long requestMaxBatchSizeInBytes) { - super(maxDelayInMs); - this.allocatedMemoryBlock = - PipeDataNodeResourceManager.memory() - .tryAllocate(requestMaxBatchSizeInBytes) - .setShrinkMethod(oldMemory -> Math.max(oldMemory / 2, 0)) - .setShrinkCallback( - (oldMemory, newMemory) -> - LOGGER.info( - "The batch size limit has shrunk from {} to {}.", oldMemory, newMemory)) - .setExpandMethod( - oldMemory -> Math.min(Math.max(oldMemory, 1) * 2, requestMaxBatchSizeInBytes)) - .setExpandCallback( - (oldMemory, newMemory) -> - LOGGER.info( - "The batch size limit has expanded from {} to {}.", oldMemory, newMemory)); - - if (getMaxBatchSizeInBytes() != requestMaxBatchSizeInBytes) { - LOGGER.info( - "PipeTabletEventBatch: the max batch size is adjusted from {} to {} due to the " - + "memory restriction", - requestMaxBatchSizeInBytes, - getMaxBatchSizeInBytes()); - } + super(maxDelayInMs, requestMaxBatchSizeInBytes, null); + } + + PipeTabletEventPlainBatch( + final int maxDelayInMs, + final long requestMaxBatchSizeInBytes, + final TriLongConsumer recordMetric) { + super(maxDelayInMs, requestMaxBatchSizeInBytes, recordMetric); } @Override @@ -113,11 +93,6 @@ public PipeTransferTabletBatchReq toTPipeTransferReq() throws IOException { binaryBuffers, insertNodeBuffers, tabletBuffers); } - @Override - protected long getMaxBatchSizeInBytes() { - return allocatedMemoryBlock.getMemoryUsageInBytes(); - } - public Map, Long> deepCopyPipeName2BytesAccumulated() { return new HashMap<>(pipe2BytesAccumulated); } @@ -134,8 +109,7 @@ private int buildTabletInsertionBuffer(final TabletInsertionEvent event) (PipeInsertNodeTabletInsertionEvent) event; // Read the bytebuffer from the wal file and transfer it directly without serializing or // deserializing if possible - final InsertNode insertNode = - pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible(); + final InsertNode insertNode = 
pipeInsertNodeTabletInsertionEvent.getInsertNode(); if (Objects.isNull(insertNode)) { buffer = pipeInsertNodeTabletInsertionEvent.getByteBuffer(); binaryBuffers.add(buffer); @@ -156,11 +130,4 @@ private int buildTabletInsertionBuffer(final TabletInsertionEvent event) } return buffer.limit(); } - - @Override - public synchronized void close() { - super.close(); - - allocatedMemoryBlock.close(); - } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventTsFileBatch.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTabletEventTsFileBatch.java similarity index 57% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventTsFileBatch.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTabletEventTsFileBatch.java index f74b7749c4126..a5af1b68ad412 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventTsFileBatch.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTabletEventTsFileBatch.java @@ -17,46 +17,63 @@ * under the License. */ -package org.apache.iotdb.db.pipe.connector.payload.evolvable.batch; +package org.apache.iotdb.db.pipe.sink.payload.evolvable.batch; +import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.exception.DiskSpaceInsufficientException; -import org.apache.iotdb.db.pipe.connector.util.PipeTabletEventSorter; import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil; +import org.apache.iotdb.db.pipe.sink.util.PipeTabletEventSorter; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId; +import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertTabletNode; +import org.apache.iotdb.db.storageengine.dataregion.flush.MemTableFlushTask; +import org.apache.iotdb.db.storageengine.dataregion.memtable.IMemTable; +import org.apache.iotdb.db.storageengine.dataregion.memtable.PrimitiveMemTable; import org.apache.iotdb.db.storageengine.rescon.disk.FolderManager; import org.apache.iotdb.db.storageengine.rescon.disk.strategy.DirectoryStrategyType; import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; import org.apache.iotdb.pipe.api.exception.PipeException; +import org.apache.iotdb.session.util.RetryUtils; import org.apache.commons.io.FileUtils; import org.apache.tsfile.common.constant.TsFileConstant; +import org.apache.tsfile.enums.TSDataType; import org.apache.tsfile.exception.write.WriteProcessException; import org.apache.tsfile.read.common.Path; +import org.apache.tsfile.utils.BitMap; +import org.apache.tsfile.utils.DateUtils; import org.apache.tsfile.utils.Pair; import org.apache.tsfile.write.TsFileWriter; import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.IMeasurementSchema; import org.apache.tsfile.write.schema.MeasurementSchema; +import org.apache.tsfile.write.writer.RestorableTsFileIOWriter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; import java.io.IOException; +import java.time.LocalDate; import java.util.ArrayList; import java.util.Arrays; import 
java.util.Collections; import java.util.Comparator; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Objects; +import java.util.Set; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; +import java.util.stream.IntStream; public class PipeTabletEventTsFileBatch extends PipeTabletEventBatch { @@ -70,8 +87,6 @@ public class PipeTabletEventTsFileBatch extends PipeTabletEventBatch { private static final String TS_FILE_PREFIX = "tb"; // tb means tablet batch private final AtomicLong tsFileIdGenerator = new AtomicLong(0); - private final long maxSizeInBytes; - private final Map, Double> pipeName2WeightMap = new HashMap<>(); private final List tabletList = new ArrayList<>(); @@ -81,9 +96,15 @@ public class PipeTabletEventTsFileBatch extends PipeTabletEventBatch { private volatile TsFileWriter fileWriter; public PipeTabletEventTsFileBatch(final int maxDelayInMs, final long requestMaxBatchSizeInBytes) { - super(maxDelayInMs); + this(maxDelayInMs, requestMaxBatchSizeInBytes, null); + } + + public PipeTabletEventTsFileBatch( + final int maxDelayInMs, + final long requestMaxBatchSizeInBytes, + final TriLongConsumer recordMetric) { + super(maxDelayInMs, requestMaxBatchSizeInBytes, recordMetric); - this.maxSizeInBytes = requestMaxBatchSizeInBytes; try { this.batchFileBaseDir = getNextBaseDir(); } catch (final Exception e) { @@ -174,7 +195,9 @@ private void bufferTablet( final boolean isAligned) { new PipeTabletEventSorter(tablet).deduplicateAndSortTimestampsIfNecessary(); - totalBufferSize += PipeMemoryWeightUtil.calculateTabletSizeInBytes(tablet); + // TODO: Currently, PipeTsFileBuilderV2 still uses a fallback builder, so memory table writing + // and storing temporary tablets require double the memory. + totalBufferSize += PipeMemoryWeightUtil.calculateTabletSizeInBytes(tablet) * 2; pipeName2WeightMap.compute( new Pair<>(pipeName, creationTime), @@ -194,7 +217,18 @@ public Map, Double> deepCopyPipe2WeightMap() { } public synchronized List sealTsFiles() throws IOException, WriteProcessException { - return isClosed ? 
Collections.emptyList() : writeTabletsToTsFiles(); + if (isClosed) { + return Collections.emptyList(); + } + try { + return new PipeTsFileBuilderV2().writeTabletsToTsFiles(); + } catch (org.apache.iotdb.db.exception.WriteProcessException e) { + LOGGER.warn( + "Exception occurred when PipeTsFileBuilderV2 writing tablets to tsfile, use fallback tsfile builder: {}", + e.getMessage(), + e); + return writeTabletsToTsFiles(); + } } private List writeTabletsToTsFiles() throws IOException, WriteProcessException { @@ -242,18 +276,7 @@ private List writeTabletsToTsFiles() throws IOException, WriteProcessExcep // Try making the tsfile size as large as possible while (!device2TabletsLinkedList.isEmpty()) { if (Objects.isNull(fileWriter)) { - fileWriter = - new TsFileWriter( - new File( - batchFileBaseDir, - TS_FILE_PREFIX - + "_" - + IoTDBDescriptor.getInstance().getConfig().getDataNodeId() - + "_" - + currentBatchId.get() - + "_" - + tsFileIdGenerator.getAndIncrement() - + TsFileConstant.TSFILE_SUFFIX)); + fileWriter = new TsFileWriter(createFile()); } try { @@ -311,6 +334,62 @@ private List writeTabletsToTsFiles() throws IOException, WriteProcessExcep return sealedFiles; } + private Tablet tryBestToAggregateTablets( + final String deviceId, final LinkedList tablets) { + if (tablets.isEmpty()) { + return null; + } + + // Retrieve the first tablet to serve as the basis for the aggregation + final Tablet firstTablet = tablets.peekFirst(); + final long[] aggregationTimestamps = firstTablet.timestamps; + final int aggregationRow = firstTablet.rowSize; + final int aggregationMaxRow = firstTablet.getMaxRowNumber(); + + // Prepare lists to accumulate schemas, values, and bitMaps + final List aggregatedSchemas = new ArrayList<>(); + final List aggregatedValues = new ArrayList<>(); + final List aggregatedBitMaps = new ArrayList<>(); + + // Iterate and poll tablets from the head that satisfy the aggregation criteria + while (!tablets.isEmpty()) { + final Tablet tablet = tablets.peekFirst(); + if (Arrays.equals(tablet.timestamps, aggregationTimestamps) + && tablet.rowSize == aggregationRow + && tablet.getMaxRowNumber() == aggregationMaxRow) { + // Aggregate the current tablet's data + aggregatedSchemas.addAll(tablet.getSchemas()); + aggregatedValues.addAll(Arrays.asList(tablet.values)); + aggregatedBitMaps.addAll(Arrays.asList(tablet.bitMaps)); + // Remove the aggregated tablet + tablets.pollFirst(); + } else { + // Stop aggregating once a tablet does not meet the criteria + break; + } + } + + // Remove duplicates from aggregatedSchemas, record the index of the first occurrence, and + // filter out the corresponding values in aggregatedValues and aggregatedBitMaps based on that + // index + final Set seen = new HashSet<>(); + final List distinctIndices = + IntStream.range(0, aggregatedSchemas.size()) + .filter(i -> Objects.nonNull(aggregatedSchemas.get(i))) + .filter(i -> seen.add(aggregatedSchemas.get(i))) // Only keep the first occurrence index + .boxed() + .collect(Collectors.toList()); + + // Construct a new aggregated Tablet using the deduplicated data + return new Tablet( + deviceId, + distinctIndices.stream().map(aggregatedSchemas::get).collect(Collectors.toList()), + aggregationTimestamps, + distinctIndices.stream().map(aggregatedValues::get).toArray(), + distinctIndices.stream().map(aggregatedBitMaps::get).toArray(BitMap[]::new), + aggregationRow); + } + private void tryBestToWriteTabletsIntoOneFile( final LinkedHashMap> device2TabletsLinkedList, final Map device2Aligned) @@ -327,14 +406,14 @@ private 
void tryBestToWriteTabletsIntoOneFile( Tablet lastTablet = null; while (!tablets.isEmpty()) { - final Tablet tablet = tablets.peekFirst(); + final Tablet tablet = tryBestToAggregateTablets(deviceId, tablets); if (Objects.isNull(lastTablet) // lastTablet.rowSize is not 0 || lastTablet.timestamps[lastTablet.rowSize - 1] < tablet.timestamps[0]) { tabletsToWrite.add(tablet); lastTablet = tablet; - tablets.pollFirst(); } else { + tablets.addFirst(tablet); break; } } @@ -344,16 +423,30 @@ private void tryBestToWriteTabletsIntoOneFile( } final boolean isAligned = device2Aligned.get(deviceId); - for (final Tablet tablet : tabletsToWrite) { - if (isAligned) { - try { - fileWriter.registerAlignedTimeseries(new Path(tablet.deviceId), tablet.getSchemas()); - } catch (final WriteProcessException ignore) { - // Do nothing if the timeSeries has been registered - } - + if (isAligned) { + final Map> deviceId2MeasurementSchemas = new HashMap<>(); + tabletsToWrite.forEach( + tablet -> + deviceId2MeasurementSchemas.compute( + tablet.deviceId, + (k, v) -> { + if (Objects.isNull(v)) { + return new ArrayList<>(tablet.getSchemas()); + } + v.addAll(tablet.getSchemas()); + return v; + })); + for (final Entry> deviceIdWithMeasurementSchemas : + deviceId2MeasurementSchemas.entrySet()) { + fileWriter.registerAlignedTimeseries( + new Path(deviceIdWithMeasurementSchemas.getKey()), + deviceIdWithMeasurementSchemas.getValue()); + } + for (final Tablet tablet : tabletsToWrite) { fileWriter.writeAligned(tablet); - } else { + } + } else { + for (final Tablet tablet : tabletsToWrite) { for (final MeasurementSchema schema : tablet.getSchemas()) { try { fileWriter.registerTimeseries(new Path(tablet.deviceId), schema); @@ -368,11 +461,6 @@ private void tryBestToWriteTabletsIntoOneFile( } } - @Override - protected long getMaxBatchSizeInBytes() { - return maxSizeInBytes; - } - @Override public synchronized void onSuccess() { super.onSuccess(); @@ -409,7 +497,7 @@ public synchronized void close() { } try { - FileUtils.delete(fileWriter.getIOWriter().getFile()); + RetryUtils.retryOnException(() -> FileUtils.delete(fileWriter.getIOWriter().getFile())); } catch (final Exception e) { LOGGER.info( "Batch id = {}: Failed to delete the tsfile {} when trying to close batch, because {}", @@ -422,4 +510,127 @@ public synchronized void close() { fileWriter = null; } } + + protected File createFile() throws IOException { + return new File( + batchFileBaseDir, + TS_FILE_PREFIX + + "_" + + IoTDBDescriptor.getInstance().getConfig().getDataNodeId() + + "_" + + currentBatchId.get() + + "_" + + tsFileIdGenerator.getAndIncrement() + + TsFileConstant.TSFILE_SUFFIX); + } + + /////////////////////// PipeTsFileBuilderV2 ////////////////////////// + + private static final PlanNodeId PLACEHOLDER_PLAN_NODE_ID = + new PlanNodeId("PipeTreeModelTsFileBuilderV2"); + + private class PipeTsFileBuilderV2 { + + private List writeTabletsToTsFiles() + throws org.apache.iotdb.db.exception.WriteProcessException { + final IMemTable memTable = new PrimitiveMemTable(null, null); + final List sealedFiles = new ArrayList<>(); + try (final RestorableTsFileIOWriter writer = new RestorableTsFileIOWriter(createFile())) { + writeTabletsIntoOneFile(memTable, writer); + sealedFiles.add(writer.getFile()); + } catch (final Exception e) { + LOGGER.warn( + "Batch id = {}: Failed to write tablets into tsfile, because {}", + currentBatchId.get(), + e.getMessage(), + e); + // TODO: handle ex + throw new org.apache.iotdb.db.exception.WriteProcessException(e); + } finally { + 
+        memTable.release();
+      }
+
+      return sealedFiles;
+    }
+
+    private void writeTabletsIntoOneFile(
+        final IMemTable memTable, final RestorableTsFileIOWriter writer) throws Exception {
+      for (int i = 0, size = tabletList.size(); i < size; ++i) {
+        final Tablet tablet = tabletList.get(i);
+        MeasurementSchema[] measurementSchemas =
+            tablet.getSchemas().stream()
+                .map(schema -> (MeasurementSchema) schema)
+                .toArray(MeasurementSchema[]::new);
+        Object[] values = Arrays.copyOf(tablet.values, tablet.values.length);
+        BitMap[] bitMaps = Arrays.copyOf(tablet.bitMaps, tablet.bitMaps.length);
+
+        // convert date value to int refer to
+        // org.apache.iotdb.db.storageengine.dataregion.memtable.WritableMemChunk.writeNonAlignedTablet
+        int validatedIndex = 0;
+        for (int j = 0; j < tablet.getSchemas().size(); ++j) {
+          final IMeasurementSchema schema = measurementSchemas[j];
+          if (Objects.isNull(schema)) {
+            continue;
+          }
+
+          if (Objects.equals(TSDataType.DATE, schema.getType())
+              && values[j] instanceof LocalDate[]) {
+            final LocalDate[] dates = ((LocalDate[]) values[j]);
+            final int[] dateValues = new int[dates.length];
+            for (int k = 0; k < Math.min(dates.length, tablet.rowSize); k++) {
+              dateValues[k] = DateUtils.parseDateExpressionToInt(dates[k]);
+            }
+            values[j] = dateValues;
+          }
+          measurementSchemas[validatedIndex] = measurementSchemas[j];
+          values[validatedIndex] = values[j];
+          bitMaps[validatedIndex] = bitMaps[j];
+          validatedIndex++;
+        }
+
+        if (validatedIndex != measurementSchemas.length) {
+          values = Arrays.copyOf(values, validatedIndex);
+          measurementSchemas = Arrays.copyOf(measurementSchemas, validatedIndex);
+          bitMaps = Arrays.copyOf(bitMaps, validatedIndex);
+        }
+
+        final InsertTabletNode insertTabletNode =
+            new InsertTabletNode(
+                PLACEHOLDER_PLAN_NODE_ID,
+                new PartialPath(tablet.deviceId),
+                isTabletAlignedList.get(i),
+                Arrays.stream(measurementSchemas)
+                    .map(IMeasurementSchema::getMeasurementId)
+                    .toArray(String[]::new),
+                Arrays.stream(measurementSchemas)
+                    .map(IMeasurementSchema::getType)
+                    .toArray(TSDataType[]::new),
+                // TODO: cast
+                measurementSchemas,
+                tablet.timestamps,
+                bitMaps,
+                values,
+                tablet.rowSize);
+
+        final int start = 0;
+        final int end = insertTabletNode.getRowCount();
+
+        try {
+          if (insertTabletNode.isAligned()) {
+            memTable.insertAlignedTablet(insertTabletNode, start, end);
+          } else {
+            memTable.insertTablet(insertTabletNode, start, end);
+          }
+        } catch (final org.apache.iotdb.db.exception.WriteProcessException e) {
+          throw new org.apache.iotdb.db.exception.WriteProcessException(e);
+        }
+      }
+
+      final MemTableFlushTask memTableFlushTask =
+          new MemTableFlushTask(memTable, writer, null, null);
+      memTableFlushTask.syncFlushMemTable();
+
+      writer.endFile();
+    }
+  }
+}
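The deduplication step in tryBestToAggregateTablets keeps only the first occurrence of each measurement schema while preserving column order, then uses the surviving indices to filter the parallel value and bitmap columns in lockstep. Below is a minimal, self-contained sketch of that idiom, run on plain strings instead of IMeasurementSchema; class and variable names here are illustrative only, not part of the patch.

import java.util.*;
import java.util.stream.*;

public class FirstOccurrenceDedup {
  public static void main(String[] args) {
    // Stand-ins for the schema and value columns of the aggregated tablets.
    List<String> schemas = Arrays.asList("s1", "s2", "s1", null, "s3", "s2");
    List<Integer> values = Arrays.asList(10, 20, 11, 0, 30, 21);

    final Set<String> seen = new HashSet<>();
    final List<Integer> distinctIndices =
        IntStream.range(0, schemas.size())
            .filter(i -> Objects.nonNull(schemas.get(i))) // drop null schemas
            .filter(i -> seen.add(schemas.get(i))) // keep only the first occurrence
            .boxed()
            .collect(Collectors.toList());

    // Filter every parallel column by the surviving indices, as the batch does
    // for schemas, values, and bitmaps.
    System.out.println(
        distinctIndices.stream().map(schemas::get).collect(Collectors.toList())); // [s1, s2, s3]
    System.out.println(
        distinctIndices.stream().map(values::get).collect(Collectors.toList())); // [10, 20, 30]
  }
}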
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTransferBatchReqBuilder.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTransferBatchReqBuilder.java
new file mode 100644
index 0000000000000..9fa706e985f1e
--- /dev/null
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTransferBatchReqBuilder.java
@@ -0,0 +1,265 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.pipe.sink.payload.evolvable.batch;
+
+import org.apache.iotdb.common.rpc.thrift.TEndPoint;
+import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
+import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent;
+import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
+import org.apache.iotdb.db.pipe.sink.client.IoTDBDataNodeCacheLeaderClientManager;
+import org.apache.iotdb.db.storageengine.dataregion.wal.exception.WALPipeException;
+import org.apache.iotdb.metrics.impl.DoNothingHistogram;
+import org.apache.iotdb.metrics.type.Histogram;
+import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
+import org.apache.iotdb.pipe.api.event.Event;
+import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
+
+import org.apache.tsfile.utils.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.ConcurrentHashMap;
+
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_FORMAT_HYBRID_VALUE;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_FORMAT_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_FORMAT_TS_FILE_VALUE;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_BATCH_DELAY_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_BATCH_DELAY_MS_DEFAULT_VALUE;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_BATCH_DELAY_MS_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_BATCH_SIZE_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_PLAIN_BATCH_SIZE_DEFAULT_VALUE;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_TS_FILE_BATCH_DELAY_DEFAULT_VALUE;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_TS_FILE_BATCH_SIZE_DEFAULT_VALUE;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_LEADER_CACHE_ENABLE_DEFAULT_VALUE;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_LEADER_CACHE_ENABLE_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_FORMAT_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_IOTDB_BATCH_DELAY_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_IOTDB_BATCH_DELAY_MS_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_IOTDB_BATCH_SIZE_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_LEADER_CACHE_ENABLE_KEY;
+
+public class PipeTransferBatchReqBuilder implements AutoCloseable {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(PipeTransferBatchReqBuilder.class);
+
+  private final boolean useLeaderCache;
+
+  private final int requestMaxDelayInMs;
+  private final long requestMaxBatchSizeInBytes;
+
+  private Histogram tabletBatchSizeHistogram = new DoNothingHistogram();
+  private Histogram tsFileBatchSizeHistogram = new DoNothingHistogram();
+  private Histogram tabletBatchTimeIntervalHistogram = new DoNothingHistogram();
+  private Histogram tsFileBatchTimeIntervalHistogram = new DoNothingHistogram();
+
+  private Histogram eventSizeHistogram = new DoNothingHistogram();
+
+  // If the leader cache is disabled (or unable to find the endpoint of event in the leader cache),
+  // the event will be stored in the default batch.
+  private final PipeTabletEventBatch defaultBatch;
+  // If the leader cache is enabled, the batch will be divided by the leader endpoint,
+  // each endpoint has a batch.
+  // This is only used in plain batch since tsfile does not return redirection info.
+  private final Map<TEndPoint, PipeTabletEventPlainBatch> endPointToBatch =
+      new ConcurrentHashMap<>();
+
+  public PipeTransferBatchReqBuilder(final PipeParameters parameters) {
+    final boolean usingTsFileBatch =
+        parameters
+            .getStringOrDefault(
+                Arrays.asList(CONNECTOR_FORMAT_KEY, SINK_FORMAT_KEY), CONNECTOR_FORMAT_HYBRID_VALUE)
+            .equals(CONNECTOR_FORMAT_TS_FILE_VALUE);
+
+    useLeaderCache =
+        !usingTsFileBatch
+            && parameters.getBooleanOrDefault(
+                Arrays.asList(SINK_LEADER_CACHE_ENABLE_KEY, CONNECTOR_LEADER_CACHE_ENABLE_KEY),
+                CONNECTOR_LEADER_CACHE_ENABLE_DEFAULT_VALUE);
+
+    final Integer requestMaxDelayInMillis =
+        parameters.getIntByKeys(CONNECTOR_IOTDB_BATCH_DELAY_MS_KEY, SINK_IOTDB_BATCH_DELAY_MS_KEY);
+    if (Objects.isNull(requestMaxDelayInMillis)) {
+      final int requestMaxDelayConfig =
+          parameters.getIntOrDefault(
+              Arrays.asList(CONNECTOR_IOTDB_BATCH_DELAY_KEY, SINK_IOTDB_BATCH_DELAY_KEY),
+              usingTsFileBatch
+                  ? CONNECTOR_IOTDB_TS_FILE_BATCH_DELAY_DEFAULT_VALUE * 1000
+                  : CONNECTOR_IOTDB_BATCH_DELAY_MS_DEFAULT_VALUE);
+      requestMaxDelayInMs = requestMaxDelayConfig < 0 ? Integer.MAX_VALUE : requestMaxDelayConfig;
+    } else {
+      requestMaxDelayInMs =
+          requestMaxDelayInMillis < 0 ? Integer.MAX_VALUE : requestMaxDelayInMillis;
+    }
+    requestMaxBatchSizeInBytes =
+        parameters.getLongOrDefault(
+            Arrays.asList(CONNECTOR_IOTDB_BATCH_SIZE_KEY, SINK_IOTDB_BATCH_SIZE_KEY),
+            usingTsFileBatch
+                ? CONNECTOR_IOTDB_TS_FILE_BATCH_SIZE_DEFAULT_VALUE
+                : CONNECTOR_IOTDB_PLAIN_BATCH_SIZE_DEFAULT_VALUE);
+    this.defaultBatch =
+        usingTsFileBatch
+            ? new PipeTabletEventTsFileBatch(
+                requestMaxDelayInMs, requestMaxBatchSizeInBytes, this::recordTsFileMetric)
+            : new PipeTabletEventPlainBatch(
+                requestMaxDelayInMs, requestMaxBatchSizeInBytes, this::recordTabletMetric);
+  }
+
+  /**
+   * Try to offer the {@link Event} into the corresponding batch if the given {@link Event} is not
+   * duplicated.
+   *
+   * @param event the given {@link Event}
+   */
+  public synchronized void onEvent(final TabletInsertionEvent event)
+      throws IOException, WALPipeException {
+    if (!(event instanceof EnrichedEvent)) {
+      LOGGER.warn(
+          "Unsupported event {} type {} when building transfer request", event, event.getClass());
+      return;
+    }
+
+    if (!useLeaderCache) {
+      defaultBatch.onEvent(event);
+      return;
+    }
+
+    String deviceId = null;
+    if (event instanceof PipeRawTabletInsertionEvent) {
+      deviceId = ((PipeRawTabletInsertionEvent) event).getDeviceId();
+    } else if (event instanceof PipeInsertNodeTabletInsertionEvent) {
+      deviceId = ((PipeInsertNodeTabletInsertionEvent) event).getDeviceId();
+    }
+
+    if (Objects.isNull(deviceId)) {
+      defaultBatch.onEvent(event);
+      return;
+    }
+
+    final TEndPoint endPoint =
+        IoTDBDataNodeCacheLeaderClientManager.LEADER_CACHE_MANAGER.getLeaderEndPoint(deviceId);
+    if (Objects.isNull(endPoint)) {
+      defaultBatch.onEvent(event);
+      return;
+    }
+    endPointToBatch
+        .computeIfAbsent(
+            endPoint,
+            k ->
+                new PipeTabletEventPlainBatch(
+                    requestMaxDelayInMs, requestMaxBatchSizeInBytes, this::recordTabletMetric))
+        .onEvent(event);
+  }
+
+  /** Get all batches that have at least 1 event. */
+  public synchronized List<Pair<TEndPoint, PipeTabletEventBatch>>
+      getAllNonEmptyAndShouldEmitBatches() {
+    final List<Pair<TEndPoint, PipeTabletEventBatch>> nonEmptyAndShouldEmitBatches =
+        new ArrayList<>();
+    if (!defaultBatch.isEmpty() && defaultBatch.shouldEmit()) {
+      nonEmptyAndShouldEmitBatches.add(new Pair<>(null, defaultBatch));
+    }
+    endPointToBatch.forEach(
+        (endPoint, batch) -> {
+          if (!batch.isEmpty() && batch.shouldEmit()) {
+            nonEmptyAndShouldEmitBatches.add(new Pair<>(endPoint, batch));
+          }
+        });
+    return nonEmptyAndShouldEmitBatches;
+  }
+
+  public boolean isEmpty() {
+    return defaultBatch.isEmpty()
+        && endPointToBatch.values().stream().allMatch(PipeTabletEventPlainBatch::isEmpty);
+  }
+
+  public synchronized void discardEventsOfPipe(final String pipeNameToDrop, final int regionId) {
+    defaultBatch.discardEventsOfPipe(pipeNameToDrop, regionId);
+    endPointToBatch.values().forEach(batch -> batch.discardEventsOfPipe(pipeNameToDrop, regionId));
+  }
+
+  public int size() {
+    try {
+      return defaultBatch.events.size()
+          + endPointToBatch.values().stream()
+              .map(batch -> batch.events.size())
+              .reduce(0, Integer::sum);
+    } catch (final Exception e) {
+      LOGGER.warn(
+          "Failed to get the size of PipeTransferBatchReqBuilder, return 0. Exception: {}",
Exception: {}", + e.getMessage(), + e); + return 0; + } + } + + @Override + public synchronized void close() { + defaultBatch.close(); + endPointToBatch.values().forEach(PipeTabletEventPlainBatch::close); + } + + public void recordTabletMetric(long timeInterval, long bufferSize, long eventSize) { + this.tabletBatchTimeIntervalHistogram.update(timeInterval); + this.tabletBatchSizeHistogram.update(bufferSize); + this.eventSizeHistogram.update(eventSize); + } + + public void recordTsFileMetric(long timeInterval, long bufferSize, long eventSize) { + this.tsFileBatchTimeIntervalHistogram.update(timeInterval); + this.tsFileBatchSizeHistogram.update(bufferSize); + this.eventSizeHistogram.update(eventSize); + } + + public void setTabletBatchSizeHistogram(Histogram tabletBatchSizeHistogram) { + if (tabletBatchSizeHistogram != null) { + this.tabletBatchSizeHistogram = tabletBatchSizeHistogram; + } + } + + public void setTsFileBatchSizeHistogram(Histogram tsFileBatchSizeHistogram) { + if (tsFileBatchSizeHistogram != null) { + this.tsFileBatchSizeHistogram = tsFileBatchSizeHistogram; + } + } + + public void setTabletBatchTimeIntervalHistogram(Histogram tabletBatchTimeIntervalHistogram) { + if (tabletBatchTimeIntervalHistogram != null) { + this.tabletBatchTimeIntervalHistogram = tabletBatchTimeIntervalHistogram; + } + } + + public void setTsFileBatchTimeIntervalHistogram(Histogram tsFileBatchTimeIntervalHistogram) { + if (tsFileBatchTimeIntervalHistogram != null) { + this.tsFileBatchTimeIntervalHistogram = tsFileBatchTimeIntervalHistogram; + } + } + + public void setEventSizeHistogram(Histogram eventSizeHistogram) { + if (eventSizeHistogram != null) { + this.eventSizeHistogram = eventSizeHistogram; + } + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferDataNodeHandshakeV1Req.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferDataNodeHandshakeV1Req.java similarity index 90% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferDataNodeHandshakeV1Req.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferDataNodeHandshakeV1Req.java index dfa103c802f6b..1b765406dbb8d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferDataNodeHandshakeV1Req.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferDataNodeHandshakeV1Req.java @@ -17,10 +17,10 @@ * under the License. 
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferDataNodeHandshakeV1Req.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferDataNodeHandshakeV1Req.java
similarity index 90%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferDataNodeHandshakeV1Req.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferDataNodeHandshakeV1Req.java
index dfa103c802f6b..1b765406dbb8d 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferDataNodeHandshakeV1Req.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferDataNodeHandshakeV1Req.java
@@ -17,10 +17,10 @@
  * under the License.
  */
 
-package org.apache.iotdb.db.pipe.connector.payload.evolvable.request;
+package org.apache.iotdb.db.pipe.sink.payload.evolvable.request;
 
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType;
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferHandshakeV1Req;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferHandshakeV1Req;
 import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
 
 import java.io.IOException;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferDataNodeHandshakeV2Req.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferDataNodeHandshakeV2Req.java
similarity index 90%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferDataNodeHandshakeV2Req.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferDataNodeHandshakeV2Req.java
index 4a46704ed47cc..4b9e7a265890f 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferDataNodeHandshakeV2Req.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferDataNodeHandshakeV2Req.java
@@ -17,10 +17,10 @@
  * under the License.
  */
 
-package org.apache.iotdb.db.pipe.connector.payload.evolvable.request;
+package org.apache.iotdb.db.pipe.sink.payload.evolvable.request;
 
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType;
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferHandshakeV2Req;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferHandshakeV2Req;
 import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
 
 import java.io.IOException;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferPlanNodeReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferPlanNodeReq.java
similarity index 87%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferPlanNodeReq.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferPlanNodeReq.java
index 580dbe2ed4ca0..12eb95ece4f28 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferPlanNodeReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferPlanNodeReq.java
@@ -17,10 +17,10 @@
  * under the License.
  */
 
-package org.apache.iotdb.db.pipe.connector.payload.evolvable.request;
+package org.apache.iotdb.db.pipe.sink.payload.evolvable.request;
 
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.IoTDBConnectorRequestVersion;
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.IoTDBSinkRequestVersion;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeType;
 import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
@@ -52,7 +52,7 @@ public static PipeTransferPlanNodeReq toTPipeTransferReq(PlanNode planNode) {
 
     req.planNode = planNode;
 
-    req.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+    req.version = IoTDBSinkRequestVersion.VERSION_1.getVersion();
     req.type = PipeRequestType.TRANSFER_SCHEMA_PLAN.getType();
     req.body = planNode.serializeToByteBuffer();
 
@@ -66,7 +66,6 @@ public static PipeTransferPlanNodeReq fromTPipeTransferReq(TPipeTransferReq tran
 
     planNodeReq.version = transferReq.version;
     planNodeReq.type = transferReq.type;
-    planNodeReq.body = transferReq.body;
 
     return planNodeReq;
   }
@@ -76,7 +75,7 @@ public static PipeTransferPlanNodeReq fromTPipeTransferReq(TPipeTransferReq tran
   public static byte[] toTPipeTransferBytes(PlanNode planNode) throws IOException {
     try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
         final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
-      ReadWriteIOUtils.write(IoTDBConnectorRequestVersion.VERSION_1.getVersion(), outputStream);
+      ReadWriteIOUtils.write(IoTDBSinkRequestVersion.VERSION_1.getVersion(), outputStream);
       ReadWriteIOUtils.write(PipeRequestType.TRANSFER_SCHEMA_PLAN.getType(), outputStream);
       return BytesUtils.concatByteArray(
           byteArrayOutputStream.toByteArray(), planNode.serializeToByteBuffer().array());
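A recurring change across these request classes is that fromTPipeTransferReq no longer copies transferReq.body onto the decoded request. Presumably this avoids keeping a second reference to the (possibly large) serialized payload once it has been parsed; the sketch below shows the shape of the change with hypothetical simplified types, not the real Thrift-generated classes.

// Hypothetical simplified types for illustration only.
class RawReq {
  byte version;
  short type;
  byte[] body;
}

class DecodedReq extends RawReq {
  static DecodedReq fromTransferReq(RawReq transferReq) {
    final DecodedReq req = new DecodedReq();
    req.version = transferReq.version;
    req.type = transferReq.type;
    // Intentionally no longer copied: req.body = transferReq.body;
    // so the raw payload is not pinned by the decoded request object.
    return req;
  }
}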
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferSchemaSnapshotPieceReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferSchemaSnapshotPieceReq.java
similarity index 90%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferSchemaSnapshotPieceReq.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferSchemaSnapshotPieceReq.java
index 10da15faa03a8..f1e9d77685cd2 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferSchemaSnapshotPieceReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferSchemaSnapshotPieceReq.java
@@ -17,10 +17,10 @@
  * under the License.
  */
 
-package org.apache.iotdb.db.pipe.connector.payload.evolvable.request;
+package org.apache.iotdb.db.pipe.sink.payload.evolvable.request;
 
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType;
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferFilePieceReq;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFilePieceReq;
 import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
 
 import java.io.IOException;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferSchemaSnapshotSealReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferSchemaSnapshotSealReq.java
similarity index 94%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferSchemaSnapshotSealReq.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferSchemaSnapshotSealReq.java
index 70835e0ea4f78..2bfd213656838 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferSchemaSnapshotSealReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferSchemaSnapshotSealReq.java
@@ -17,10 +17,10 @@
  * under the License.
  */
 
-package org.apache.iotdb.db.pipe.connector.payload.evolvable.request;
+package org.apache.iotdb.db.pipe.sink.payload.evolvable.request;
 
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType;
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferFileSealReqV2;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFileSealReqV2;
 import org.apache.iotdb.db.queryengine.common.header.ColumnHeaderConstant;
 import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBatchReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletBatchReq.java
similarity index 96%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBatchReq.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletBatchReq.java
index 8090f6504897b..48bd1016763e2 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBatchReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletBatchReq.java
@@ -17,10 +17,10 @@
  * under the License.
  */
 
-package org.apache.iotdb.db.pipe.connector.payload.evolvable.request;
+package org.apache.iotdb.db.pipe.sink.payload.evolvable.request;
 
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.IoTDBConnectorRequestVersion;
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.IoTDBSinkRequestVersion;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType;
 import org.apache.iotdb.commons.utils.TestOnly;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.PlanFragment;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode;
@@ -126,7 +126,7 @@ public static PipeTransferTabletBatchReq toTPipeTransferReq(
 
     // batchReq.binaryReqs, batchReq.insertNodeReqs, batchReq.tabletReqs are empty
     // when this method is called from PipeTransferTabletBatchReqBuilder.toTPipeTransferReq()
-    batchReq.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+    batchReq.version = IoTDBSinkRequestVersion.VERSION_1.getVersion();
     batchReq.type = PipeRequestType.TRANSFER_TABLET_BATCH.getType();
     try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
         final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
@@ -182,7 +182,6 @@ public static PipeTransferTabletBatchReq fromTPipeTransferReq(
 
     batchReq.version = transferReq.version;
     batchReq.type = transferReq.type;
-    batchReq.body = transferReq.body;
 
     return batchReq;
   }
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBinaryReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletBinaryReq.java
similarity index 90%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBinaryReq.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletBinaryReq.java
index 5e9e0a39103f3..180490a6af756 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBinaryReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletBinaryReq.java
@@ -17,10 +17,10 @@
  * under the License.
  */
 
-package org.apache.iotdb.db.pipe.connector.payload.evolvable.request;
+package org.apache.iotdb.db.pipe.sink.payload.evolvable.request;
 
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.IoTDBConnectorRequestVersion;
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.IoTDBSinkRequestVersion;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType;
 import org.apache.iotdb.db.pipe.receiver.protocol.thrift.IoTDBDataNodeReceiver;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode;
@@ -75,7 +75,7 @@ public static PipeTransferTabletBinaryReq toTPipeTransferReq(final ByteBuffer by
     final PipeTransferTabletBinaryReq req = new PipeTransferTabletBinaryReq();
     req.byteBuffer = byteBuffer;
 
-    req.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+    req.version = IoTDBSinkRequestVersion.VERSION_1.getVersion();
     req.type = PipeRequestType.TRANSFER_TABLET_BINARY.getType();
     req.body = byteBuffer;
 
@@ -89,7 +89,6 @@ public static PipeTransferTabletBinaryReq fromTPipeTransferReq(
 
     binaryReq.version = transferReq.version;
     binaryReq.type = transferReq.type;
-    binaryReq.body = transferReq.body;
 
     return binaryReq;
   }
@@ -99,7 +98,7 @@ public static PipeTransferTabletBinaryReq fromTPipeTransferReq(
   public static byte[] toTPipeTransferBytes(final ByteBuffer byteBuffer) throws IOException {
     try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
         final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
-      ReadWriteIOUtils.write(IoTDBConnectorRequestVersion.VERSION_1.getVersion(), outputStream);
+      ReadWriteIOUtils.write(IoTDBSinkRequestVersion.VERSION_1.getVersion(), outputStream);
       ReadWriteIOUtils.write(PipeRequestType.TRANSFER_TABLET_BINARY.getType(), outputStream);
       return BytesUtils.concatByteArray(byteArrayOutputStream.toByteArray(), byteBuffer.array());
     }
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletInsertNodeReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletInsertNodeReq.java
similarity index 91%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletInsertNodeReq.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletInsertNodeReq.java
index c45417ba99da8..484419559da0c 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletInsertNodeReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletInsertNodeReq.java
@@ -17,10 +17,10 @@
  * under the License.
  */
 
-package org.apache.iotdb.db.pipe.connector.payload.evolvable.request;
+package org.apache.iotdb.db.pipe.sink.payload.evolvable.request;
 
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.IoTDBConnectorRequestVersion;
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.IoTDBSinkRequestVersion;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType;
 import org.apache.iotdb.db.pipe.receiver.protocol.thrift.IoTDBDataNodeReceiver;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeType;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode;
@@ -81,7 +81,7 @@ public static PipeTransferTabletInsertNodeReq toTPipeTransferReq(final InsertNod
 
     req.insertNode = insertNode;
 
-    req.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+    req.version = IoTDBSinkRequestVersion.VERSION_1.getVersion();
     req.type = PipeRequestType.TRANSFER_TABLET_INSERT_NODE.getType();
     req.body = insertNode.serializeToByteBuffer();
 
@@ -96,7 +96,6 @@ public static PipeTransferTabletInsertNodeReq fromTPipeTransferReq(
 
     insertNodeReq.version = transferReq.version;
     insertNodeReq.type = transferReq.type;
-    insertNodeReq.body = transferReq.body;
 
     return insertNodeReq;
   }
@@ -105,7 +104,7 @@ public static PipeTransferTabletInsertNodeReq fromTPipeTransferReq(
   public static byte[] toTPipeTransferBytes(final InsertNode insertNode) throws IOException {
     try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
         final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
-      ReadWriteIOUtils.write(IoTDBConnectorRequestVersion.VERSION_1.getVersion(), outputStream);
+      ReadWriteIOUtils.write(IoTDBSinkRequestVersion.VERSION_1.getVersion(), outputStream);
       ReadWriteIOUtils.write(PipeRequestType.TRANSFER_TABLET_INSERT_NODE.getType(), outputStream);
       return BytesUtils.concatByteArray(
           byteArrayOutputStream.toByteArray(), insertNode.serializeToByteBuffer().array());
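All of these toTPipeTransferBytes methods share one wire layout: a version marker, then a request-type code, then the request-specific payload. A minimal sketch of that framing with plain java.io follows; the byte/short field widths are assumptions for illustration, since the real code delegates to tsfile's ReadWriteIOUtils and PublicBAOS.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class PipeWireFormatSketch {
  // Frames a payload as [version][type][payload], mirroring the shape of
  // the toTPipeTransferBytes methods above (field widths assumed).
  static byte[] frame(byte version, short type, byte[] payload) throws IOException {
    final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    try (DataOutputStream out = new DataOutputStream(buffer)) {
      out.writeByte(version); // e.g. IoTDBSinkRequestVersion.VERSION_1
      out.writeShort(type); // e.g. PipeRequestType.TRANSFER_TABLET_RAW
      out.write(payload); // request-specific body
    }
    return buffer.toByteArray();
  }
}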
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletRawReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletRawReq.java
similarity index 76%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletRawReq.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletRawReq.java
index 61790f883ae58..47bf4d4489700 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletRawReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletRawReq.java
@@ -17,23 +17,18 @@
  * under the License.
  */
 
-package org.apache.iotdb.db.pipe.connector.payload.evolvable.request;
+package org.apache.iotdb.db.pipe.sink.payload.evolvable.request;
 
 import org.apache.iotdb.commons.exception.MetadataException;
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.IoTDBConnectorRequestVersion;
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType;
-import org.apache.iotdb.commons.utils.PathUtils;
-import org.apache.iotdb.db.pipe.connector.util.PipeTabletEventSorter;
-import org.apache.iotdb.db.queryengine.plan.parser.StatementGenerator;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.IoTDBSinkRequestVersion;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType;
+import org.apache.iotdb.db.pipe.sink.util.PipeTabletEventSorter;
 import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertTabletStatement;
 import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
-import org.apache.iotdb.service.rpc.thrift.TSInsertTabletReq;
-import org.apache.iotdb.session.util.SessionUtils;
 
 import org.apache.tsfile.utils.PublicBAOS;
 import org.apache.tsfile.utils.ReadWriteIOUtils;
 import org.apache.tsfile.write.record.Tablet;
-import org.apache.tsfile.write.schema.IMeasurementSchema;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -68,22 +63,7 @@ public InsertTabletStatement constructStatement() {
         return new InsertTabletStatement();
       }
 
-      final TSInsertTabletReq request = new TSInsertTabletReq();
-
-      for (final IMeasurementSchema measurementSchema : tablet.getSchemas()) {
-        request.addToMeasurements(measurementSchema.getMeasurementId());
-        request.addToTypes(measurementSchema.getType().ordinal());
-      }
-
-      request.setPrefixPath(tablet.deviceId);
-      request.setIsAligned(isAligned);
-      request.setTimestamps(SessionUtils.getTimeBuffer(tablet));
-      request.setValues(SessionUtils.getValueBuffer(tablet));
-      request.setSize(tablet.rowSize);
-      request.setMeasurements(
-          PathUtils.checkIsLegalSingleMeasurementsAndUpdate(request.getMeasurements()));
-
-      return StatementGenerator.createStatement(request);
+      return new InsertTabletStatement(tablet, isAligned);
     } catch (final MetadataException e) {
       LOGGER.warn("Generate Statement from tablet {} error.", tablet, e);
       return null;
@@ -111,7 +91,7 @@ public static PipeTransferTabletRawReq toTPipeTransferReq(
 
     tabletReq.tablet = tablet;
     tabletReq.isAligned = isAligned;
 
-    tabletReq.version = IoTDBConnectorRequestVersion.VERSION_1.getVersion();
+    tabletReq.version = IoTDBSinkRequestVersion.VERSION_1.getVersion();
     tabletReq.type = PipeRequestType.TRANSFER_TABLET_RAW.getType();
     try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
         final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
@@ -132,7 +112,6 @@ public static PipeTransferTabletRawReq fromTPipeTransferReq(final TPipeTransferR
 
     tabletReq.version = transferReq.version;
     tabletReq.type = transferReq.type;
-    tabletReq.body = transferReq.body;
 
    return tabletReq;
   }
@@ -143,7 +122,7 @@ public static byte[] toTPipeTransferBytes(final Tablet tablet, final boolean isA
       throws IOException {
     try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
         final DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) {
-      ReadWriteIOUtils.write(IoTDBConnectorRequestVersion.VERSION_1.getVersion(), outputStream);
+      ReadWriteIOUtils.write(IoTDBSinkRequestVersion.VERSION_1.getVersion(), outputStream);
       ReadWriteIOUtils.write(PipeRequestType.TRANSFER_TABLET_RAW.getType(), outputStream);
       tablet.serialize(outputStream);
       ReadWriteIOUtils.write(isAligned, outputStream);
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTsFilePieceReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFilePieceReq.java
similarity index 90%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTsFilePieceReq.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFilePieceReq.java
index 5c9fe434b4b51..0435d9a4c3720 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTsFilePieceReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFilePieceReq.java
@@ -17,10 +17,10 @@
  * under the License.
  */
 
-package org.apache.iotdb.db.pipe.connector.payload.evolvable.request;
+package org.apache.iotdb.db.pipe.sink.payload.evolvable.request;
 
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType;
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferFilePieceReq;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFilePieceReq;
 import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
 
 import java.io.IOException;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTsFilePieceWithModReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFilePieceWithModReq.java
similarity index 90%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTsFilePieceWithModReq.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFilePieceWithModReq.java
index 6a0706db322cc..7feb339f6d599 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTsFilePieceWithModReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFilePieceWithModReq.java
@@ -17,10 +17,10 @@
  * under the License.
  */
 
-package org.apache.iotdb.db.pipe.connector.payload.evolvable.request;
+package org.apache.iotdb.db.pipe.sink.payload.evolvable.request;
 
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType;
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferFilePieceReq;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFilePieceReq;
 import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
 
 import java.io.IOException;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTsFileSealReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFileSealReq.java
similarity index 89%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTsFileSealReq.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFileSealReq.java
index 6d8312c4d742c..9e559db1a88d9 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTsFileSealReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFileSealReq.java
@@ -17,10 +17,10 @@
  * under the License.
  */
 
-package org.apache.iotdb.db.pipe.connector.payload.evolvable.request;
+package org.apache.iotdb.db.pipe.sink.payload.evolvable.request;
 
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType;
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferFileSealReqV1;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFileSealReqV1;
 import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
 
 import java.io.IOException;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTsFileSealWithModReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFileSealWithModReq.java
similarity index 91%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTsFileSealWithModReq.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFileSealWithModReq.java
index 4080c176801f0..28959a1a0903c 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTsFileSealWithModReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTsFileSealWithModReq.java
@@ -17,10 +17,10 @@
  * under the License.
  */
 
-package org.apache.iotdb.db.pipe.connector.payload.evolvable.request;
+package org.apache.iotdb.db.pipe.sink.payload.evolvable.request;
 
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeRequestType;
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferFileSealReqV2;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFileSealReqV2;
 import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
 
 import java.io.IOException;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/legacy/DeletionPipeData.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/legacy/DeletionPipeData.java
similarity index 97%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/legacy/DeletionPipeData.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/legacy/DeletionPipeData.java
index 2dde22430a95b..0c2ed4c621bbf 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/legacy/DeletionPipeData.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/legacy/DeletionPipeData.java
@@ -18,7 +18,7 @@
  *
  */
 
-package org.apache.iotdb.db.pipe.connector.payload.legacy;
+package org.apache.iotdb.db.pipe.sink.payload.legacy;
 
 import org.apache.iotdb.commons.exception.IllegalPathException;
 import org.apache.iotdb.db.pipe.receiver.protocol.legacy.loader.DeletionLoader;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/legacy/PipeData.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/legacy/PipeData.java
similarity index 98%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/legacy/PipeData.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/legacy/PipeData.java
index d6417687a26a9..643ee849096a3 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/legacy/PipeData.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/legacy/PipeData.java
@@ -18,7 +18,7 @@
  *
  */
 
-package org.apache.iotdb.db.pipe.connector.payload.legacy;
+package org.apache.iotdb.db.pipe.sink.payload.legacy;
 
 import org.apache.iotdb.db.pipe.receiver.protocol.legacy.loader.ILoader;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/legacy/TsFilePipeData.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/legacy/TsFilePipeData.java
similarity index 98%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/legacy/TsFilePipeData.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/legacy/TsFilePipeData.java
index e688e4cc79e1e..a41e62b705f13 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/legacy/TsFilePipeData.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/legacy/TsFilePipeData.java
@@ -18,7 +18,7 @@
  *
  */
 
-package org.apache.iotdb.db.pipe.connector.payload.legacy;
+package org.apache.iotdb.db.pipe.sink.payload.legacy;
 
 import org.apache.iotdb.db.pipe.receiver.protocol.legacy.loader.ILoader;
 import org.apache.iotdb.db.pipe.receiver.protocol.legacy.loader.TsFileLoader;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/airgap/IoTDBDataNodeAirGapConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/airgap/IoTDBDataNodeAirGapSink.java
similarity index 53%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/airgap/IoTDBDataNodeAirGapConnector.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/airgap/IoTDBDataNodeAirGapSink.java
index b66c0012700c9..c968558eaafd9 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/airgap/IoTDBDataNodeAirGapConnector.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/airgap/IoTDBDataNodeAirGapSink.java
@@ -17,68 +17,24 @@
  * under the License.
  */
 
-package org.apache.iotdb.db.pipe.connector.protocol.airgap;
+package org.apache.iotdb.db.pipe.sink.protocol.airgap;
 
-import org.apache.iotdb.common.rpc.thrift.TEndPoint;
 import org.apache.iotdb.common.rpc.thrift.TSStatus;
 import org.apache.iotdb.commons.conf.CommonDescriptor;
-import org.apache.iotdb.commons.pipe.config.PipeConfig;
-import org.apache.iotdb.commons.pipe.connector.payload.thrift.common.PipeTransferHandshakeConstant;
-import org.apache.iotdb.commons.pipe.connector.protocol.IoTDBAirGapConnector;
-import org.apache.iotdb.commons.utils.NodeUrlUtils;
+import org.apache.iotdb.commons.pipe.sink.payload.thrift.common.PipeTransferHandshakeConstant;
+import org.apache.iotdb.commons.pipe.sink.protocol.IoTDBAirGapSink;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferDataNodeHandshakeV1Req;
-import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferDataNodeHandshakeV2Req;
-import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferPlanNodeReq;
 import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent;
-import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator;
+import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferDataNodeHandshakeV1Req;
+import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferDataNodeHandshakeV2Req;
+import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferPlanNodeReq;
 import org.apache.iotdb.pipe.api.exception.PipeException;
 import org.apache.iotdb.rpc.TSStatusCode;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.io.IOException;
-import java.net.UnknownHostException;
 import java.util.HashMap;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-public abstract class IoTDBDataNodeAirGapConnector extends IoTDBAirGapConnector {
-
-  private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBDataNodeAirGapConnector.class);
-
-  @Override
-  public void validate(final PipeParameterValidator validator) throws Exception {
-    super.validate(validator);
-
-    final PipeConfig pipeConfig = PipeConfig.getInstance();
-    final Set<TEndPoint> givenNodeUrls = parseNodeUrls(validator.getParameters());
-    validator.validate(
-        empty -> {
-          try {
-            // Ensure the sink doesn't point to the air gap receiver on DataNode itself
-            return !(pipeConfig.getPipeAirGapReceiverEnabled()
-                && NodeUrlUtils.containsLocalAddress(
-                    givenNodeUrls.stream()
-                        .filter(
-                            tEndPoint ->
-                                tEndPoint.getPort() == pipeConfig.getPipeAirGapReceiverPort())
-                        .map(TEndPoint::getIp)
-                        .collect(Collectors.toList())));
-          } catch (final UnknownHostException e) {
-            LOGGER.warn("Unknown host when checking pipe sink IP.", e);
-            return false;
-          }
-        },
-        String.format(
-            "One of the endpoints %s of the receivers is pointing back to the air gap receiver %s on sender itself, or unknown host when checking pipe sink IP.",
-            givenNodeUrls,
-            new TEndPoint(
-                IoTDBDescriptor.getInstance().getConfig().getRpcAddress(),
-                pipeConfig.getPipeAirGapReceiverPort())));
-  }
+public abstract class IoTDBDataNodeAirGapSink extends IoTDBAirGapSink {
 
   @Override
   protected boolean mayNeedHandshakeWhenFail() {
@@ -100,6 +56,19 @@ protected byte[] generateHandShakeV2Payload() throws IOException {
     params.put(
         PipeTransferHandshakeConstant.HANDSHAKE_KEY_TIME_PRECISION,
         CommonDescriptor.getInstance().getConfig().getTimestampPrecision());
+    params.put(
+        PipeTransferHandshakeConstant.HANDSHAKE_KEY_CONVERT_ON_TYPE_MISMATCH,
+        Boolean.toString(shouldReceiverConvertOnTypeMismatch));
+    params.put(
+        PipeTransferHandshakeConstant.HANDSHAKE_KEY_LOAD_TSFILE_STRATEGY, loadTsFileStrategy);
+    params.put(PipeTransferHandshakeConstant.HANDSHAKE_KEY_USERNAME, username);
+    params.put(PipeTransferHandshakeConstant.HANDSHAKE_KEY_PASSWORD, password);
+    params.put(
+        PipeTransferHandshakeConstant.HANDSHAKE_KEY_VALIDATE_TSFILE,
+        Boolean.toString(loadTsFileValidation));
+    params.put(
+        PipeTransferHandshakeConstant.HANDSHAKE_KEY_MARK_AS_PIPE_REQUEST,
+        Boolean.toString(shouldMarkAsPipeRequest));
 
     return PipeTransferDataNodeHandshakeV2Req.toTPipeTransferBytes(params);
   }
@@ -108,16 +77,16 @@ protected void doTransferWrapper(
       final AirGapSocket socket,
       final PipeSchemaRegionWritePlanEvent pipeSchemaRegionWritePlanEvent)
       throws PipeException, IOException {
+    // We increase the reference count for this event to determine if the event may be released.
+    if (!pipeSchemaRegionWritePlanEvent.increaseReferenceCount(
+        IoTDBDataNodeAirGapSink.class.getName())) {
+      return;
+    }
     try {
-      // We increase the reference count for this event to determine if the event may be released.
-      if (!pipeSchemaRegionWritePlanEvent.increaseReferenceCount(
-          IoTDBDataNodeAirGapConnector.class.getName())) {
-        return;
-      }
       doTransfer(socket, pipeSchemaRegionWritePlanEvent);
     } finally {
       pipeSchemaRegionWritePlanEvent.decreaseReferenceCount(
-          IoTDBDataNodeAirGapConnector.class.getName(), false);
+          IoTDBDataNodeAirGapSink.class.getName(), false);
     }
   }
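The doTransferWrapper refactor moves increaseReferenceCount out of the try block: in the old shape, a failed increase returned from inside the try, so the finally clause still decremented a reference that was never taken. A minimal sketch of the corrected shape follows; the event type is a hypothetical stand-in for the pipe event classes.

import java.io.IOException;

// Hypothetical stand-in for the reference-counted pipe event types.
interface RefCountedEvent {
  boolean increaseReferenceCount(String holderName);

  void decreaseReferenceCount(String holderName, boolean shouldReport);
}

abstract class TransferWrapperSketch {
  void doTransferWrapper(final RefCountedEvent event) throws IOException {
    // Take the reference BEFORE entering try: if the event was already
    // released, we return without ever reaching the finally block.
    if (!event.increaseReferenceCount(TransferWrapperSketch.class.getName())) {
      return;
    }
    try {
      doTransfer(event);
    } finally {
      // Runs only when the increase above succeeded, keeping the count balanced.
      event.decreaseReferenceCount(TransferWrapperSketch.class.getName(), false);
    }
  }

  abstract void doTransfer(RefCountedEvent event) throws IOException;
}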
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/airgap/IoTDBDataRegionAirGapConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/airgap/IoTDBDataRegionAirGapSink.java
similarity index 75%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/airgap/IoTDBDataRegionAirGapConnector.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/airgap/IoTDBDataRegionAirGapSink.java
index 9874741c7f094..e652325cb5288 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/airgap/IoTDBDataRegionAirGapConnector.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/airgap/IoTDBDataRegionAirGapSink.java
@@ -17,25 +17,30 @@
 * under the License.
  */
 
-package org.apache.iotdb.db.pipe.connector.protocol.airgap;
+package org.apache.iotdb.db.pipe.sink.protocol.airgap;
 
 import org.apache.iotdb.common.rpc.thrift.TSStatus;
 import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
-import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletBinaryReq;
-import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletInsertNodeReq;
-import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletRawReq;
-import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTsFilePieceReq;
-import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTsFilePieceWithModReq;
-import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTsFileSealReq;
-import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTsFileSealWithModReq;
+import org.apache.iotdb.commons.pipe.sink.limiter.TsFileSendRateLimiter;
 import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
 import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent;
 import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent;
 import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
 import org.apache.iotdb.db.pipe.event.common.terminate.PipeTerminateEvent;
 import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent;
+import org.apache.iotdb.db.pipe.metric.overview.PipeResourceMetrics;
+import org.apache.iotdb.db.pipe.metric.sink.PipeDataRegionSinkMetrics;
+import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTabletBinaryReq;
+import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTabletInsertNodeReq;
+import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTabletRawReq;
+import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFilePieceReq;
+import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFilePieceWithModReq;
+import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFileSealReq;
+import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFileSealWithModReq;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode;
 import org.apache.iotdb.db.storageengine.dataregion.wal.exception.WALPipeException;
+import org.apache.iotdb.pipe.api.customizer.configuration.PipeConnectorRuntimeConfiguration;
+import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
 import org.apache.iotdb.pipe.api.event.Event;
 import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
 import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent;
@@ -48,12 +53,30 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Objects;
 
-public class IoTDBDataRegionAirGapConnector extends IoTDBDataNodeAirGapConnector {
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_ENABLE_SEND_TSFILE_LIMIT;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_ENABLE_SEND_TSFILE_LIMIT_DEFAULT_VALUE;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_ENABLE_SEND_TSFILE_LIMIT;
 
-  private static final Logger LOGGER =
-      LoggerFactory.getLogger(IoTDBDataRegionAirGapConnector.class);
+public class IoTDBDataRegionAirGapSink extends IoTDBDataNodeAirGapSink {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBDataRegionAirGapSink.class);
+
+  private boolean enableSendTsFileLimit;
+
+  @Override
+  public void customize(
+      final PipeParameters parameters, final PipeConnectorRuntimeConfiguration configuration)
+      throws Exception {
+    super.customize(parameters, configuration);
+
+    enableSendTsFileLimit =
+        parameters.getBooleanOrDefault(
+            Arrays.asList(SINK_ENABLE_SEND_TSFILE_LIMIT, CONNECTOR_ENABLE_SEND_TSFILE_LIMIT),
+            CONNECTOR_ENABLE_SEND_TSFILE_LIMIT_DEFAULT_VALUE);
+  }
 
   @Override
   public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exception {
@@ -150,16 +173,16 @@ private void doTransferWrapper(
       final AirGapSocket socket,
       final PipeInsertNodeTabletInsertionEvent pipeInsertNodeTabletInsertionEvent)
       throws PipeException, WALPipeException, IOException {
+    // We increase the reference count for this event to determine if the event may be released.
+    if (!pipeInsertNodeTabletInsertionEvent.increaseReferenceCount(
+        IoTDBDataRegionAirGapSink.class.getName())) {
+      return;
+    }
     try {
-      // We increase the reference count for this event to determine if the event may be released.
-      if (!pipeInsertNodeTabletInsertionEvent.increaseReferenceCount(
-          IoTDBDataRegionAirGapConnector.class.getName())) {
-        return;
-      }
       doTransfer(socket, pipeInsertNodeTabletInsertionEvent);
     } finally {
       pipeInsertNodeTabletInsertionEvent.decreaseReferenceCount(
-          IoTDBDataRegionAirGapConnector.class.getName(), false);
+          IoTDBDataRegionAirGapSink.class.getName(), false);
     }
   }
 
@@ -167,8 +190,7 @@ private void doTransfer(
       final AirGapSocket socket,
       final PipeInsertNodeTabletInsertionEvent pipeInsertNodeTabletInsertionEvent)
       throws PipeException, WALPipeException, IOException {
-    final InsertNode insertNode =
-        pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible();
+    final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode();
     final byte[] bytes =
         Objects.isNull(insertNode)
             ? PipeTransferTabletBinaryReq.toTPipeTransferBytes(
@@ -195,16 +217,16 @@ private void doTransfer(
   private void doTransferWrapper(
       final AirGapSocket socket, final PipeRawTabletInsertionEvent pipeRawTabletInsertionEvent)
       throws PipeException, IOException {
+    // We increase the reference count for this event to determine if the event may be released.
+    if (!pipeRawTabletInsertionEvent.increaseReferenceCount(
+        IoTDBDataRegionAirGapSink.class.getName())) {
+      return;
+    }
     try {
-      // We increase the reference count for this event to determine if the event may be released.
-      if (!pipeRawTabletInsertionEvent.increaseReferenceCount(
-          IoTDBDataRegionAirGapConnector.class.getName())) {
-        return;
-      }
      doTransfer(socket, pipeRawTabletInsertionEvent);
    } finally {
      pipeRawTabletInsertionEvent.decreaseReferenceCount(
-          IoTDBDataRegionAirGapConnector.class.getName(), false);
+          IoTDBDataRegionAirGapSink.class.getName(), false);
    }
  }
 
@@ -233,16 +255,16 @@ private void doTransfer(
   private void doTransferWrapper(
       final AirGapSocket socket, final PipeTsFileInsertionEvent pipeTsFileInsertionEvent)
       throws PipeException, IOException {
+    // We increase the reference count for this event to determine if the event may be released.
+    if (!pipeTsFileInsertionEvent.increaseReferenceCount(
+        IoTDBDataRegionAirGapSink.class.getName())) {
+      return;
+    }
     try {
-      // We increase the reference count for this event to determine if the event may be released.
- if (!pipeTsFileInsertionEvent.increaseReferenceCount( - IoTDBDataRegionAirGapConnector.class.getName())) { - return; - } doTransfer(socket, pipeTsFileInsertionEvent); } finally { pipeTsFileInsertionEvent.decreaseReferenceCount( - IoTDBDataRegionAirGapConnector.class.getName(), false); + IoTDBDataRegionAirGapSink.class.getName(), false); } } @@ -293,6 +315,14 @@ private void doTransfer( } } + @Override + protected void mayLimitRateAndRecordIO(final long requiredBytes) { + PipeResourceMetrics.getInstance().recordDiskIO(requiredBytes); + if (enableSendTsFileLimit) { + TsFileSendRateLimiter.getInstance().acquire(requiredBytes); + } + } + @Override protected byte[] getTransferSingleFilePieceBytes( final String fileName, final long position, final byte[] payLoad) throws IOException { @@ -304,4 +334,13 @@ protected byte[] getTransferMultiFilePieceBytes( final String fileName, final long position, final byte[] payLoad) throws IOException { return PipeTransferTsFilePieceWithModReq.toTPipeTransferBytes(fileName, position, payLoad); } + + @Override + protected byte[] compressIfNeeded(final byte[] reqInBytes) throws IOException { + if (Objects.isNull(compressionTimer) && Objects.nonNull(attributeSortedString)) { + compressionTimer = + PipeDataRegionSinkMetrics.getInstance().getCompressionTimer(attributeSortedString); + } + return super.compressIfNeeded(reqInBytes); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/airgap/IoTDBSchemaRegionAirGapConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/airgap/IoTDBSchemaRegionAirGapSink.java similarity index 88% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/airgap/IoTDBSchemaRegionAirGapConnector.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/airgap/IoTDBSchemaRegionAirGapSink.java index e53423779d0db..bcbf88d23857e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/airgap/IoTDBSchemaRegionAirGapConnector.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/airgap/IoTDBSchemaRegionAirGapSink.java @@ -17,15 +17,15 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.connector.protocol.airgap; +package org.apache.iotdb.db.pipe.sink.protocol.airgap; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferSchemaSnapshotPieceReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferSchemaSnapshotSealReq; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionSnapshotEvent; import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferSchemaSnapshotPieceReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferSchemaSnapshotSealReq; import org.apache.iotdb.pipe.api.event.Event; import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent; @@ -40,10 +40,9 @@ import java.io.IOException; import java.util.Objects; -public class IoTDBSchemaRegionAirGapConnector extends IoTDBDataNodeAirGapConnector { +public class IoTDBSchemaRegionAirGapSink extends IoTDBDataNodeAirGapSink { - private static final Logger LOGGER = - LoggerFactory.getLogger(IoTDBSchemaRegionAirGapConnector.class); + private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBSchemaRegionAirGapSink.class); @Override public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exception { @@ -86,16 +85,16 @@ public void transfer(final Event event) throws Exception { private void doTransferWrapper( final AirGapSocket socket, final PipeSchemaRegionSnapshotEvent pipeSchemaRegionSnapshotEvent) throws PipeException, IOException { + // We increase the reference count for this event to determine if the event may be released. + if (!pipeSchemaRegionSnapshotEvent.increaseReferenceCount( + IoTDBSchemaRegionAirGapSink.class.getName())) { + return; + } try { - // We increase the reference count for this event to determine if the event may be released. 
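Both renamed classes extend IoTDBDataNodeAirGapSink, and the diff surfaces a template-method split: the base class drives the socket transfer and calls a protected mayLimitRateAndRecordIO() hook, which the data-region subclass fills with metrics plus optional throttling, while the schema-region subclass (see the hunk below) leaves it as an explicit no-op, presumably because schema snapshot traffic is small enough not to warrant throttling. A sketch of that shape under invented names:

// Illustrative sketch only; class and method names are invented to mirror
// the template shape, not the real IoTDBDataNodeAirGapSink API.
abstract class AirGapSinkBase {
  final void transferFilePiece(final byte[] piece) {
    // The base class owns the transfer loop and always consults the hook
    // before writing to the socket.
    mayLimitRateAndRecordIO(piece.length);
    writeToSocket(piece);
  }

  // Subclasses decide whether (and how) to meter outgoing bytes.
  protected abstract void mayLimitRateAndRecordIO(long requiredBytes);

  private void writeToSocket(final byte[] piece) { /* socket stand-in */ }
}

final class DataRegionSink extends AirGapSinkBase {
  @Override
  protected void mayLimitRateAndRecordIO(final long requiredBytes) {
    // Data-region traffic is metered and, optionally, throttled.
  }
}

final class SchemaRegionSink extends AirGapSinkBase {
  @Override
  protected void mayLimitRateAndRecordIO(final long requiredBytes) {
    // Deliberately a no-op: schema snapshots are not rate limited.
  }
}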
- if (!pipeSchemaRegionSnapshotEvent.increaseReferenceCount( - IoTDBSchemaRegionAirGapConnector.class.getName())) { - return; - } doTransfer(socket, pipeSchemaRegionSnapshotEvent); } finally { pipeSchemaRegionSnapshotEvent.decreaseReferenceCount( - IoTDBSchemaRegionAirGapConnector.class.getName(), false); + IoTDBSchemaRegionAirGapSink.class.getName(), false); } } @@ -143,6 +142,11 @@ private void doTransfer( } } + @Override + protected void mayLimitRateAndRecordIO(final long requiredBytes) { + // Do nothing + } + @Override protected byte[] getTransferSingleFilePieceBytes( final String fileName, final long position, final byte[] payLoad) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/legacy/IoTDBLegacyPipeConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/legacy/IoTDBLegacyPipeSink.java similarity index 82% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/legacy/IoTDBLegacyPipeConnector.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/legacy/IoTDBLegacyPipeSink.java index 5edf485adfb27..a80293c94673d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/legacy/IoTDBLegacyPipeConnector.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/legacy/IoTDBLegacyPipeSink.java @@ -17,26 +17,26 @@ * under the License. */ -package org.apache.iotdb.db.pipe.connector.protocol.legacy; +package org.apache.iotdb.db.pipe.sink.protocol.legacy; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.client.property.ThriftClientProperty; import org.apache.iotdb.commons.conf.CommonConfig; import org.apache.iotdb.commons.conf.CommonDescriptor; -import org.apache.iotdb.commons.conf.IoTDBConstant; +import org.apache.iotdb.commons.consensus.DataRegionId; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeCriticalException; import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.connector.client.IoTDBSyncClient; -import org.apache.iotdb.commons.utils.NodeUrlUtils; +import org.apache.iotdb.commons.pipe.sink.client.IoTDBSyncClient; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.pipe.connector.payload.legacy.TsFilePipeData; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.terminate.PipeTerminateEvent; import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; +import org.apache.iotdb.db.pipe.sink.payload.legacy.TsFilePipeData; +import org.apache.iotdb.db.storageengine.StorageEngine; import org.apache.iotdb.pipe.api.PipeConnector; import org.apache.iotdb.pipe.api.customizer.configuration.PipeConnectorRuntimeConfiguration; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator; @@ -62,35 +62,35 @@ import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; -import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Objects; import java.util.Set; -import java.util.stream.Collectors; - -import static 
org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_IP_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_PASSWORD_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_PASSWORD_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_PORT_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_SYNC_CONNECTOR_VERSION_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_SYNC_CONNECTOR_VERSION_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_USER_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_IOTDB_USER_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_IOTDB_IP_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_IOTDB_PASSWORD_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_IOTDB_PORT_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_IOTDB_SSL_ENABLE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_IOTDB_SSL_TRUST_STORE_PATH_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_IOTDB_SSL_TRUST_STORE_PWD_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_IOTDB_SYNC_CONNECTOR_VERSION_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_IOTDB_USER_KEY; - -public class IoTDBLegacyPipeConnector implements PipeConnector { - - private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBLegacyPipeConnector.class); + +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_IP_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_PASSWORD_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_PASSWORD_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_PORT_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_SYNC_CONNECTOR_VERSION_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_SYNC_CONNECTOR_VERSION_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_USERNAME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_USER_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_USER_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_IOTDB_IP_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_IOTDB_PASSWORD_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_IOTDB_PORT_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_IOTDB_SSL_ENABLE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_IOTDB_SSL_TRUST_STORE_PATH_KEY; +import static 
org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_IOTDB_SSL_TRUST_STORE_PWD_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_IOTDB_SYNC_CONNECTOR_VERSION_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_IOTDB_USERNAME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_IOTDB_USER_KEY; + +public class IoTDBLegacyPipeSink implements PipeConnector { + + private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBLegacyPipeSink.class); private static final CommonConfig COMMON_CONFIG = CommonDescriptor.getInstance().getConfig(); @@ -107,6 +107,7 @@ public class IoTDBLegacyPipeConnector implements PipeConnector { private String syncConnectorVersion; private String pipeName; + private String databaseName; private IoTDBSyncClient client; private SessionPool sessionPool; @@ -132,24 +133,6 @@ public void validate(final PipeParameterValidator validator) throws Exception { parameters.hasAttribute(CONNECTOR_IOTDB_PORT_KEY), parameters.hasAttribute(SINK_IOTDB_IP_KEY), parameters.hasAttribute(SINK_IOTDB_PORT_KEY)) - .validate( - empty -> { - try { - // Ensure the sink doesn't point to the legacy receiver on DataNode itself - return !NodeUrlUtils.containsLocalAddress( - givenNodeUrls.stream() - .filter(tEndPoint -> tEndPoint.getPort() == ioTDBConfig.getRpcPort()) - .map(TEndPoint::getIp) - .collect(Collectors.toList())); - } catch (final UnknownHostException e) { - LOGGER.warn("Unknown host when checking pipe sink IP.", e); - return false; - } - }, - String.format( - "One of the endpoints %s of the receivers is pointing back to the legacy receiver %s on sender itself, or unknown host when checking pipe sink IP.", - givenNodeUrls, - new TEndPoint(ioTDBConfig.getRpcAddress(), ioTDBConfig.getRpcPort()))) .validate( args -> !((boolean) args[0]) || ((boolean) args[1] && (boolean) args[2]), String.format( @@ -193,7 +176,11 @@ public void customize( user = parameters.getStringOrDefault( - Arrays.asList(CONNECTOR_IOTDB_USER_KEY, SINK_IOTDB_USER_KEY), + Arrays.asList( + CONNECTOR_IOTDB_USER_KEY, + SINK_IOTDB_USER_KEY, + CONNECTOR_IOTDB_USERNAME_KEY, + SINK_IOTDB_USERNAME_KEY), CONNECTOR_IOTDB_USER_DEFAULT_VALUE); password = parameters.getStringOrDefault( @@ -211,6 +198,11 @@ public void customize( useSSL = parameters.getBooleanOrDefault(SINK_IOTDB_SSL_ENABLE_KEY, false); trustStore = parameters.getString(SINK_IOTDB_SSL_TRUST_STORE_PATH_KEY); trustStorePwd = parameters.getString(SINK_IOTDB_SSL_TRUST_STORE_PWD_KEY); + + databaseName = + StorageEngine.getInstance() + .getDataRegion(new DataRegionId(configuration.getRuntimeEnvironment().getRegionId())) + .getDatabaseName(); } @Override @@ -221,7 +213,7 @@ public void handshake() throws Exception { client = new IoTDBSyncClient( new ThriftClientProperty.Builder() - .setConnectionTimeoutMs(COMMON_CONFIG.getConnectionTimeoutInMS()) + .setConnectionTimeoutMs(COMMON_CONFIG.getDnConnectionTimeoutInMS()) .setRpcThriftCompressionEnabled(COMMON_CONFIG.isRpcThriftCompressionEnabled()) .build(), ipAddress, @@ -231,7 +223,7 @@ public void handshake() throws Exception { trustStorePwd); final TSyncIdentityInfo identityInfo = new TSyncIdentityInfo( - pipeName, System.currentTimeMillis(), syncConnectorVersion, IoTDBConstant.PATH_ROOT); + pipeName, System.currentTimeMillis(), syncConnectorVersion, databaseName); final TSStatus status = client.handshake(identityInfo); if (status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { final String 
errorMsg = @@ -313,16 +305,15 @@ public void transfer(final Event event) throws Exception { private void doTransferWrapper( final PipeInsertNodeTabletInsertionEvent pipeInsertNodeInsertionEvent) throws IoTDBConnectionException, StatementExecutionException { + // We increase the reference count for this event to determine if the event may be released. + if (!pipeInsertNodeInsertionEvent.increaseReferenceCount(IoTDBLegacyPipeSink.class.getName())) { + return; + } try { - // We increase the reference count for this event to determine if the event may be released. - if (!pipeInsertNodeInsertionEvent.increaseReferenceCount( - IoTDBLegacyPipeConnector.class.getName())) { - return; - } doTransfer(pipeInsertNodeInsertionEvent); } finally { pipeInsertNodeInsertionEvent.decreaseReferenceCount( - IoTDBLegacyPipeConnector.class.getName(), false); + IoTDBLegacyPipeSink.class.getName(), false); } } @@ -344,16 +335,15 @@ private void doTransfer(final PipeInsertNodeTabletInsertionEvent pipeInsertNodeI private void doTransferWrapper(final PipeRawTabletInsertionEvent pipeRawTabletInsertionEvent) throws PipeException, IoTDBConnectionException, StatementExecutionException { + // We increase the reference count for this event to determine if the event may be released. + if (!pipeRawTabletInsertionEvent.increaseReferenceCount(IoTDBLegacyPipeSink.class.getName())) { + return; + } try { - // We increase the reference count for this event to determine if the event may be released. - if (!pipeRawTabletInsertionEvent.increaseReferenceCount( - IoTDBLegacyPipeConnector.class.getName())) { - return; - } doTransfer(pipeRawTabletInsertionEvent); } finally { pipeRawTabletInsertionEvent.decreaseReferenceCount( - IoTDBLegacyPipeConnector.class.getName(), false); + IoTDBLegacyPipeSink.class.getName(), false); } } @@ -369,16 +359,14 @@ private void doTransfer(final PipeRawTabletInsertionEvent pipeTabletInsertionEve private void doTransferWrapper(final PipeTsFileInsertionEvent pipeTsFileInsertionEvent) throws PipeException, TException, IOException { + // We increase the reference count for this event to determine if the event may be released. + if (!pipeTsFileInsertionEvent.increaseReferenceCount(IoTDBLegacyPipeSink.class.getName())) { + return; + } try { - // We increase the reference count for this event to determine if the event may be released. - if (!pipeTsFileInsertionEvent.increaseReferenceCount( - IoTDBLegacyPipeConnector.class.getName())) { - return; - } doTransfer(pipeTsFileInsertionEvent); } finally { - pipeTsFileInsertionEvent.decreaseReferenceCount( - IoTDBLegacyPipeConnector.class.getName(), false); + pipeTsFileInsertionEvent.decreaseReferenceCount(IoTDBLegacyPipeSink.class.getName(), false); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcda/OpcDaHeader.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcda/OpcDaHeader.java new file mode 100644 index 0000000000000..47909c997576d --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcda/OpcDaHeader.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.pipe.sink.protocol.opcda; + +import com.sun.jna.Pointer; +import com.sun.jna.Structure; +import com.sun.jna.WString; +import com.sun.jna.platform.win32.COM.Unknown; +import com.sun.jna.platform.win32.Guid; +import com.sun.jna.platform.win32.Variant; +import com.sun.jna.ptr.IntByReference; +import com.sun.jna.ptr.PointerByReference; + +import java.util.Arrays; +import java.util.List; + +/** We define the OPC DA Classes and interfaces here like C's .h file. */ +public class OpcDaHeader { + // IOPCServer + static final Guid.IID IID_IOPCServer = new Guid.IID("39C13A4D-011E-11D0-9675-0020AFD8ADB3"); + + // IOPCItemMgt + static final Guid.IID IID_IOPCItemMgt = new Guid.IID("39C13A54-011E-11D0-9675-0020AFD8ADB3"); + + // IOPCSyncIO + static final Guid.IID IID_IOPCSyncIO = new Guid.IID("39C13A52-011E-11D0-9675-0020AFD8ADB3"); + + // IUnknown + static final Guid.IID IID_IUNKNOWN = new Guid.IID("00000000-0000-0000-C000-000000000046"); + + public static class IOPCServer extends Unknown { + public IOPCServer(final Pointer p) { + super(p); + } + + // /* [string][in] */ LPCWSTR szName, + // /* [in] */ BOOL bActive, + // /* [in] */ DWORD dwRequestedUpdateRate, + // /* [in] */ OPCHANDLE hClientGroup, + // /* [in][unique] */ LONG *pTimeBias, + // /* [in][unique] */ FLOAT *pPercentDeadband, + // /* [in] */ DWORD dwLCID, + // /* [out] */ OPCHANDLE *phServerGroup, + // /* [out] */ DWORD *pRevisedUpdateRate, + // /* [in] */ REFIID riid, + // /* [iid_is][out] */ LPUNKNOWN *ppUnk) = 0; + public int addGroup( + final String szName, // Group name ("" means auto) + final boolean bActive, // Whether to activate the group + final int dwRequestedUpdateRate, // The update rate of request (ms) + final int hClientGroup, // The handle of client group + final Pointer pTimeBias, // Time zone bias + final Pointer pPercentDeadband, // Dead band + final int dwLCID, // Region ID + final PointerByReference phServerGroup, // Server group handler + final IntByReference pRevisedUpdateRate, // Real update rate + final Guid.GUID.ByReference riid, // Interface IID + final PointerByReference ppUnk // The OPC Group pointer returned + ) { + // Convert Java string into COM "bstr" + final WString wName = new WString(szName); + + return this._invokeNativeInt( + 3, + new Object[] { + this.getPointer(), + wName, + bActive ? 1 : 0, + dwRequestedUpdateRate, + hClientGroup, + pTimeBias, + pPercentDeadband, + dwLCID, + phServerGroup, + pRevisedUpdateRate, + riid != null ? 
riid.getPointer() : null, + ppUnk + }); + } + } + + // IOPCItemMgt( + // /* [in] */ DWORD dwCount, + // /* [in] */ OPCITEMDEF *pItemArray, + // /* [out] */ OPCITEMRESULT **ppAddResults, + // /* [out] */ HRESULT **ppErrors) = 0; + public static class IOPCItemMgt extends Unknown { + public IOPCItemMgt(final Pointer p) { + super(p); + } + + public int addItems( + final int dwCount, // Data count + final OPCITEMDEF[] pItemArray, // Items array to create + final PointerByReference pResults, // Results' handles + final PointerByReference pErrors // Error's pointers + ) { + return this._invokeNativeInt( + 3, new Object[] {this.getPointer(), dwCount, pItemArray, pResults, pErrors}); + } + } + + public static class IOPCSyncIO extends Unknown { + public IOPCSyncIO(final Pointer p) { + super(p); + } + + // /* [in] */ DWORD dwCount, + // /* [size_is][in] */ OPCHANDLE *phServer, + // /* [size_is][in] */ VARIANT *pItemValues, + // /* [size_is][size_is][out] */ HRESULT **ppErrors) = 0; + public int write( + final int dwCount, // Data count + final Pointer phServer, // Server handles of items + final Pointer pItemValues, // Values of items + final PointerByReference pErrors // Error codes + ) { + return this._invokeNativeInt( + 4, + new Object[] { // Write is the 4th method in vtable + this.getPointer(), dwCount, phServer, pItemValues, pErrors + }); + } + } + + // /* [string] */ LPWSTR szAccessPath; + // /* [string] */ LPWSTR szItemID; + // BOOL bActive; + // OPCHANDLE hClient; + // DWORD dwBlobSize; + // /* [size_is] */ BYTE *pBlob; + // VARTYPE vtRequestedDataType; + // WORD wReserved; + public static class OPCITEMDEF extends Structure { + public WString szAccessPath = new WString(""); // Access path (Usually empty) + public WString szItemID; // Item ID(Like "Channel1.Device1.Tag1") + public int bActive; // Whether to activate this item(TRUE=1, FALSE=0) + public int hClient; // Client handle, Used in async callback and remove item + public int dwBlobSize; // BLOB size + public Pointer pBlob; // BLOB's pointer + public short vtRequestedDataType = Variant.VT_UNKNOWN; // Requested datatype + public short wReserved; // Reserved + + // As C structure + @Override + protected List getFieldOrder() { + return Arrays.asList( + "szAccessPath", + "szItemID", + "bActive", + "hClient", + "dwBlobSize", + "pBlob", + "vtRequestedDataType", + "wReserved"); + } + } + + // OPCHANDLE hServer; + // VARTYPE vtCanonicalDataType; + // WORD wReserved; + // DWORD dwAccessRights; + // DWORD dwBlobSize; + // /* [size_is] */ BYTE *pBlob; + public static class OPCITEMRESULT extends Structure { + public int hServer; // Server handle, Used to write + public short vtCanonicalDataType; // Data type (like Variant.VT_R8) + public short wReserved; // Reserved word + public int dwAccessRights; // Access right + public int dwBlobSize; // BLOB size + public Pointer pBlob; // BLOB pointer + + public OPCITEMRESULT(final Pointer pointer) { + super(pointer); + } + + @Override + protected List getFieldOrder() { + return Arrays.asList( + "hServer", "vtCanonicalDataType", "wReserved", "dwAccessRights", "dwBlobSize", "pBlob"); + } + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcda/OpcDaServerHandle.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcda/OpcDaServerHandle.java new file mode 100644 index 0000000000000..1f3133f58b12d --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcda/OpcDaServerHandle.java @@ -0,0 +1,384 @@ +/* + 
* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.pipe.sink.protocol.opcda; + +import org.apache.iotdb.db.pipe.sink.util.PipeTabletEventSorter; +import org.apache.iotdb.pipe.api.exception.PipeException; + +import com.sun.jna.Memory; +import com.sun.jna.Native; +import com.sun.jna.Pointer; +import com.sun.jna.WString; +import com.sun.jna.platform.win32.COM.IUnknown; +import com.sun.jna.platform.win32.COM.Unknown; +import com.sun.jna.platform.win32.Guid; +import com.sun.jna.platform.win32.OaIdl; +import com.sun.jna.platform.win32.Ole32; +import com.sun.jna.platform.win32.OleAuto; +import com.sun.jna.platform.win32.Variant; +import com.sun.jna.platform.win32.WTypes; +import com.sun.jna.platform.win32.WinDef; +import com.sun.jna.platform.win32.WinError; +import com.sun.jna.platform.win32.WinNT; +import com.sun.jna.ptr.IntByReference; +import com.sun.jna.ptr.PointerByReference; +import org.apache.tsfile.common.constant.TsFileConstant; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.utils.Binary; +import org.apache.tsfile.write.UnSupportedDataTypeException; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.sql.Date; +import java.time.LocalDate; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; + +import static org.apache.iotdb.db.pipe.sink.protocol.opcda.OpcDaHeader.IID_IOPCItemMgt; +import static org.apache.iotdb.db.pipe.sink.protocol.opcda.OpcDaHeader.IID_IOPCServer; +import static org.apache.iotdb.db.pipe.sink.protocol.opcda.OpcDaHeader.IID_IOPCSyncIO; +import static org.apache.iotdb.db.pipe.sink.protocol.opcda.OpcDaHeader.IID_IUNKNOWN; + +public class OpcDaServerHandle implements Closeable { + + private static final Logger LOGGER = LoggerFactory.getLogger(OpcDaServerHandle.class); + + private final OpcDaHeader.IOPCServer opcServer; + private final OpcDaHeader.IOPCItemMgt itemMgt; + private final OpcDaHeader.IOPCSyncIO syncIO; + private final Map serverHandleMap = new ConcurrentHashMap<>(); + private final Map serverTimestampMap = new ConcurrentHashMap<>(); + + // Save it here to avoid memory leakage + private WTypes.BSTR bstr; + + OpcDaServerHandle(String clsOrProgID) { + final Guid.CLSID CLSID_OPC_SERVER = new Guid.CLSID(clsOrProgID); + + Ole32.INSTANCE.CoInitializeEx(null, Ole32.COINIT_MULTITHREADED); + final PointerByReference ppvServer = new PointerByReference(); + + WinNT.HRESULT hr = + Ole32.INSTANCE.CoCreateInstance(CLSID_OPC_SERVER, null, 0x17, IID_IOPCServer, ppvServer); + + if (hr.intValue() != WinError.S_OK.intValue()) { + throw new PipeException( + 
"Failed to connect to server, error code: 0x" + Integer.toHexString(hr.intValue())); + } + + opcServer = new OpcDaHeader.IOPCServer(ppvServer.getValue()); + + // 3. Create group + final PointerByReference phServerGroup = new PointerByReference(); + final PointerByReference phOPCGroup = new PointerByReference(); + final IntByReference pRevisedUpdateRate = new IntByReference(); + final int hr2 = + opcServer.addGroup( + "", + true, + 1000, + 0, + null, + null, + 0, + phServerGroup, + pRevisedUpdateRate, + new Guid.GUID.ByReference(IID_IUNKNOWN.getPointer()), + phOPCGroup); + + if (hr2 == WinError.S_OK.intValue()) { + LOGGER.info( + "Create group successfully! Server handle: {}, update rate: {} ms", + phServerGroup.getValue(), + pRevisedUpdateRate.getValue()); + } else { + throw new PipeException( + "Failed to create group,error code: 0x" + Integer.toHexString(hr.intValue())); + } + + final IUnknown groupUnknown = new Unknown(phOPCGroup.getValue()); + + // 4. Acquire IOPCItemMgt interface (To create Item) + final PointerByReference ppvItemMgt = new PointerByReference(); + hr = + groupUnknown.QueryInterface( + new Guid.REFIID(new Guid.GUID.ByReference(IID_IOPCItemMgt).getPointer()), ppvItemMgt); + if (hr.intValue() == WinError.S_OK.intValue()) { + LOGGER.info("Acquire IOPCItemMgt successfully! Interface address: {}", ppvItemMgt.getValue()); + } else { + throw new PipeException( + "Failed to acquire IOPCItemMgt, error code: 0x" + Integer.toHexString(hr.intValue())); + } + + itemMgt = new OpcDaHeader.IOPCItemMgt(ppvItemMgt.getValue()); + + // 5. Acquire IOPCSyncIO Interface + PointerByReference ppvSyncIO = new PointerByReference(); + hr = + groupUnknown.QueryInterface( + new Guid.REFIID(new Guid.GUID.ByReference(IID_IOPCSyncIO).getPointer()), ppvSyncIO); + if (hr.intValue() == WinError.S_OK.intValue()) { + LOGGER.info("Acquire IOPCSyncIO successfully! 
Interface address: {}", ppvSyncIO.getValue()); + } else { + throw new PipeException( + "Failed to acquire IOPCSyncIO, error code: 0x" + Integer.toHexString(hr.intValue())); + } + syncIO = new OpcDaHeader.IOPCSyncIO(ppvSyncIO.getValue()); + } + + static String getClsIDFromProgID(final String progID) { + // To receive CLSID struct + final Guid.CLSID.ByReference pclsid = new Guid.CLSID.ByReference(); + + final WinNT.HRESULT hr = Ole32.INSTANCE.CLSIDFromProgID(progID, pclsid); + + if (hr.intValue() == WinError.S_OK.intValue()) { // S_OK = 0 + // Format CLSID (like "{CAE8D0E1-117B-11D5-924B-11C0F023E91C}") + final String clsidStr = + String.format( + "%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X", + pclsid.Data1, + pclsid.Data2, + pclsid.Data3, + pclsid.Data4[0], + pclsid.Data4[1], + pclsid.Data4[2], + pclsid.Data4[3], + pclsid.Data4[4], + pclsid.Data4[5], + pclsid.Data4[6], + pclsid.Data4[7]); + LOGGER.info("Successfully converted progID {} to CLSID: {{}}", progID, clsidStr); + return clsidStr; + } else { + throw new PipeException( + "Error: ProgID is invalid or unregistered, (HRESULT=0x" + + Integer.toHexString(hr.intValue()) + + ")"); + } + } + + void transfer(final Tablet tablet) { + new PipeTabletEventSorter(tablet).deduplicateAndSortTimestampsIfNecessary(); + final List schemas = tablet.getSchemas(); + + for (int i = 0; i < schemas.size(); ++i) { + final String itemId = + tablet.deviceId + TsFileConstant.PATH_SEPARATOR + schemas.get(i).getMeasurementId(); + if (!serverHandleMap.containsKey(itemId)) { + addItem(itemId, schemas.get(i).getType()); + } + for (int j = tablet.rowSize - 1; j >= 0; --j) { + if (Objects.isNull(tablet.bitMaps) + || Objects.isNull(tablet.bitMaps[i]) + || !tablet.bitMaps[i].isMarked(j)) { + if (serverTimestampMap.get(itemId) <= tablet.timestamps[j]) { + writeData( + itemId, getTabletObjectValue4Opc(tablet.values[i], j, schemas.get(i).getType())); + serverTimestampMap.put(itemId, tablet.timestamps[j]); + } + break; + } + } + } + } + + private void addItem(final String itemId, final TSDataType type) { + final OpcDaHeader.OPCITEMDEF[] itemDefs = new OpcDaHeader.OPCITEMDEF[1]; + itemDefs[0] = new OpcDaHeader.OPCITEMDEF(); + itemDefs[0].szAccessPath = new WString(""); + itemDefs[0].szItemID = new WString(itemId + "\0"); + itemDefs[0].bActive = 1; + itemDefs[0].hClient = 0; + itemDefs[0].dwBlobSize = 0; + itemDefs[0].pBlob = Pointer.NULL; + itemDefs[0].vtRequestedDataType = convertTsDataType2VariantType(type); + itemDefs[0].wReserved = 0; + itemDefs[0].write(); + + final PointerByReference ppItemResults = new PointerByReference(); + final PointerByReference ppErrors = new PointerByReference(); + final int hr = itemMgt.addItems(1, itemDefs, ppItemResults, ppErrors); + + final Pointer pErrors = ppErrors.getValue(); + if (Objects.nonNull(pErrors)) { + // Read errors + final int[] errors = + pErrors.getIntArray(0, 1); // Pick 1 element because only 1 element is added + final int itemError = errors[0]; + + try { + if (itemError == WinError.S_OK.intValue()) { + LOGGER.debug("Successfully added item {}.", itemId); + } else { + throw new PipeException( + "Failed to add item " + + itemId + + ", opc error code: 0x" + + Integer.toHexString(itemError)); + } + } finally { + Ole32.INSTANCE.CoTaskMemFree(pErrors); + } + } + + if (hr != WinError.S_OK.intValue()) { + throw new PipeException("Failed to add item, win error code: 0x" + Integer.toHexString(hr)); + } + + final Pointer pItemResults = ppItemResults.getValue(); + + final OpcDaHeader.OPCITEMRESULT[] itemResults = new 
OpcDaHeader.OPCITEMRESULT[1]; + itemResults[0] = new OpcDaHeader.OPCITEMRESULT(pItemResults); + itemResults[0].read(); + + serverHandleMap.put(itemId, itemResults[0].hServer); + serverTimestampMap.put(itemId, Long.MIN_VALUE); + } + + private void writeData(final String itemId, final Variant.VARIANT value) { + final Pointer phServer = new Memory(Native.getNativeSize(int.class)); + phServer.write(0, new int[] {serverHandleMap.get(itemId)}, 0, 1); + + final PointerByReference ppErrors = new PointerByReference(); + final int hr = syncIO.write(1, phServer, value.getPointer(), ppErrors); + // Free after write + if (Objects.nonNull(bstr)) { + OleAuto.INSTANCE.SysFreeString(bstr); + bstr = null; + } + + final Pointer pErrors = ppErrors.getValue(); + if (Objects.nonNull(pErrors)) { + // Read error code array, each for a result + final int[] errors = + pErrors.getIntArray(0, 1); // Read 1 element because only 1 point is written + final int itemError = errors[0]; + + try { + if (itemError != WinError.S_OK.intValue()) { + throw new PipeException( + "Failed to write " + + itemId + + ", value: " + + value + + ", opc error code: 0x" + + Integer.toHexString(itemError)); + } + } finally { + Ole32.INSTANCE.CoTaskMemFree(pErrors); + } + } + + if (hr != WinError.S_OK.intValue()) { + throw new PipeException("Failed to write, win error code: 0x" + Integer.toHexString(hr)); + } + } + + private short convertTsDataType2VariantType(final TSDataType dataType) { + switch (dataType) { + case BOOLEAN: + return Variant.VT_BOOL; + case INT32: + return Variant.VT_I4; + case INT64: + return Variant.VT_I8; + case DATE: + case TIMESTAMP: + return Variant.VT_DATE; + case FLOAT: + return Variant.VT_R4; + case DOUBLE: + return Variant.VT_R8; + case TEXT: + case STRING: + // Note that "Variant" does not support "VT_BLOB" data, and not all the DA server + // support this, thus we use "VT_BSTR" to substitute + case BLOB: + return Variant.VT_BSTR; + default: + throw new UnSupportedDataTypeException("UnSupported dataType " + dataType); + } + } + + private Variant.VARIANT getTabletObjectValue4Opc( + final Object column, final int rowIndex, final TSDataType type) { + final Variant.VARIANT value = new Variant.VARIANT(); + switch (type) { + case BOOLEAN: + value.setValue(Variant.VT_BOOL, new OaIdl.VARIANT_BOOL(((boolean[]) column)[rowIndex])); + break; + case INT32: + value.setValue(Variant.VT_I4, new WinDef.LONG(((int[]) column)[rowIndex])); + break; + case DATE: + value.setValue( + Variant.VT_DATE, new OaIdl.DATE((Date.valueOf(((LocalDate[]) column)[rowIndex])))); + break; + case INT64: + value.setValue(Variant.VT_I8, new WinDef.LONGLONG(((long[]) column)[rowIndex])); + break; + case TIMESTAMP: + value.setValue( + Variant.VT_DATE, new OaIdl.DATE(new java.util.Date(((long[]) column)[rowIndex]))); + break; + case FLOAT: + value.setValue(Variant.VT_R4, ((float[]) column)[rowIndex]); + break; + case DOUBLE: + value.setValue(Variant.VT_R8, ((double[]) column)[rowIndex]); + break; + case TEXT: + case STRING: + case BLOB: + bstr = OleAuto.INSTANCE.SysAllocString(((Binary[]) column)[rowIndex].toString()); + value.setValue(Variant.VT_BSTR, bstr); + break; + default: + throw new UnSupportedDataTypeException("UnSupported dataType " + type); + } + return value; + } + + @Override + public void close() { + // Help gc + serverTimestampMap.clear(); + serverHandleMap.clear(); + + // Release resource + if (Objects.nonNull(syncIO)) { + syncIO.Release(); + } + if (Objects.nonNull(itemMgt)) { + itemMgt.Release(); + } + if (Objects.nonNull(opcServer)) { + 
opcServer.Release(); + } + // Unload COM + Ole32.INSTANCE.CoUninitialize(); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcda/OpcDaSink.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcda/OpcDaSink.java new file mode 100644 index 0000000000000..366a93357850f --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcda/OpcDaSink.java @@ -0,0 +1,139 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.pipe.sink.protocol.opcda; + +import org.apache.iotdb.db.pipe.sink.protocol.opcua.OpcUaSink; +import org.apache.iotdb.pipe.api.PipeConnector; +import org.apache.iotdb.pipe.api.customizer.configuration.PipeConnectorRuntimeConfiguration; +import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator; +import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; +import org.apache.iotdb.pipe.api.event.Event; +import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; +import org.apache.iotdb.pipe.api.exception.PipeParameterNotValidException; + +import org.apache.tsfile.utils.Pair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_OPC_DA_CLSID_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_OPC_DA_PROGID_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_OPC_DA_CLSID_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_OPC_DA_PROGID_KEY; + +/** + * Send data in IoTDB based on Opc Da protocol, using JNA. All data are converted into tablets, and + * then push the newest value to the local COM server in another process. 
+ */ +public class OpcDaSink implements PipeConnector { + private static final Logger LOGGER = LoggerFactory.getLogger(OpcDaSink.class); + private static final Map> + CLS_ID_TO_REFERENCE_COUNT_AND_HANDLE_MAP = new ConcurrentHashMap<>(); + private String clsID; + private OpcDaServerHandle handle; + + @Override + public void validate(final PipeParameterValidator validator) throws Exception { + // TODO: upgrade this logic after "1 in 2" logic is supported + validator.validate( + args -> (boolean) args[0] || (boolean) args[1] || (boolean) args[2] || (boolean) args[3], + String.format( + "One of '%s', '%s', '%s' and '%s' must be specified", + SINK_OPC_DA_CLSID_KEY, + CONNECTOR_OPC_DA_CLSID_KEY, + SINK_OPC_DA_PROGID_KEY, + CONNECTOR_OPC_DA_PROGID_KEY), + validator.getParameters().hasAttribute(SINK_OPC_DA_CLSID_KEY), + validator.getParameters().hasAttribute(CONNECTOR_OPC_DA_CLSID_KEY), + validator.getParameters().hasAttribute(SINK_OPC_DA_PROGID_KEY), + validator.getParameters().hasAttribute(CONNECTOR_OPC_DA_PROGID_KEY)); + + if (!System.getProperty("os.name").toLowerCase().startsWith("windows")) { + throw new PipeParameterNotValidException("opc-da-sink must run on windows system."); + } + } + + @Override + public void customize( + final PipeParameters parameters, final PipeConnectorRuntimeConfiguration configuration) + throws Exception { + synchronized (CLS_ID_TO_REFERENCE_COUNT_AND_HANDLE_MAP) { + clsID = parameters.getStringByKeys(CONNECTOR_OPC_DA_CLSID_KEY, SINK_OPC_DA_CLSID_KEY); + if (Objects.isNull(clsID)) { + clsID = + OpcDaServerHandle.getClsIDFromProgID( + parameters.getStringByKeys(CONNECTOR_OPC_DA_PROGID_KEY, SINK_OPC_DA_PROGID_KEY)); + } + handle = + CLS_ID_TO_REFERENCE_COUNT_AND_HANDLE_MAP + .computeIfAbsent( + clsID, key -> new Pair<>(new AtomicInteger(0), new OpcDaServerHandle(clsID))) + .getRight(); + CLS_ID_TO_REFERENCE_COUNT_AND_HANDLE_MAP.get(clsID).getLeft().incrementAndGet(); + } + } + + @Override + public void handshake() throws Exception { + // Do nothing + } + + @Override + public void heartbeat() throws Exception { + // Do nothing + } + + @Override + public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exception { + OpcUaSink.transferByTablet(tabletInsertionEvent, LOGGER, tablet -> handle.transfer(tablet)); + } + + @Override + public void transfer(final Event event) throws Exception { + // Do nothing + } + + @Override + public void close() throws Exception { + if (Objects.isNull(clsID)) { + return; + } + + synchronized (CLS_ID_TO_REFERENCE_COUNT_AND_HANDLE_MAP) { + final Pair pair = + CLS_ID_TO_REFERENCE_COUNT_AND_HANDLE_MAP.get(clsID); + if (pair == null) { + return; + } + + if (pair.getLeft().decrementAndGet() <= 0) { + try { + pair.getRight().close(); + } finally { + CLS_ID_TO_REFERENCE_COUNT_AND_HANDLE_MAP.remove(clsID); + } + } + } + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/opcua/OpcUaKeyStoreLoader.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/OpcUaKeyStoreLoader.java similarity index 80% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/opcua/OpcUaKeyStoreLoader.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/OpcUaKeyStoreLoader.java index 45632485e3f84..b17f27532d7ae 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/opcua/OpcUaKeyStoreLoader.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/OpcUaKeyStoreLoader.java @@ -17,7 +17,9 @@ * under the License. */ -package org.apache.iotdb.db.pipe.connector.protocol.opcua; +package org.apache.iotdb.db.pipe.sink.protocol.opcua; + +import org.apache.iotdb.commons.utils.FileUtils; import com.google.common.collect.Sets; import org.eclipse.milo.opcua.sdk.server.util.HostnameUtil; @@ -27,8 +29,8 @@ import org.slf4j.LoggerFactory; import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; import java.security.Key; import java.security.KeyPair; @@ -51,13 +53,22 @@ class OpcUaKeyStoreLoader { private X509Certificate serverCertificate; private KeyPair serverKeyPair; - OpcUaKeyStoreLoader load(Path baseDir, char[] password) throws Exception { + OpcUaKeyStoreLoader load(final Path baseDir, final char[] password) throws Exception { final KeyStore keyStore = KeyStore.getInstance("PKCS12"); final File serverKeyStore = baseDir.resolve("iotdb-server.pfx").toFile(); LOGGER.info("Loading KeyStore at {}", serverKeyStore); + if (serverKeyStore.exists()) { + try { + keyStore.load(Files.newInputStream(serverKeyStore.toPath()), password); + } catch (final IOException e) { + LOGGER.warn("Load keyStore failed, the existing keyStore may be stale, re-constructing..."); + FileUtils.deleteFileOrDirectory(serverKeyStore); + } + } + if (!serverKeyStore.exists()) { keyStore.load(null, password); @@ -81,21 +92,20 @@ OpcUaKeyStoreLoader load(Path baseDir, char[] password) throws Exception { Sets.newHashSet(HostnameUtil.getHostname()), HostnameUtil.getHostnames("0.0.0.0", false)); - for (String hostname : hostnames) { - if (IP_ADDR_PATTERN.matcher(hostname).matches()) { - builder.addIpAddress(hostname); - } else { - builder.addDnsName(hostname); - } - } + hostnames.forEach( + hostname -> { + if (IP_ADDR_PATTERN.matcher(hostname).matches()) { + builder.addIpAddress(hostname); + } else { + builder.addDnsName(hostname); + } + }); final X509Certificate certificate = builder.build(); keyStore.setKeyEntry( SERVER_ALIAS, keyPair.getPrivate(), password, new X509Certificate[] {certificate}); - keyStore.store(new FileOutputStream(serverKeyStore), password); - } else { - keyStore.load(new FileInputStream(serverKeyStore), password); + keyStore.store(Files.newOutputStream(serverKeyStore.toPath()), password); } final Key serverPrivateKey = keyStore.getKey(SERVER_ALIAS, password); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/OpcUaNameSpace.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/OpcUaNameSpace.java new file mode 100644 index 0000000000000..f9fbb2d2ad4dc --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/OpcUaNameSpace.java @@ -0,0 +1,390 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.pipe.sink.protocol.opcua; + +import org.apache.iotdb.commons.exception.pipe.PipeRuntimeCriticalException; +import org.apache.iotdb.commons.exception.pipe.PipeRuntimeNonCriticalException; +import org.apache.iotdb.db.pipe.sink.util.PipeTabletEventSorter; +import org.apache.iotdb.db.utils.DateTimeUtils; +import org.apache.iotdb.db.utils.TimestampPrecisionUtils; +import org.apache.iotdb.pipe.api.event.Event; + +import org.apache.tsfile.common.constant.TsFileConstant; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.utils.Binary; +import org.apache.tsfile.write.UnSupportedDataTypeException; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.MeasurementSchema; +import org.eclipse.milo.opcua.sdk.core.AccessLevel; +import org.eclipse.milo.opcua.sdk.core.Reference; +import org.eclipse.milo.opcua.sdk.server.Lifecycle; +import org.eclipse.milo.opcua.sdk.server.OpcUaServer; +import org.eclipse.milo.opcua.sdk.server.api.DataItem; +import org.eclipse.milo.opcua.sdk.server.api.ManagedNamespaceWithLifecycle; +import org.eclipse.milo.opcua.sdk.server.api.MonitoredItem; +import org.eclipse.milo.opcua.sdk.server.model.nodes.objects.BaseEventTypeNode; +import org.eclipse.milo.opcua.sdk.server.nodes.UaFolderNode; +import org.eclipse.milo.opcua.sdk.server.nodes.UaVariableNode; +import org.eclipse.milo.opcua.sdk.server.util.SubscriptionModel; +import org.eclipse.milo.opcua.stack.core.Identifiers; +import org.eclipse.milo.opcua.stack.core.UaException; +import org.eclipse.milo.opcua.stack.core.types.builtin.DataValue; +import org.eclipse.milo.opcua.stack.core.types.builtin.DateTime; +import org.eclipse.milo.opcua.stack.core.types.builtin.LocalizedText; +import org.eclipse.milo.opcua.stack.core.types.builtin.NodeId; +import org.eclipse.milo.opcua.stack.core.types.builtin.StatusCode; +import org.eclipse.milo.opcua.stack.core.types.builtin.Variant; + +import java.nio.file.Paths; +import java.sql.Date; +import java.time.LocalDate; +import java.time.ZoneId; +import java.util.List; +import java.util.Objects; +import java.util.UUID; + +public class OpcUaNameSpace extends ManagedNamespaceWithLifecycle { + public static final String NAMESPACE_URI = "urn:apache:iotdb:opc-server"; + private final boolean isClientServerModel; + private final SubscriptionModel subscriptionModel; + private final OpcUaServerBuilder builder; + + OpcUaNameSpace( + final OpcUaServer server, + final boolean isClientServerModel, + final OpcUaServerBuilder builder) { + super(server, NAMESPACE_URI); + this.isClientServerModel = isClientServerModel; + this.builder = builder; + + subscriptionModel = new SubscriptionModel(server, this); + getLifecycleManager().addLifecycle(subscriptionModel); + getLifecycleManager() + .addLifecycle( + new Lifecycle() { + @Override + public void startup() { + // Do nothing + } + + @Override + public void shutdown() { + getServer().shutdown(); + builder.close(); + } + }); + } + + void transfer(final Tablet tablet) throws UaException { + if (isClientServerModel) { + transferTabletForClientServerModel(tablet); + } else 
{ + transferTabletForPubSubModel(tablet); + } + } + + private void transferTabletForClientServerModel(final Tablet tablet) { + new PipeTabletEventSorter(tablet).deduplicateAndSortTimestampsIfNecessary(); + + final String[] segments = tablet.deviceId.split("\\."); + if (segments.length == 0) { + throw new PipeRuntimeCriticalException("The segments of tablets must exist"); + } + final StringBuilder currentStr = new StringBuilder(); + UaFolderNode folderNode = null; + NodeId folderNodeId; + for (final String segment : segments) { + final UaFolderNode nextFolderNode; + + currentStr.append(segment); + folderNodeId = newNodeId(currentStr.toString()); + currentStr.append("/"); + + if (!getNodeManager().containsNode(folderNodeId)) { + nextFolderNode = + new UaFolderNode( + getNodeContext(), + folderNodeId, + newQualifiedName(segment), + LocalizedText.english(segment)); + getNodeManager().addNode(nextFolderNode); + if (Objects.nonNull(folderNode)) { + folderNode.addOrganizes(nextFolderNode); + } else { + nextFolderNode.addReference( + new Reference( + folderNodeId, + Identifiers.Organizes, + Identifiers.ObjectsFolder.expanded(), + false)); + } + folderNode = nextFolderNode; + } else { + folderNode = + (UaFolderNode) + getNodeManager() + .getNode(folderNodeId) + .orElseThrow( + () -> + new PipeRuntimeCriticalException( + String.format( + "The folder node for %s does not exist.", tablet.deviceId))); + } + } + + final String currentFolder = currentStr.toString(); + for (int i = 0; i < tablet.getSchemas().size(); ++i) { + final MeasurementSchema measurementSchema = tablet.getSchemas().get(i); + final String name = measurementSchema.getMeasurementId(); + final TSDataType type = measurementSchema.getType(); + final NodeId nodeId = newNodeId(currentFolder + name); + final UaVariableNode measurementNode; + if (!getNodeManager().containsNode(nodeId)) { + measurementNode = + new UaVariableNode.UaVariableNodeBuilder(getNodeContext()) + .setNodeId(newNodeId(currentFolder + name)) + .setAccessLevel(AccessLevel.READ_WRITE) + .setUserAccessLevel(AccessLevel.READ_ONLY) + .setBrowseName(newQualifiedName(name)) + .setDisplayName(LocalizedText.english(name)) + .setDataType(convertToOpcDataType(type)) + .setTypeDefinition(Identifiers.BaseDataVariableType) + .build(); + getNodeManager().addNode(measurementNode); + folderNode.addOrganizes(measurementNode); + } else { + // This must exist + measurementNode = + (UaVariableNode) + getNodeManager() + .getNode(nodeId) + .orElseThrow( + () -> + new PipeRuntimeCriticalException( + String.format("The Node %s does not exist.", nodeId))); + } + + int lastNonnullIndex = -1; + for (int j = tablet.rowSize - 1; j >= 0; --j) { + if (!tablet.bitMaps[i].isMarked(j)) { + lastNonnullIndex = j; + break; + } + } + + if (lastNonnullIndex != -1) { + final long utcTimestamp = timestampToUtc(tablet.timestamps[lastNonnullIndex]); + if (Objects.isNull(measurementNode.getValue()) + || Objects.requireNonNull(measurementNode.getValue().getSourceTime()).getUtcTime() + < utcTimestamp) { + measurementNode.setValue( + new DataValue( + new Variant(getTabletObjectValue4Opc(tablet.values[i], lastNonnullIndex, type)), + StatusCode.GOOD, + new DateTime(utcTimestamp), + new DateTime())); + } + } + } + } + + private static Object getTabletObjectValue4Opc( + final Object column, final int rowIndex, final TSDataType type) { + switch (type) { + case BOOLEAN: + return ((boolean[]) column)[rowIndex]; + case INT32: + return ((int[]) column)[rowIndex]; + case DATE: + return new DateTime(Date.valueOf(((LocalDate[]) 
column)[rowIndex])); + case INT64: + return ((long[]) column)[rowIndex]; + case TIMESTAMP: + return new DateTime(timestampToUtc(((long[]) column)[rowIndex])); + case FLOAT: + return ((float[]) column)[rowIndex]; + case DOUBLE: + return ((double[]) column)[rowIndex]; + case TEXT: + case BLOB: + case STRING: + return ((Binary[]) column)[rowIndex].toString(); + default: + throw new UnSupportedDataTypeException("UnSupported dataType " + type); + } + } + + private static long timestampToUtc(final long timeStamp) { + return TimestampPrecisionUtils.currPrecision.toNanos(timeStamp) / 100L + 116444736000000000L; + } + + /** + * Transfer {@link Tablet} into eventNodes and post it on the eventBus, so that they will be heard + * at the subscribers. Notice that an eventNode is reused to reduce object creation costs. + * + * @param tablet the tablet to send + * @throws UaException if failed to create {@link Event} + */ + private void transferTabletForPubSubModel(final Tablet tablet) throws UaException { + final BaseEventTypeNode eventNode = + getServer() + .getEventFactory() + .createEvent( + new NodeId(getNamespaceIndex(), UUID.randomUUID()), Identifiers.BaseEventType); + // Use eventNode here because other nodes doesn't support values and times simultaneously + for (int columnIndex = 0; columnIndex < tablet.getSchemas().size(); ++columnIndex) { + + final TSDataType dataType = tablet.getSchemas().get(columnIndex).getType(); + + // Source name --> Sensor path, like root.test.d_0.s_0 + eventNode.setSourceName( + tablet.deviceId + + TsFileConstant.PATH_SEPARATOR + + tablet.getSchemas().get(columnIndex).getMeasurementId()); + + // Source node --> Sensor type, like double + eventNode.setSourceNode(convertToOpcDataType(dataType)); + + for (int rowIndex = 0; rowIndex < tablet.rowSize; ++rowIndex) { + // Filter null value + if (tablet.bitMaps[columnIndex].isMarked(rowIndex)) { + continue; + } + + // Time --> TimeStamp + eventNode.setTime(new DateTime(timestampToUtc(tablet.timestamps[rowIndex]))); + + // Message --> Value + switch (dataType) { + case BOOLEAN: + eventNode.setMessage( + LocalizedText.english( + Boolean.toString(((boolean[]) tablet.values[columnIndex])[rowIndex]))); + break; + case INT32: + eventNode.setMessage( + LocalizedText.english( + Integer.toString(((int[]) tablet.values[columnIndex])[rowIndex]))); + break; + case DATE: + eventNode.setMessage( + LocalizedText.english( + (((LocalDate[]) tablet.values[columnIndex])[rowIndex]) + .atStartOfDay(ZoneId.systemDefault()) + .toString())); + break; + case INT64: + eventNode.setMessage( + LocalizedText.english( + Long.toString(((long[]) tablet.values[columnIndex])[rowIndex]))); + break; + case TIMESTAMP: + eventNode.setMessage( + LocalizedText.english( + DateTimeUtils.convertLongToDate( + ((long[]) tablet.values[columnIndex])[rowIndex]))); + break; + case FLOAT: + eventNode.setMessage( + LocalizedText.english( + Float.toString(((float[]) tablet.values[columnIndex])[rowIndex]))); + break; + case DOUBLE: + eventNode.setMessage( + LocalizedText.english( + Double.toString(((double[]) tablet.values[columnIndex])[rowIndex]))); + break; + case TEXT: + case BLOB: + case STRING: + eventNode.setMessage( + LocalizedText.english( + ((Binary[]) tablet.values[columnIndex])[rowIndex].toString())); + break; + case VECTOR: + case UNKNOWN: + default: + throw new PipeRuntimeNonCriticalException( + "Unsupported data type: " + tablet.getSchemas().get(columnIndex).getType()); + } + + // Send the event + getServer().getEventBus().post(eventNode); + } + } + 
eventNode.delete(); + } + + private NodeId convertToOpcDataType(final TSDataType type) { + switch (type) { + case BOOLEAN: + return Identifiers.Boolean; + case INT32: + return Identifiers.Int32; + case DATE: + case TIMESTAMP: + return Identifiers.DateTime; + case INT64: + return Identifiers.Int64; + case FLOAT: + return Identifiers.Float; + case DOUBLE: + return Identifiers.Double; + case TEXT: + case BLOB: + case STRING: + return Identifiers.String; + case VECTOR: + case UNKNOWN: + default: + throw new PipeRuntimeNonCriticalException("Unsupported data type: " + type); + } + } + + @Override + public void onDataItemsCreated(final List dataItems) { + subscriptionModel.onDataItemsCreated(dataItems); + } + + @Override + public void onDataItemsModified(final List dataItems) { + subscriptionModel.onDataItemsModified(dataItems); + } + + @Override + public void onDataItemsDeleted(final List dataItems) { + subscriptionModel.onDataItemsDeleted(dataItems); + } + + @Override + public void onMonitoringModeChanged(final List monitoredItems) { + subscriptionModel.onMonitoringModeChanged(monitoredItems); + } + + /////////////////////////////// Conflict detection /////////////////////////////// + + void checkEquals( + final String user, + final String password, + final String securityDir, + final boolean enableAnonymousAccess) { + builder.checkEquals(user, password, Paths.get(securityDir), enableAnonymousAccess); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/opcua/OpcUaServerBuilder.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/OpcUaServerBuilder.java similarity index 56% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/opcua/OpcUaServerBuilder.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/OpcUaServerBuilder.java index 859823184040b..bc2df4839e2bc 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/opcua/OpcUaServerBuilder.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/OpcUaServerBuilder.java @@ -17,9 +17,9 @@ * under the License. */ -package org.apache.iotdb.db.pipe.connector.protocol.opcua; +package org.apache.iotdb.db.pipe.sink.protocol.opcua; -import org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant; +import org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant; import org.apache.iotdb.pipe.api.exception.PipeException; import org.eclipse.milo.opcua.sdk.server.OpcUaServer; @@ -49,7 +49,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.Closeable; import java.io.File; +import java.io.IOException; +import java.nio.file.FileSystems; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; @@ -57,6 +60,7 @@ import java.security.cert.X509Certificate; import java.util.LinkedHashSet; import java.util.List; +import java.util.Objects; import java.util.Set; import static com.google.common.collect.Lists.newArrayList; @@ -69,7 +73,7 @@ * OPC UA Server builder for IoTDB to send data. The coding style referenced ExampleServer.java in * Eclipse Milo. 
*/ -public class OpcUaServerBuilder { +public class OpcUaServerBuilder implements Closeable { private static final Logger LOGGER = LoggerFactory.getLogger(OpcUaServerBuilder.class); private static final String WILD_CARD_ADDRESS = "0.0.0.0"; @@ -79,41 +83,49 @@ public class OpcUaServerBuilder { private String user; private String password; private Path securityDir; - - public OpcUaServerBuilder() { - tcpBindPort = PipeConnectorConstant.CONNECTOR_OPC_UA_TCP_BIND_PORT_DEFAULT_VALUE; - httpsBindPort = PipeConnectorConstant.CONNECTOR_OPC_UA_HTTPS_BIND_PORT_DEFAULT_VALUE; - user = PipeConnectorConstant.CONNECTOR_IOTDB_USER_DEFAULT_VALUE; - password = PipeConnectorConstant.CONNECTOR_IOTDB_PASSWORD_DEFAULT_VALUE; - securityDir = Paths.get(PipeConnectorConstant.CONNECTOR_OPC_UA_SECURITY_DIR_DEFAULT_VALUE); + private boolean enableAnonymousAccess; + private DefaultTrustListManager trustListManager; + + OpcUaServerBuilder() { + tcpBindPort = PipeSinkConstant.CONNECTOR_OPC_UA_TCP_BIND_PORT_DEFAULT_VALUE; + httpsBindPort = PipeSinkConstant.CONNECTOR_OPC_UA_HTTPS_BIND_PORT_DEFAULT_VALUE; + user = PipeSinkConstant.CONNECTOR_IOTDB_USER_DEFAULT_VALUE; + password = PipeSinkConstant.CONNECTOR_IOTDB_PASSWORD_DEFAULT_VALUE; + securityDir = Paths.get(PipeSinkConstant.CONNECTOR_OPC_UA_SECURITY_DIR_DEFAULT_VALUE); + enableAnonymousAccess = PipeSinkConstant.CONNECTOR_OPC_UA_ENABLE_ANONYMOUS_ACCESS_DEFAULT_VALUE; } - public OpcUaServerBuilder setTcpBindPort(int tcpBindPort) { + OpcUaServerBuilder setTcpBindPort(final int tcpBindPort) { this.tcpBindPort = tcpBindPort; return this; } - public OpcUaServerBuilder setHttpsBindPort(int httpsBindPort) { + OpcUaServerBuilder setHttpsBindPort(final int httpsBindPort) { this.httpsBindPort = httpsBindPort; return this; } - public OpcUaServerBuilder setUser(String user) { + OpcUaServerBuilder setUser(final String user) { this.user = user; return this; } - public OpcUaServerBuilder setPassword(String password) { + OpcUaServerBuilder setPassword(final String password) { this.password = password; return this; } - public OpcUaServerBuilder setSecurityDir(String securityDir) { + OpcUaServerBuilder setSecurityDir(final String securityDir) { this.securityDir = Paths.get(securityDir); return this; } - public OpcUaServer build() throws Exception { + OpcUaServerBuilder setEnableAnonymousAccess(final boolean enableAnonymousAccess) { + this.enableAnonymousAccess = enableAnonymousAccess; + return this; + } + + OpcUaServer build() throws Exception { Files.createDirectories(securityDir); if (!Files.exists(securityDir)) { throw new PipeException("Unable to create security dir: " + securityDir); @@ -133,76 +145,73 @@ public OpcUaServer build() throws Exception { new DefaultCertificateManager(loader.getServerKeyPair(), loader.getServerCertificate()); final OpcUaServerConfig serverConfig; - try (DefaultTrustListManager trustListManager = new DefaultTrustListManager(pkiDir)) { - LOGGER.info( - "Certificate directory is: {}, Please move certificates from the reject dir to the trusted directory to allow encrypted access", - pkiDir.getAbsolutePath()); - - final KeyPair httpsKeyPair = SelfSignedCertificateGenerator.generateRsaKeyPair(2048); - - final SelfSignedHttpsCertificateBuilder httpsCertificateBuilder = - new SelfSignedHttpsCertificateBuilder(httpsKeyPair); - httpsCertificateBuilder.setCommonName(HostnameUtil.getHostname()); - HostnameUtil.getHostnames(WILD_CARD_ADDRESS).forEach(httpsCertificateBuilder::addDnsName); - final X509Certificate httpsCertificate = httpsCertificateBuilder.build(); - - 
final DefaultServerCertificateValidator certificateValidator = - new DefaultServerCertificateValidator(trustListManager); - - final UsernameIdentityValidator identityValidator = - new UsernameIdentityValidator( - true, - authChallenge -> { - String inputUsername = authChallenge.getUsername(); - String inputPassword = authChallenge.getPassword(); - - return inputUsername.equals(user) && inputPassword.equals(password); - }); - - final X509IdentityValidator x509IdentityValidator = new X509IdentityValidator(c -> true); - - final X509Certificate certificate = - certificateManager.getCertificates().stream() - .findFirst() - .orElseThrow( - () -> - new UaRuntimeException( - StatusCodes.Bad_ConfigurationError, "No certificate found")); - - final String applicationUri = - CertificateUtil.getSanUri(certificate) - .orElseThrow( - () -> - new UaRuntimeException( - StatusCodes.Bad_ConfigurationError, - "Certificate is missing the application URI")); - - final Set endpointConfigurations = - createEndpointConfigurations(certificate, tcpBindPort, httpsBindPort); - - serverConfig = - OpcUaServerConfig.builder() - .setApplicationUri(applicationUri) - .setApplicationName(LocalizedText.english("Apache IoTDB OPC UA server")) - .setEndpoints(endpointConfigurations) - .setBuildInfo( - new BuildInfo( - "urn:apache:iotdb:opc-ua-server", - "apache", - "Apache IoTDB OPC UA server", - OpcUaServer.SDK_VERSION, - "", - DateTime.now())) - .setCertificateManager(certificateManager) - .setTrustListManager(trustListManager) - .setCertificateValidator(certificateValidator) - .setHttpsKeyPair(httpsKeyPair) - .setHttpsCertificateChain(new X509Certificate[] {httpsCertificate}) - .setIdentityValidator( - new CompositeValidator(identityValidator, x509IdentityValidator)) - .setProductUri("urn:apache:iotdb:opc-ua-server") - .build(); - } + + trustListManager = new DefaultTrustListManager(pkiDir); + + LOGGER.info( + "Certificate directory is: {}, Please move certificates from the reject dir to the trusted directory to allow encrypted access", + pkiDir.getAbsolutePath()); + + final KeyPair httpsKeyPair = SelfSignedCertificateGenerator.generateRsaKeyPair(2048); + + final SelfSignedHttpsCertificateBuilder httpsCertificateBuilder = + new SelfSignedHttpsCertificateBuilder(httpsKeyPair); + httpsCertificateBuilder.setCommonName(HostnameUtil.getHostname()); + HostnameUtil.getHostnames(WILD_CARD_ADDRESS).forEach(httpsCertificateBuilder::addDnsName); + final X509Certificate httpsCertificate = httpsCertificateBuilder.build(); + + final DefaultServerCertificateValidator certificateValidator = + new DefaultServerCertificateValidator(trustListManager); + + final UsernameIdentityValidator identityValidator = + new UsernameIdentityValidator( + enableAnonymousAccess, + authChallenge -> + authChallenge.getUsername().equals(user) + && authChallenge.getPassword().equals(password)); + + final X509IdentityValidator x509IdentityValidator = new X509IdentityValidator(c -> true); + + final X509Certificate certificate = + certificateManager.getCertificates().stream() + .findFirst() + .orElseThrow( + () -> + new UaRuntimeException( + StatusCodes.Bad_ConfigurationError, "No certificate found")); + + final String applicationUri = + CertificateUtil.getSanUri(certificate) + .orElseThrow( + () -> + new UaRuntimeException( + StatusCodes.Bad_ConfigurationError, + "Certificate is missing the application URI")); + + final Set endpointConfigurations = + createEndpointConfigurations(certificate, tcpBindPort, httpsBindPort); + + serverConfig = + 
OpcUaServerConfig.builder()
+            .setApplicationUri(applicationUri)
+            .setApplicationName(LocalizedText.english("Apache IoTDB OPC UA server"))
+            .setEndpoints(endpointConfigurations)
+            .setBuildInfo(
+                new BuildInfo(
+                    "urn:apache:iotdb:opc-ua-server",
+                    "apache",
+                    "Apache IoTDB OPC UA server",
+                    OpcUaServer.SDK_VERSION,
+                    "",
+                    DateTime.now()))
+            .setCertificateManager(certificateManager)
+            .setTrustListManager(trustListManager)
+            .setCertificateValidator(certificateValidator)
+            .setHttpsKeyPair(httpsKeyPair)
+            .setHttpsCertificateChain(new X509Certificate[] {httpsCertificate})
+            .setIdentityValidator(new CompositeValidator(identityValidator, x509IdentityValidator))
+            .setProductUri("urn:apache:iotdb:opc-ua-server")
+            .build();
 
     // Setup server to enable event posting
     final OpcUaServer server = new OpcUaServer(serverConfig);
@@ -215,7 +224,7 @@ public OpcUaServer build() throws Exception {
   }
 
   private Set<EndpointConfiguration> createEndpointConfigurations(
-      X509Certificate certificate, int tcpBindPort, int httpsBindPort) {
+      final X509Certificate certificate, final int tcpBindPort, final int httpsBindPort) {
     final Set<EndpointConfiguration> endpointConfigurations = new LinkedHashSet<>();
 
     final List<String> bindAddresses = newArrayList();
@@ -225,8 +234,8 @@ private Set<EndpointConfiguration> createEndpointConfigurations(
     hostnames.add(HostnameUtil.getHostname());
     hostnames.addAll(HostnameUtil.getHostnames(WILD_CARD_ADDRESS));
 
-    for (String bindAddress : bindAddresses) {
-      for (String hostname : hostnames) {
+    for (final String bindAddress : bindAddresses) {
+      for (final String hostname : hostnames) {
         final EndpointConfiguration.Builder builder =
             EndpointConfiguration.newBuilder()
                 .setBindAddress(bindAddress)
@@ -279,7 +288,7 @@ private Set<EndpointConfiguration> createEndpointConfigurations(
   }
 
   private EndpointConfiguration buildTcpEndpoint(
-      EndpointConfiguration.Builder base, int tcpBindPort) {
+      final EndpointConfiguration.Builder base, final int tcpBindPort) {
     return base.copy()
         .setTransportProfile(TransportProfile.TCP_UASC_UABINARY)
         .setBindPort(tcpBindPort)
@@ -287,10 +296,46 @@ private EndpointConfiguration buildTcpEndpoint(
   }
 
   private EndpointConfiguration buildHttpsEndpoint(
-      EndpointConfiguration.Builder base, int httpsBindPort) {
+      final EndpointConfiguration.Builder base, final int httpsBindPort) {
     return base.copy()
         .setTransportProfile(TransportProfile.HTTPS_UABINARY)
         .setBindPort(httpsBindPort)
         .build();
   }
+
+  /////////////////////////////// Conflict detection ///////////////////////////////
+
+  void checkEquals(
+      final String user,
+      final String password,
+      final Path securityDir,
+      final boolean enableAnonymousAccess) {
+    checkEquals("user", this.user, user);
+    checkEquals("password", this.password, password);
+    checkEquals(
+        "security dir",
+        FileSystems.getDefault().getPath(this.securityDir.toAbsolutePath().toString()),
+        FileSystems.getDefault().getPath(securityDir.toAbsolutePath().toString()));
+    checkEquals("enableAnonymousAccess option", this.enableAnonymousAccess, enableAnonymousAccess);
+  }
+
+  private void checkEquals(final String attrName, final Object thisAttr, final Object thatAttr) {
+    if (!Objects.equals(thisAttr, thatAttr)) {
+      throw new PipeException(
+          String.format(
+              "The existing server with tcp port %s and https port %s has %s %s, which conflicts with the new %s %s; rejecting reuse.",
+              tcpBindPort, httpsBindPort, attrName, thisAttr, attrName, thatAttr));
+    }
+  }
+
+  @Override
+  public void close() {
+    if (Objects.nonNull(trustListManager)) {
+      try {
+        trustListManager.close();
+      } catch (final IOException e) {
+        LOGGER.warn("Failed to close trustListManager,
because {}.", e.getMessage()); + } + } + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/OpcUaSink.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/OpcUaSink.java new file mode 100644 index 0000000000000..5ce35e750538e --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/OpcUaSink.java @@ -0,0 +1,303 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.pipe.sink.protocol.opcua; + +import org.apache.iotdb.db.conf.IoTDBConfig; +import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; +import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; +import org.apache.iotdb.pipe.api.PipeConnector; +import org.apache.iotdb.pipe.api.customizer.configuration.PipeConnectorRuntimeConfiguration; +import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator; +import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; +import org.apache.iotdb.pipe.api.event.Event; +import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; +import org.apache.iotdb.pipe.api.exception.PipeException; + +import org.apache.tsfile.utils.Pair; +import org.apache.tsfile.write.record.Tablet; +import org.eclipse.milo.opcua.sdk.server.OpcUaServer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.util.Arrays; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_PASSWORD_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_PASSWORD_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_USERNAME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_USER_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_IOTDB_USER_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_OPC_UA_ENABLE_ANONYMOUS_ACCESS_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_OPC_UA_ENABLE_ANONYMOUS_ACCESS_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_OPC_UA_HTTPS_BIND_PORT_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_OPC_UA_HTTPS_BIND_PORT_KEY; +import static 
org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_OPC_UA_MODEL_CLIENT_SERVER_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_OPC_UA_MODEL_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_OPC_UA_MODEL_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_OPC_UA_MODEL_PUB_SUB_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_OPC_UA_SECURITY_DIR_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_OPC_UA_SECURITY_DIR_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_OPC_UA_TCP_BIND_PORT_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_OPC_UA_TCP_BIND_PORT_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_IOTDB_PASSWORD_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_IOTDB_USERNAME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_IOTDB_USER_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_OPC_UA_ENABLE_ANONYMOUS_ACCESS_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_OPC_UA_HTTPS_BIND_PORT_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_OPC_UA_MODEL_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_OPC_UA_SECURITY_DIR_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_OPC_UA_TCP_BIND_PORT_KEY; + +/** + * Send data in IoTDB based on Opc Ua protocol, using Eclipse Milo. All data are converted into + * tablets, and then: + * + *
<p>1. In pub-sub mode, converted to eventNodes to send to the subscriber clients.
+ *
+ * <p>2. In client-server mode, push the newest value to the local server.
+ */
+public class OpcUaSink implements PipeConnector {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(OpcUaSink.class);
+
+  private static final Map<String, Pair<AtomicInteger, OpcUaNameSpace>>
+      SERVER_KEY_TO_REFERENCE_COUNT_AND_NAME_SPACE_MAP = new ConcurrentHashMap<>();
+
+  private String serverKey;
+  private OpcUaNameSpace nameSpace;
+
+  @Override
+  public void validate(final PipeParameterValidator validator) throws Exception {
+    validator
+        .validateAttributeValueRange(
+            CONNECTOR_OPC_UA_MODEL_KEY,
+            true,
+            CONNECTOR_OPC_UA_MODEL_CLIENT_SERVER_VALUE,
+            CONNECTOR_OPC_UA_MODEL_PUB_SUB_VALUE)
+        .validateAttributeValueRange(
+            SINK_OPC_UA_MODEL_KEY,
+            true,
+            CONNECTOR_OPC_UA_MODEL_CLIENT_SERVER_VALUE,
+            CONNECTOR_OPC_UA_MODEL_PUB_SUB_VALUE)
+        .validateSynonymAttributes(
+            Arrays.asList(CONNECTOR_IOTDB_USER_KEY, SINK_IOTDB_USER_KEY),
+            Arrays.asList(CONNECTOR_IOTDB_USERNAME_KEY, SINK_IOTDB_USERNAME_KEY),
+            false);
+  }
+
+  @Override
+  public void customize(
+      final PipeParameters parameters, final PipeConnectorRuntimeConfiguration configuration)
+      throws Exception {
+    final int tcpBindPort =
+        parameters.getIntOrDefault(
+            Arrays.asList(CONNECTOR_OPC_UA_TCP_BIND_PORT_KEY, SINK_OPC_UA_TCP_BIND_PORT_KEY),
+            CONNECTOR_OPC_UA_TCP_BIND_PORT_DEFAULT_VALUE);
+    final int httpsBindPort =
+        parameters.getIntOrDefault(
+            Arrays.asList(CONNECTOR_OPC_UA_HTTPS_BIND_PORT_KEY, SINK_OPC_UA_HTTPS_BIND_PORT_KEY),
+            CONNECTOR_OPC_UA_HTTPS_BIND_PORT_DEFAULT_VALUE);
+
+    final String user =
+        parameters.getStringOrDefault(
+            Arrays.asList(
+                CONNECTOR_IOTDB_USER_KEY,
+                SINK_IOTDB_USER_KEY,
+                CONNECTOR_IOTDB_USERNAME_KEY,
+                SINK_IOTDB_USERNAME_KEY),
+            CONNECTOR_IOTDB_USER_DEFAULT_VALUE);
+    final String password =
+        parameters.getStringOrDefault(
+            Arrays.asList(CONNECTOR_IOTDB_PASSWORD_KEY, SINK_IOTDB_PASSWORD_KEY),
+            CONNECTOR_IOTDB_PASSWORD_DEFAULT_VALUE);
+    final String securityDir =
+        IoTDBConfig.addDataHomeDir(
+            parameters.getStringOrDefault(
+                Arrays.asList(CONNECTOR_OPC_UA_SECURITY_DIR_KEY, SINK_OPC_UA_SECURITY_DIR_KEY),
+                CONNECTOR_OPC_UA_SECURITY_DIR_DEFAULT_VALUE
+                    + File.separatorChar
+                    + httpsBindPort
+                    + "_"
+                    + tcpBindPort));
+    final boolean enableAnonymousAccess =
+        parameters.getBooleanOrDefault(
+            Arrays.asList(
+                CONNECTOR_OPC_UA_ENABLE_ANONYMOUS_ACCESS_KEY,
+                SINK_OPC_UA_ENABLE_ANONYMOUS_ACCESS_KEY),
+            CONNECTOR_OPC_UA_ENABLE_ANONYMOUS_ACCESS_DEFAULT_VALUE);
+
+    synchronized (SERVER_KEY_TO_REFERENCE_COUNT_AND_NAME_SPACE_MAP) {
+      serverKey = httpsBindPort + ":" + tcpBindPort;
+
+      nameSpace =
+          SERVER_KEY_TO_REFERENCE_COUNT_AND_NAME_SPACE_MAP
+              .compute(
+                  serverKey,
+                  (key, oldValue) -> {
+                    try {
+                      if (Objects.isNull(oldValue)) {
+                        final OpcUaServerBuilder builder =
+                            new OpcUaServerBuilder()
+                                .setTcpBindPort(tcpBindPort)
+                                .setHttpsBindPort(httpsBindPort)
+                                .setUser(user)
+                                .setPassword(password)
+                                .setSecurityDir(securityDir)
+                                .setEnableAnonymousAccess(enableAnonymousAccess);
+                        final OpcUaServer newServer = builder.build();
+                        nameSpace =
+                            new OpcUaNameSpace(
+                                newServer,
+                                parameters
+                                    .getStringOrDefault(
+                                        Arrays.asList(
+                                            CONNECTOR_OPC_UA_MODEL_KEY, SINK_OPC_UA_MODEL_KEY),
+                                        CONNECTOR_OPC_UA_MODEL_DEFAULT_VALUE)
+                                    .equals(CONNECTOR_OPC_UA_MODEL_CLIENT_SERVER_VALUE),
+                                builder);
+                        nameSpace.startup();
+                        newServer.startup().get();
+                        return new Pair<>(new AtomicInteger(0), nameSpace);
+                      } else {
+                        oldValue
+                            .getRight()
+                            .checkEquals(user, password, securityDir, enableAnonymousAccess);
+                        return oldValue;
+                      }
+                    } catch (final PipeException
e) {
+                      throw e;
+                    } catch (final Exception e) {
+                      throw new PipeException("Failed to build and start up the OpcUaServer", e);
+                    }
+                  })
+              .getRight();
+      SERVER_KEY_TO_REFERENCE_COUNT_AND_NAME_SPACE_MAP.get(serverKey).getLeft().incrementAndGet();
+    }
+  }
+
+  @Override
+  public void handshake() throws Exception {
+    // Server side, do nothing
+  }
+
+  @Override
+  public void heartbeat() throws Exception {
+    // Server side, do nothing
+  }
+
+  @Override
+  public void transfer(final Event event) throws Exception {
+    // Do nothing when receiving heartbeat or other events
+  }
+
+  @Override
+  public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exception {
+    transferByTablet(tabletInsertionEvent, LOGGER, tablet -> nameSpace.transfer(tablet));
+  }
+
+  public static void transferByTablet(
+      final TabletInsertionEvent tabletInsertionEvent,
+      final Logger logger,
+      final ThrowingConsumer<Tablet, Exception> transferTablet)
+      throws Exception {
+    // PipeProcessor can change the type of TabletInsertionEvent
+    if (!(tabletInsertionEvent instanceof PipeInsertNodeTabletInsertionEvent)
+        && !(tabletInsertionEvent instanceof PipeRawTabletInsertionEvent)) {
+      logger.warn(
+          "This connector only supports "
+              + "PipeInsertNodeTabletInsertionEvent and PipeRawTabletInsertionEvent. "
+              + "Ignoring {}.",
+          tabletInsertionEvent);
+      return;
+    }
+
+    if (tabletInsertionEvent instanceof PipeInsertNodeTabletInsertionEvent) {
+      transferTabletWrapper(
+          (PipeInsertNodeTabletInsertionEvent) tabletInsertionEvent, transferTablet);
+    } else {
+      transferTabletWrapper((PipeRawTabletInsertionEvent) tabletInsertionEvent, transferTablet);
+    }
+  }
+
+  private static void transferTabletWrapper(
+      final PipeInsertNodeTabletInsertionEvent pipeInsertNodeTabletInsertionEvent,
+      final ThrowingConsumer<Tablet, Exception> transferTablet)
+      throws Exception {
+    // We increase the reference count for this event to determine if the event may be released.
+    if (!pipeInsertNodeTabletInsertionEvent.increaseReferenceCount(OpcUaSink.class.getName())) {
+      return;
+    }
+    try {
+      for (final Tablet tablet : pipeInsertNodeTabletInsertionEvent.convertToTablets()) {
+        transferTablet.accept(tablet);
+      }
+    } finally {
+      pipeInsertNodeTabletInsertionEvent.decreaseReferenceCount(OpcUaSink.class.getName(), false);
+    }
+  }
+
+  private static void transferTabletWrapper(
+      final PipeRawTabletInsertionEvent pipeRawTabletInsertionEvent,
+      final ThrowingConsumer<Tablet, Exception> transferTablet)
+      throws Exception {
+    // We increase the reference count for this event to determine if the event may be released.
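+    // Editorial note (assumption from the surrounding code): a failed increase means the event
+    // has already been released elsewhere, so the tablet is skipped; the matching decrease is
+    // guaranteed by the finally block below.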
+    if (!pipeRawTabletInsertionEvent.increaseReferenceCount(OpcUaSink.class.getName())) {
+      return;
+    }
+    try {
+      transferTablet.accept(pipeRawTabletInsertionEvent.convertToTablet());
+    } finally {
+      pipeRawTabletInsertionEvent.decreaseReferenceCount(OpcUaSink.class.getName(), false);
+    }
+  }
+
+  @FunctionalInterface
+  public interface ThrowingConsumer<T, E extends Exception> {
+    void accept(final T t) throws E;
+  }
+
+  @Override
+  public void close() throws Exception {
+    if (serverKey == null) {
+      return;
+    }
+
+    synchronized (SERVER_KEY_TO_REFERENCE_COUNT_AND_NAME_SPACE_MAP) {
+      final Pair<AtomicInteger, OpcUaNameSpace> pair =
+          SERVER_KEY_TO_REFERENCE_COUNT_AND_NAME_SPACE_MAP.get(serverKey);
+      if (pair == null) {
+        return;
+      }
+
+      if (pair.getLeft().decrementAndGet() <= 0) {
+        try {
+          pair.getRight().shutdown();
+        } finally {
+          SERVER_KEY_TO_REFERENCE_COUNT_AND_NAME_SPACE_MAP.remove(serverKey);
+        }
+      }
+    }
+  }
+}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusAsyncConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/pipeconsensus/PipeConsensusAsyncSink.java
similarity index 85%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusAsyncConnector.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/pipeconsensus/PipeConsensusAsyncSink.java
index 7520f217df36f..a34781dee2efe 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusAsyncConnector.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/pipeconsensus/PipeConsensusAsyncSink.java
@@ -17,7 +17,7 @@
  * under the License.
  */
-package org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus;
+package org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus;
 
 import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId;
 import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType;
@@ -27,29 +27,29 @@
 import org.apache.iotdb.commons.client.container.PipeConsensusClientMgrContainer;
 import org.apache.iotdb.commons.consensus.ConsensusGroupId;
 import org.apache.iotdb.commons.consensus.index.ProgressIndex;
-import org.apache.iotdb.commons.exception.pipe.PipeRuntimeConnectorRetryTimesConfigurableException;
-import org.apache.iotdb.commons.pipe.connector.protocol.IoTDBConnector;
+import org.apache.iotdb.commons.exception.pipe.PipeRuntimeSinkRetryTimesConfigurableException;
+import org.apache.iotdb.commons.pipe.agent.task.progress.PipeEventCommitManager;
 import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
-import org.apache.iotdb.commons.pipe.progress.PipeEventCommitManager;
+import org.apache.iotdb.commons.pipe.sink.protocol.IoTDBSink;
 import org.apache.iotdb.commons.service.metric.MetricService;
-import org.apache.iotdb.consensus.pipe.consensuspipe.ConsensusPipeConnector;
+import org.apache.iotdb.consensus.pipe.consensuspipe.ConsensusPipeSink;
 import org.apache.iotdb.consensus.pipe.metric.PipeConsensusSyncLagManager;
 import org.apache.iotdb.consensus.pipe.thrift.TCommitId;
 import org.apache.iotdb.consensus.pipe.thrift.TPipeConsensusTransferReq;
 import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
-import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.handler.PipeConsensusTabletBatchEventHandler;
-import
org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.handler.PipeConsensusTabletInsertNodeEventHandler; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.handler.PipeConsensusTsFileInsertionEventHandler; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.payload.builder.PipeConsensusAsyncBatchReqBuilder; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.payload.request.PipeConsensusTabletBinaryReq; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.payload.request.PipeConsensusTabletInsertNodeReq; -import org.apache.iotdb.db.pipe.consensus.PipeConsensusConnectorMetrics; +import org.apache.iotdb.db.pipe.consensus.PipeConsensusSinkMetrics; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.handler.PipeConsensusTabletBatchEventHandler; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.handler.PipeConsensusTabletInsertNodeEventHandler; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.handler.PipeConsensusTsFileInsertionEventHandler; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.payload.builder.PipeConsensusAsyncBatchReqBuilder; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.payload.request.PipeConsensusTabletBinaryReq; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.payload.request.PipeConsensusTabletInsertNodeReq; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; import org.apache.iotdb.pipe.api.customizer.configuration.PipeConnectorRuntimeConfiguration; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator; @@ -71,13 +71,13 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_CONSENSUS_GROUP_ID_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_CONSENSUS_PIPE_NAME; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_CONSENSUS_GROUP_ID_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_CONSENSUS_PIPE_NAME; // TODO: Optimize the network and disk io for TsFile onComplete // TODO: support Tablet Batch -public class PipeConsensusAsyncConnector extends IoTDBConnector implements ConsensusPipeConnector { - private static final Logger LOGGER = LoggerFactory.getLogger(PipeConsensusAsyncConnector.class); +public class PipeConsensusAsyncSink extends IoTDBSink implements ConsensusPipeSink { + private static final Logger LOGGER = LoggerFactory.getLogger(PipeConsensusAsyncSink.class); private static final String ENQUEUE_EXCEPTION_MSG = "Timeout: PipeConsensusConnector offers an event into transferBuffer failed, because transferBuffer is full."; private static final String THRIFT_ERROR_FORMATTER_WITHOUT_ENDPOINT = @@ -95,10 +95,10 @@ public class PipeConsensusAsyncConnector extends IoTDBConnector implements Conse new LinkedBlockingDeque<>(IOTDB_CONFIG.getPipeConsensusPipelineSize()); private final AtomicBoolean isClosed = new AtomicBoolean(false); private final int thisDataNodeId = IoTDBDescriptor.getInstance().getConfig().getDataNodeId(); - private 
PipeConsensusConnectorMetrics pipeConsensusConnectorMetrics; + private PipeConsensusSinkMetrics pipeConsensusSinkMetrics; private String consensusPipeName; private int consensusGroupId; - private PipeConsensusSyncConnector retryConnector; + private PipeConsensusSyncSink retryConnector; private IClientManager asyncTransferClientManager; private PipeConsensusAsyncBatchReqBuilder tabletBatchBuilder; private volatile long currentReplicateProgress = 0; @@ -128,17 +128,17 @@ public void customize(PipeParameters parameters, PipeConnectorRuntimeConfigurati consensusPipeName = parameters.getString(CONNECTOR_CONSENSUS_PIPE_NAME); // initialize metric components - pipeConsensusConnectorMetrics = new PipeConsensusConnectorMetrics(this); + pipeConsensusSinkMetrics = new PipeConsensusSinkMetrics(this); PipeConsensusSyncLagManager.getInstance(getConsensusGroupIdStr()) .addConsensusPipeConnector(this); - MetricService.getInstance().addMetricSet(this.pipeConsensusConnectorMetrics); + MetricService.getInstance().addMetricSet(this.pipeConsensusSinkMetrics); // In PipeConsensus, one pipeConsensusTask corresponds to a pipeConsensusConnector. Thus, // `nodeUrls` here actually is a singletonList that contains one peer's TEndPoint. But here we // retain the implementation of list to cope with possible future expansion retryConnector = - new PipeConsensusSyncConnector( - nodeUrls, consensusGroupId, thisDataNodeId, pipeConsensusConnectorMetrics); + new PipeConsensusSyncSink( + nodeUrls, consensusGroupId, thisDataNodeId, pipeConsensusSinkMetrics); retryConnector.customize(parameters, configuration); asyncTransferClientManager = PipeConsensusClientMgrContainer.getInstance().getAsyncClientManager(); @@ -167,20 +167,25 @@ private boolean addEvent2Buffer(EnrichedEvent event) { event.getCommitId(), event); } + // Special judge to avoid transfer stuck when re-transfer events that will not be put in + // retryQueue. + if (transferBuffer.contains(event)) { + return true; + } long currentTime = System.nanoTime(); boolean result = transferBuffer.offer( event, PIPE_CONSENSUS_EVENT_ENQUEUE_TIMEOUT_IN_MS, TimeUnit.MILLISECONDS); long duration = System.nanoTime() - currentTime; - pipeConsensusConnectorMetrics.recordConnectorEnqueueTimer(duration); + pipeConsensusSinkMetrics.recordConnectorEnqueueTimer(duration); // add reference if (result) { - event.increaseReferenceCount(PipeConsensusAsyncConnector.class.getName()); + event.increaseReferenceCount(PipeConsensusAsyncSink.class.getName()); } // if connector is closed when executing this method, need to clear this event's reference // count to avoid unnecessarily pinning some resource such as WAL. if (isClosed.get()) { - event.clearReferenceCount(PipeConsensusAsyncConnector.class.getName()); + event.clearReferenceCount(PipeConsensusAsyncSink.class.getName()); } return result; } catch (InterruptedException e) { @@ -203,6 +208,13 @@ public synchronized void removeEventFromBuffer(EnrichedEvent event) { transferBuffer.size(), IOTDB_CONFIG.getPipeConsensusPipelineSize()); } + if (transferBuffer.isEmpty()) { + LOGGER.info( + "PipeConsensus-ConsensusGroup-{}: try to remove event-{} after pipeConsensusAsyncConnector being closed. 
Ignore it.", + consensusGroupId, + event); + return; + } Iterator iterator = transferBuffer.iterator(); EnrichedEvent current = iterator.next(); while (!current.equalsInPipeConsensus(event) && iterator.hasNext()) { @@ -212,7 +224,7 @@ public synchronized void removeEventFromBuffer(EnrichedEvent event) { // update replicate progress currentReplicateProgress = Math.max(currentReplicateProgress, event.getCommitId()); // decrease reference count - event.decreaseReferenceCount(PipeConsensusAsyncConnector.class.getName(), true); + event.decreaseReferenceCount(PipeConsensusAsyncSink.class.getName(), true); } @Override @@ -233,7 +245,7 @@ public void transfer(TabletInsertionEvent tabletInsertionEvent) throws Exception boolean enqueueResult = addEvent2Buffer((EnrichedEvent) tabletInsertionEvent); if (!enqueueResult) { - throw new PipeRuntimeConnectorRetryTimesConfigurableException( + throw new PipeRuntimeSinkRetryTimesConfigurableException( ENQUEUE_EXCEPTION_MSG, Integer.MAX_VALUE); } // batch transfer tablets. @@ -241,7 +253,7 @@ public void transfer(TabletInsertionEvent tabletInsertionEvent) throws Exception if (tabletBatchBuilder.onEvent(tabletInsertionEvent)) { final PipeConsensusTabletBatchEventHandler pipeConsensusTabletBatchEventHandler = new PipeConsensusTabletBatchEventHandler( - tabletBatchBuilder, this, pipeConsensusConnectorMetrics); + tabletBatchBuilder, this, pipeConsensusSinkMetrics); transfer(pipeConsensusTabletBatchEventHandler); @@ -261,14 +273,11 @@ public void transfer(TabletInsertionEvent tabletInsertionEvent) throws Exception // We increase the reference count for this event to determine if the event may be released. if (!pipeInsertNodeTabletInsertionEvent.increaseReferenceCount( - PipeConsensusAsyncConnector.class.getName())) { - pipeInsertNodeTabletInsertionEvent.decreaseReferenceCount( - PipeConsensusAsyncConnector.class.getName(), false); + PipeConsensusAsyncSink.class.getName())) { return; } - final InsertNode insertNode = - pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible(); + final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode(); final ProgressIndex progressIndex = pipeInsertNodeTabletInsertionEvent.getProgressIndex(); final TPipeConsensusTransferReq pipeConsensusTransferReq = Objects.isNull(insertNode) @@ -285,7 +294,7 @@ public void transfer(TabletInsertionEvent tabletInsertionEvent) throws Exception pipeInsertNodeTabletInsertionEvent, pipeConsensusTransferReq, this, - pipeConsensusConnectorMetrics); + pipeConsensusSinkMetrics); transfer(pipeConsensusInsertNodeReqHandler); } @@ -329,7 +338,7 @@ public void transfer(TsFileInsertionEvent tsFileInsertionEvent) throws Exception boolean enqueueResult = addEvent2Buffer((EnrichedEvent) tsFileInsertionEvent); if (!enqueueResult) { - throw new PipeRuntimeConnectorRetryTimesConfigurableException( + throw new PipeRuntimeSinkRetryTimesConfigurableException( ENQUEUE_EXCEPTION_MSG, Integer.MAX_VALUE); } final PipeTsFileInsertionEvent pipeTsFileInsertionEvent = @@ -340,10 +349,7 @@ public void transfer(TsFileInsertionEvent tsFileInsertionEvent) throws Exception TConsensusGroupId tConsensusGroupId = new TConsensusGroupId(TConsensusGroupType.DataRegion, consensusGroupId); // We increase the reference count for this event to determine if the event may be released. 
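+    // Editorial note (inferred from this hunk): the extra decreaseReferenceCount() on a failed
+    // increase is dropped because a failed increase never pinned the event, so simply returning
+    // leaves the reference count consistent.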
- if (!pipeTsFileInsertionEvent.increaseReferenceCount( - PipeConsensusAsyncConnector.class.getName())) { - pipeTsFileInsertionEvent.decreaseReferenceCount( - PipeConsensusAsyncConnector.class.getName(), false); + if (!pipeTsFileInsertionEvent.increaseReferenceCount(PipeConsensusAsyncSink.class.getName())) { return; } @@ -360,13 +366,13 @@ public void transfer(TsFileInsertionEvent tsFileInsertionEvent) throws Exception tCommitId, tConsensusGroupId, thisDataNodeId, - pipeConsensusConnectorMetrics); + pipeConsensusSinkMetrics); transfer(pipeConsensusTsFileInsertionEventHandler); } catch (Exception e) { // Just in case. To avoid the case that exception occurred when constructing the handler. pipeTsFileInsertionEvent.decreaseReferenceCount( - PipeConsensusAsyncConnector.class.getName(), false); + PipeConsensusAsyncSink.class.getName(), false); throw e; } } @@ -409,7 +415,7 @@ private void transferBatchedEventsIfNecessary() throws IOException { transfer( new PipeConsensusTabletBatchEventHandler( - tabletBatchBuilder, this, pipeConsensusConnectorMetrics)); + tabletBatchBuilder, this, pipeConsensusSinkMetrics)); tabletBatchBuilder.onSuccess(); } @@ -445,7 +451,7 @@ private void syncTransferQueuedEventsIfNecessary() throws Exception { // release resource if (peekedEvent instanceof EnrichedEvent) { ((EnrichedEvent) peekedEvent) - .decreaseReferenceCount(PipeConsensusAsyncConnector.class.getName(), true); + .decreaseReferenceCount(PipeConsensusAsyncSink.class.getName(), true); } final Event polledEvent = retryEventQueue.poll(); @@ -458,7 +464,7 @@ private void syncTransferQueuedEventsIfNecessary() throws Exception { polledEvent); } } - if (polledEvent != null && LOGGER.isDebugEnabled()) { + if (polledEvent != null) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Polled event {} from retry queue.", polledEvent); } @@ -478,7 +484,7 @@ private void syncTransferQueuedEventsIfNecessary() throws Exception { public void addFailureEventToRetryQueue(final Event event) { if (isClosed.get()) { if (event instanceof EnrichedEvent) { - ((EnrichedEvent) event).clearReferenceCount(PipeConsensusAsyncConnector.class.getName()); + ((EnrichedEvent) event).clearReferenceCount(PipeConsensusAsyncSink.class.getName()); } return; } @@ -493,7 +499,7 @@ public void addFailureEventToRetryQueue(final Event event) { if (isClosed.get()) { if (event instanceof EnrichedEvent) { - ((EnrichedEvent) event).clearReferenceCount(PipeConsensusAsyncConnector.class.getName()); + ((EnrichedEvent) event).clearReferenceCount(PipeConsensusAsyncSink.class.getName()); } } } @@ -513,7 +519,7 @@ public synchronized void clearRetryEventsReferenceCount() { while (!retryEventQueue.isEmpty()) { final Event event = retryEventQueue.poll(); if (event instanceof EnrichedEvent) { - ((EnrichedEvent) event).clearReferenceCount(PipeConsensusAsyncConnector.class.getName()); + ((EnrichedEvent) event).clearReferenceCount(PipeConsensusAsyncSink.class.getName()); } } } @@ -521,7 +527,7 @@ public synchronized void clearRetryEventsReferenceCount() { public synchronized void clearTransferBufferReferenceCount() { while (!transferBuffer.isEmpty()) { final EnrichedEvent event = transferBuffer.poll(); - event.clearReferenceCount(PipeConsensusAsyncConnector.class.getName()); + event.clearReferenceCount(PipeConsensusAsyncSink.class.getName()); } } @@ -561,7 +567,7 @@ public synchronized void close() { PipeConsensusSyncLagManager.getInstance(getConsensusGroupIdStr()) .removeConsensusPipeConnector(this); - 
MetricService.getInstance().removeMetricSet(this.pipeConsensusConnectorMetrics); + MetricService.getInstance().removeMetricSet(this.pipeConsensusSinkMetrics); } //////////////////////////// APIs provided for metric framework //////////////////////////// @@ -576,10 +582,11 @@ public int getRetryBufferSize() { @Override public long getConsensusPipeCommitProgress() { - long creationTime = PipeDataNodeAgent.task().getPipeCreationTime(consensusPipeName); - String committerKey = - String.format("%s_%s_%s", consensusPipeName, consensusGroupId, creationTime); - return PipeEventCommitManager.getInstance().getGivenConsensusPipeCommitId(committerKey); + return PipeEventCommitManager.getInstance() + .getGivenConsensusPipeCommitId( + consensusPipeName, + PipeDataNodeAgent.task().getPipeCreationTime(consensusPipeName), + consensusGroupId); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusSyncConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/pipeconsensus/PipeConsensusSyncSink.java similarity index 85% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusSyncConnector.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/pipeconsensus/PipeConsensusSyncSink.java index ad7ae47b302b4..7527452c1347a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusSyncConnector.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/pipeconsensus/PipeConsensusSyncSink.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus; +package org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus; import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; @@ -28,22 +28,22 @@ import org.apache.iotdb.commons.client.sync.SyncPipeConsensusServiceClient; import org.apache.iotdb.commons.consensus.index.ProgressIndex; import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.connector.payload.pipeconsensus.response.PipeConsensusTransferFilePieceResp; -import org.apache.iotdb.commons.pipe.connector.protocol.IoTDBConnector; +import org.apache.iotdb.commons.pipe.sink.payload.pipeconsensus.response.PipeConsensusTransferFilePieceResp; +import org.apache.iotdb.commons.pipe.sink.protocol.IoTDBSink; import org.apache.iotdb.consensus.pipe.thrift.TCommitId; import org.apache.iotdb.consensus.pipe.thrift.TPipeConsensusBatchTransferResp; import org.apache.iotdb.consensus.pipe.thrift.TPipeConsensusTransferResp; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.payload.builder.PipeConsensusSyncBatchReqBuilder; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.payload.request.PipeConsensusTabletBinaryReq; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.payload.request.PipeConsensusTabletInsertNodeReq; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.payload.request.PipeConsensusTsFilePieceReq; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.payload.request.PipeConsensusTsFilePieceWithModReq; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.payload.request.PipeConsensusTsFileSealReq; -import 
org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.payload.request.PipeConsensusTsFileSealWithModReq; -import org.apache.iotdb.db.pipe.consensus.PipeConsensusConnectorMetrics; +import org.apache.iotdb.db.pipe.consensus.PipeConsensusSinkMetrics; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.payload.builder.PipeConsensusSyncBatchReqBuilder; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.payload.request.PipeConsensusTabletBinaryReq; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.payload.request.PipeConsensusTabletInsertNodeReq; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.payload.request.PipeConsensusTsFilePieceReq; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.payload.request.PipeConsensusTsFilePieceWithModReq; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.payload.request.PipeConsensusTsFileSealReq; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.payload.request.PipeConsensusTsFileSealWithModReq; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; import org.apache.iotdb.pipe.api.customizer.configuration.PipeConnectorRuntimeConfiguration; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; @@ -65,8 +65,8 @@ import java.util.stream.Collectors; /** This connector is used for PipeConsensus to transfer queued event. */ -public class PipeConsensusSyncConnector extends IoTDBConnector { - private static final Logger LOGGER = LoggerFactory.getLogger(PipeConsensusSyncConnector.class); +public class PipeConsensusSyncSink extends IoTDBSink { + private static final Logger LOGGER = LoggerFactory.getLogger(PipeConsensusSyncSink.class); private static final String PIPE_CONSENSUS_SYNC_CONNECTION_FAILED_FORMAT = "PipeConsensus: syncClient connection to %s:%s failed when %s, because: %s"; private static final String TABLET_INSERTION_NODE_SCENARIO = "transfer insertionNode tablet"; @@ -76,14 +76,14 @@ public class PipeConsensusSyncConnector extends IoTDBConnector { private final List peers; private final int thisDataNodeId; private final int consensusGroupId; - private final PipeConsensusConnectorMetrics pipeConsensusConnectorMetrics; + private final PipeConsensusSinkMetrics pipeConsensusSinkMetrics; private PipeConsensusSyncBatchReqBuilder tabletBatchBuilder; - public PipeConsensusSyncConnector( - List peers, - int consensusGroupId, - int thisDataNodeId, - PipeConsensusConnectorMetrics pipeConsensusConnectorMetrics) { + public PipeConsensusSyncSink( + final List peers, + final int consensusGroupId, + final int thisDataNodeId, + final PipeConsensusSinkMetrics pipeConsensusSinkMetrics) { // In PipeConsensus, one pipeConsensusTask corresponds to a pipeConsensusConnector. Thus, // `peers` here actually is a singletonList that contains one peer's TEndPoint. 
But here we // retain the implementation of list to cope with possible future expansion @@ -92,11 +92,12 @@ public PipeConsensusSyncConnector( this.thisDataNodeId = thisDataNodeId; this.syncRetryClientManager = PipeConsensusClientMgrContainer.getInstance().getSyncClientManager(); - this.pipeConsensusConnectorMetrics = pipeConsensusConnectorMetrics; + this.pipeConsensusSinkMetrics = pipeConsensusSinkMetrics; } @Override - public void customize(PipeParameters parameters, PipeConnectorRuntimeConfiguration configuration) + public void customize( + final PipeParameters parameters, final PipeConnectorRuntimeConfiguration configuration) throws Exception { super.customize(parameters, configuration); if (isTabletBatchModeEnabled) { @@ -123,7 +124,7 @@ public void heartbeat() throws Exception { } @Override - public void transfer(TabletInsertionEvent tabletInsertionEvent) throws Exception { + public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exception { // Note: here we don't need to do type judgment here, because PipeConsensus uses // PIPE_CONSENSUS_PROCESSOR and will not change the event type like // org.apache.iotdb.db.pipe.connector.protocol.thrift.sync.IoTDBDataRegionSyncConnector @@ -133,12 +134,12 @@ public void transfer(TabletInsertionEvent tabletInsertionEvent) throws Exception doTransfer(); } } else { - long startTime = System.nanoTime(); + final long startTime = System.nanoTime(); doTransferWrapper((PipeInsertNodeTabletInsertionEvent) tabletInsertionEvent); - long duration = System.nanoTime() - startTime; - pipeConsensusConnectorMetrics.recordRetryWALTransferTimer(duration); + final long duration = System.nanoTime() - startTime; + pipeConsensusSinkMetrics.recordRetryWALTransferTimer(duration); } - } catch (Exception e) { + } catch (final Exception e) { throw new PipeConnectionException( String.format( "Failed to transfer tablet insertion event %s, because %s.", @@ -148,19 +149,19 @@ public void transfer(TabletInsertionEvent tabletInsertionEvent) throws Exception } @Override - public void transfer(TsFileInsertionEvent tsFileInsertionEvent) throws Exception { + public void transfer(final TsFileInsertionEvent tsFileInsertionEvent) throws Exception { // Note: here we don't need to do type judgment here, because PipeConsensus uses DO_NOTHING // processor and will not change the event type like // org.apache.iotdb.db.pipe.connector.protocol.thrift.sync.IoTDBDataRegionSyncConnector try { - long startTime = System.nanoTime(); + final long startTime = System.nanoTime(); // In order to commit in order if (isTabletBatchModeEnabled && !tabletBatchBuilder.isEmpty()) { doTransfer(); } doTransfer((PipeTsFileInsertionEvent) tsFileInsertionEvent); - long duration = System.nanoTime() - startTime; - pipeConsensusConnectorMetrics.recordRetryTsFileTransferTimer(duration); + final long duration = System.nanoTime() - startTime; + pipeConsensusSinkMetrics.recordRetryTsFileTransferTimer(duration); } catch (Exception e) { throw new PipeConnectionException( String.format( @@ -171,7 +172,7 @@ public void transfer(TsFileInsertionEvent tsFileInsertionEvent) throws Exception } @Override - public void transfer(Event event) throws Exception { + public void transfer(final Event event) throws Exception { // in order to commit in order if (isTabletBatchModeEnabled && !tabletBatchBuilder.isEmpty()) { doTransfer(); @@ -209,7 +210,7 @@ private void doTransfer() { // } tabletBatchBuilder.onSuccess(); - } catch (Exception e) { + } catch (final Exception e) { throw new PipeConnectionException( 
String.format( PIPE_CONSENSUS_SYNC_CONNECTION_FAILED_FORMAT, @@ -224,16 +225,16 @@ private void doTransfer() { private void doTransferWrapper( final PipeInsertNodeTabletInsertionEvent pipeInsertNodeTabletInsertionEvent) throws PipeException { + // We increase the reference count for this event to determine if the event may be released. + if (!pipeInsertNodeTabletInsertionEvent.increaseReferenceCount( + PipeConsensusSyncSink.class.getName())) { + return; + } try { - // We increase the reference count for this event to determine if the event may be released. - if (!pipeInsertNodeTabletInsertionEvent.increaseReferenceCount( - PipeConsensusSyncConnector.class.getName())) { - return; - } doTransfer(pipeInsertNodeTabletInsertionEvent); } finally { pipeInsertNodeTabletInsertionEvent.decreaseReferenceCount( - PipeConsensusSyncConnector.class.getName(), false); + PipeConsensusSyncSink.class.getName(), false); } } @@ -242,16 +243,16 @@ private void doTransfer(PipeInsertNodeTabletInsertionEvent pipeInsertNodeTabletI final InsertNode insertNode; final ProgressIndex progressIndex; final TPipeConsensusTransferResp resp; - TCommitId tCommitId = + final TCommitId tCommitId = new TCommitId( pipeInsertNodeTabletInsertionEvent.getCommitId(), pipeInsertNodeTabletInsertionEvent.getRebootTimes()); - TConsensusGroupId tConsensusGroupId = + final TConsensusGroupId tConsensusGroupId = new TConsensusGroupId(TConsensusGroupType.DataRegion, consensusGroupId); try (final SyncPipeConsensusServiceClient syncPipeConsensusServiceClient = syncRetryClientManager.borrowClient(getFollowerUrl())) { - insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible(); + insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode(); progressIndex = pipeInsertNodeTabletInsertionEvent.getProgressIndex(); if (insertNode != null) { @@ -269,7 +270,7 @@ private void doTransfer(PipeInsertNodeTabletInsertionEvent pipeInsertNodeTabletI progressIndex, thisDataNodeId)); } - } catch (Exception e) { + } catch (final Exception e) { throw new PipeConnectionException( String.format( PIPE_CONSENSUS_SYNC_CONNECTION_FAILED_FORMAT, @@ -293,7 +294,8 @@ private void doTransfer(PipeInsertNodeTabletInsertionEvent pipeInsertNodeTabletI } } - private void doTransfer(PipeTsFileInsertionEvent pipeTsFileInsertionEvent) throws PipeException { + private void doTransfer(final PipeTsFileInsertionEvent pipeTsFileInsertionEvent) + throws PipeException { final File tsFile = pipeTsFileInsertionEvent.getTsFile(); final File modFile = pipeTsFileInsertionEvent.getModFile(); final TPipeConsensusTransferResp resp; @@ -340,7 +342,7 @@ private void doTransfer(PipeTsFileInsertionEvent pipeTsFileInsertionEvent) throw pipeTsFileInsertionEvent.getProgressIndex(), thisDataNodeId)); } - } catch (Exception e) { + } catch (final Exception e) { throw new PipeConnectionException( String.format( PIPE_CONSENSUS_SYNC_CONNECTION_FAILED_FORMAT, @@ -365,11 +367,11 @@ private void doTransfer(PipeTsFileInsertionEvent pipeTsFileInsertionEvent) throw } protected void transferFilePieces( - File file, - SyncPipeConsensusServiceClient syncPipeConsensusServiceClient, - boolean isMultiFile, - TCommitId tCommitId, - TConsensusGroupId tConsensusGroupId) + final File file, + final SyncPipeConsensusServiceClient syncPipeConsensusServiceClient, + final boolean isMultiFile, + final TCommitId tCommitId, + final TConsensusGroupId tConsensusGroupId) throws PipeException, IOException { final int readFileBufferSize = PipeConfig.getInstance().getPipeConnectorReadFileBufferSize(); final 
byte[] readBuffer = new byte[readFileBufferSize]; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/handler/PipeConsensusTabletBatchEventHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/pipeconsensus/handler/PipeConsensusTabletBatchEventHandler.java similarity index 81% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/handler/PipeConsensusTabletBatchEventHandler.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/pipeconsensus/handler/PipeConsensusTabletBatchEventHandler.java index de9c7bec37d31..56d2c9ad0626f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/handler/PipeConsensusTabletBatchEventHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/pipeconsensus/handler/PipeConsensusTabletBatchEventHandler.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.handler; +package org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.handler; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.client.async.AsyncPipeConsensusServiceClient; @@ -25,9 +25,9 @@ import org.apache.iotdb.consensus.pipe.thrift.TPipeConsensusBatchTransferReq; import org.apache.iotdb.consensus.pipe.thrift.TPipeConsensusBatchTransferResp; import org.apache.iotdb.consensus.pipe.thrift.TPipeConsensusTransferResp; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.PipeConsensusAsyncConnector; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.payload.builder.PipeConsensusAsyncBatchReqBuilder; -import org.apache.iotdb.db.pipe.consensus.PipeConsensusConnectorMetrics; +import org.apache.iotdb.db.pipe.consensus.PipeConsensusSinkMetrics; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.PipeConsensusAsyncSink; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.payload.builder.PipeConsensusAsyncBatchReqBuilder; import org.apache.iotdb.pipe.api.event.Event; import org.apache.iotdb.pipe.api.exception.PipeException; import org.apache.iotdb.rpc.TSStatusCode; @@ -48,20 +48,20 @@ public class PipeConsensusTabletBatchEventHandler private final List requestCommitIds; private final List events; private final TPipeConsensusBatchTransferReq req; - private final PipeConsensusAsyncConnector connector; - private final PipeConsensusConnectorMetrics pipeConsensusConnectorMetrics; + private final PipeConsensusAsyncSink connector; + private final PipeConsensusSinkMetrics pipeConsensusSinkMetrics; public PipeConsensusTabletBatchEventHandler( final PipeConsensusAsyncBatchReqBuilder batchBuilder, - final PipeConsensusAsyncConnector connector, - final PipeConsensusConnectorMetrics pipeConsensusConnectorMetrics) + final PipeConsensusAsyncSink connector, + final PipeConsensusSinkMetrics pipeConsensusSinkMetrics) throws IOException { // Deep copy to keep Ids' and events' reference requestCommitIds = batchBuilder.deepCopyRequestCommitIds(); events = batchBuilder.deepCopyEvents(); req = batchBuilder.toTPipeConsensusBatchTransferReq(); - this.pipeConsensusConnectorMetrics = pipeConsensusConnectorMetrics; + this.pipeConsensusSinkMetrics = pipeConsensusSinkMetrics; this.connector = connector; } @@ -90,7 +90,7 @@ public void onComplete(final TPipeConsensusBatchTransferResp response) { .filter(tsStatus -> tsStatus.getCode() != 
TSStatusCode.SUCCESS_STATUS.getStatusCode()) .forEach( tsStatus -> { - pipeConsensusConnectorMetrics.recordRetryCounter(); + pipeConsensusSinkMetrics.recordRetryCounter(); connector .statusHandler() .handle(tsStatus, tsStatus.getMessage(), events.toString()); @@ -117,15 +117,15 @@ public void onComplete(final TPipeConsensusBatchTransferResp response) { @Override public void onError(final Exception exception) { LOGGER.warn( - "PipeConsensus: Failed to transfer TabletInsertionEvent batch {} (request commit ids={}).", + "PipeConsensus: Failed to transfer TabletInsertionEvent batch. Total failed events: {}, related pipe names: {}", + events.size(), events.stream() .map( event -> event instanceof EnrichedEvent - ? ((EnrichedEvent) event).coreReportMessage() - : event.toString()) - .collect(Collectors.toList()), - requestCommitIds, + ? ((EnrichedEvent) event).getPipeName() + : "UNKNOWN") + .collect(Collectors.toSet()), exception); connector.addFailureEventsToRetryQueue(events); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/handler/PipeConsensusTabletInsertNodeEventHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/pipeconsensus/handler/PipeConsensusTabletInsertNodeEventHandler.java similarity index 83% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/handler/PipeConsensusTabletInsertNodeEventHandler.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/pipeconsensus/handler/PipeConsensusTabletInsertNodeEventHandler.java index cdd56d72cce5c..9687f3127281a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/handler/PipeConsensusTabletInsertNodeEventHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/pipeconsensus/handler/PipeConsensusTabletInsertNodeEventHandler.java @@ -17,14 +17,14 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.handler; +package org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.handler; import org.apache.iotdb.commons.client.async.AsyncPipeConsensusServiceClient; import org.apache.iotdb.consensus.pipe.thrift.TPipeConsensusTransferReq; import org.apache.iotdb.consensus.pipe.thrift.TPipeConsensusTransferResp; -import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.PipeConsensusAsyncConnector; -import org.apache.iotdb.db.pipe.consensus.PipeConsensusConnectorMetrics; +import org.apache.iotdb.db.pipe.consensus.PipeConsensusSinkMetrics; import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; +import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.PipeConsensusAsyncSink; import org.apache.thrift.TException; @@ -34,8 +34,8 @@ public class PipeConsensusTabletInsertNodeEventHandler public PipeConsensusTabletInsertNodeEventHandler( PipeInsertNodeTabletInsertionEvent event, TPipeConsensusTransferReq req, - PipeConsensusAsyncConnector connector, - PipeConsensusConnectorMetrics metric) { + PipeConsensusAsyncSink connector, + PipeConsensusSinkMetrics metric) { super(event, req, connector, metric); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/handler/PipeConsensusTabletInsertionEventHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/pipeconsensus/handler/PipeConsensusTabletInsertionEventHandler.java similarity index 88% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/handler/PipeConsensusTabletInsertionEventHandler.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/pipeconsensus/handler/PipeConsensusTabletInsertionEventHandler.java index ae6a1e5334d20..9d027e711f3f4 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/handler/PipeConsensusTabletInsertionEventHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/pipeconsensus/handler/PipeConsensusTabletInsertionEventHandler.java @@ -17,16 +17,16 @@ * under the License. 
 */

-package org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.handler;
+package org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.handler;

 import org.apache.iotdb.common.rpc.thrift.TSStatus;
 import org.apache.iotdb.commons.client.async.AsyncPipeConsensusServiceClient;
 import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
 import org.apache.iotdb.consensus.pipe.thrift.TPipeConsensusTransferReq;
 import org.apache.iotdb.consensus.pipe.thrift.TPipeConsensusTransferResp;
-import org.apache.iotdb.db.pipe.connector.protocol.pipeconsensus.PipeConsensusAsyncConnector;
-import org.apache.iotdb.db.pipe.connector.protocol.thrift.async.handler.PipeTransferTabletInsertionEventHandler;
-import org.apache.iotdb.db.pipe.consensus.PipeConsensusConnectorMetrics;
+import org.apache.iotdb.db.pipe.consensus.PipeConsensusSinkMetrics;
+import org.apache.iotdb.db.pipe.sink.protocol.pipeconsensus.PipeConsensusAsyncSink;
+import org.apache.iotdb.db.pipe.sink.protocol.thrift.async.handler.PipeTransferTabletInsertionEventHandler;
 import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
 import org.apache.iotdb.pipe.api.exception.PipeException;
 import org.apache.iotdb.rpc.TSStatusCode;
@@ -45,17 +45,17 @@ public abstract class PipeConsensusTabletInsertionEventHandler retryEventQueue = new LinkedBlockingQueue<>();
+  private final BlockingQueue<PipeTsFileInsertionEvent> retryTsFileQueue = new LinkedBlockingQueue<>();
+  private final PipeDataRegionEventCounter retryEventQueueEventCounter =
+      new PipeDataRegionEventCounter();
+
+  private IoTDBDataNodeAsyncClientManager clientManager;
+  private IoTDBDataNodeAsyncClientManager transferTsFileClientManager;
+
+  // Initialized eagerly so that classes inheriting this async connector never hit an NPE
+  public AtomicInteger transferTsFileCounter = new AtomicInteger(0);
+
+  private PipeTransferBatchReqBuilder tabletBatchBuilder;
+
+  // Use these variables to prevent reference count leaks in some corner cases when closing
+  private final AtomicBoolean isClosed = new AtomicBoolean(false);
+  private final Map<PipeTransferTrackableHandler, PipeTransferTrackableHandler> pendingHandlers =
+      new ConcurrentHashMap<>();
+
+  private boolean enableSendTsFileLimit;
+
+  @Override
+  public void validate(final PipeParameterValidator validator) throws Exception {
+    super.validate(validator);
+    syncConnector.validate(validator);
+
+    final PipeParameters parameters = validator.getParameters();
+
+    validator.validate(
+        args -> !((boolean) args[0] || (boolean) args[1] || (boolean) args[2]),
+        "Only 'iotdb-thrift-ssl-sink' supports SSL transmission currently.",
+        parameters.getBooleanOrDefault(SINK_IOTDB_SSL_ENABLE_KEY, false),
+        parameters.hasAttribute(SINK_IOTDB_SSL_TRUST_STORE_PATH_KEY),
+        parameters.hasAttribute(SINK_IOTDB_SSL_TRUST_STORE_PWD_KEY));
+  }
+
+  @Override
+  public void customize(
+      final PipeParameters parameters, final PipeConnectorRuntimeConfiguration configuration)
+      throws Exception {
+    super.customize(parameters, configuration);
+    syncConnector.customize(parameters, configuration);
+
+    clientManager =
+        new IoTDBDataNodeAsyncClientManager(
+            nodeUrls,
+            parameters.getBooleanOrDefault(
+                Arrays.asList(SINK_LEADER_CACHE_ENABLE_KEY, CONNECTOR_LEADER_CACHE_ENABLE_KEY),
+                CONNECTOR_LEADER_CACHE_ENABLE_DEFAULT_VALUE),
+            loadBalanceStrategy,
+            username,
+            password,
+            shouldReceiverConvertOnTypeMismatch,
+            loadTsFileStrategy,
+            loadTsFileValidation,
+            shouldMarkAsPipeRequest,
+            false);
+
+    transferTsFileClientManager =
+        new IoTDBDataNodeAsyncClientManager(
+            nodeUrls,
+            parameters.getBooleanOrDefault(
+                Arrays.asList(SINK_LEADER_CACHE_ENABLE_KEY, CONNECTOR_LEADER_CACHE_ENABLE_KEY),
+                CONNECTOR_LEADER_CACHE_ENABLE_DEFAULT_VALUE),
+            loadBalanceStrategy,
+            username,
+            password,
+            shouldReceiverConvertOnTypeMismatch,
+            loadTsFileStrategy,
+            loadTsFileValidation,
+            shouldMarkAsPipeRequest,
+            isSplitTSFileBatchModeEnabled);
+
+    if (isTabletBatchModeEnabled) {
+      tabletBatchBuilder = new PipeTransferBatchReqBuilder(parameters);
+    }
+
+    enableSendTsFileLimit =
+        parameters.getBooleanOrDefault(
+            Arrays.asList(SINK_ENABLE_SEND_TSFILE_LIMIT, CONNECTOR_ENABLE_SEND_TSFILE_LIMIT),
+            CONNECTOR_ENABLE_SEND_TSFILE_LIMIT_DEFAULT_VALUE);
+  }
+
+  @Override
+  // Synchronized to avoid closing the connector while an event is being transferred
+  public synchronized void handshake() throws Exception {
+    syncConnector.handshake();
+  }
+
+  @Override
+  public void heartbeat() throws Exception {
+    if (!isClosed()) {
+      syncConnector.heartbeat();
+    }
+  }
+
+  @Override
+  public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exception {
+    transferQueuedEventsIfNecessary(false);
+
+    if (!(tabletInsertionEvent instanceof PipeInsertNodeTabletInsertionEvent)
+        && !(tabletInsertionEvent instanceof PipeRawTabletInsertionEvent)) {
+      LOGGER.warn(
+          "IoTDBThriftAsyncConnector only supports PipeInsertNodeTabletInsertionEvent and PipeRawTabletInsertionEvent. "
+              + "Current event: {}.",
+          tabletInsertionEvent);
+      return;
+    }
+
+    if (isTabletBatchModeEnabled) {
+      tabletBatchBuilder.onEvent(tabletInsertionEvent);
+      transferBatchedEventsIfNecessary();
+    } else {
+      transferInEventWithoutCheck(tabletInsertionEvent);
+    }
+  }
+
+  private void transferInBatchWithoutCheck(
+      final Pair<TEndPoint, PipeTabletEventBatch> endPointAndBatch)
+      throws IOException, WriteProcessException {
+    if (Objects.isNull(endPointAndBatch)) {
+      return;
+    }
+    final PipeTabletEventBatch batch = endPointAndBatch.getRight();
+
+    if (batch instanceof PipeTabletEventPlainBatch) {
+      transfer(
+          endPointAndBatch.getLeft(),
+          new PipeTransferTabletBatchEventHandler((PipeTabletEventPlainBatch) batch, this));
+    } else if (batch instanceof PipeTabletEventTsFileBatch) {
+      final PipeTabletEventTsFileBatch tsFileBatch = (PipeTabletEventTsFileBatch) batch;
+      final List<File> sealedFiles = tsFileBatch.sealTsFiles();
+      final Map<Pair<String, Long>, Double> pipe2WeightMap = tsFileBatch.deepCopyPipe2WeightMap();
+      final List<EnrichedEvent> events = tsFileBatch.deepCopyEvents();
+      final AtomicInteger eventsReferenceCount = new AtomicInteger(sealedFiles.size());
+      final AtomicBoolean eventsHadBeenAddedToRetryQueue = new AtomicBoolean(false);
+
+      try {
+        for (final File sealedFile : sealedFiles) {
+          transfer(
+              new PipeTransferTsFileHandler(
+                  this,
+                  pipe2WeightMap,
+                  events,
+                  eventsReferenceCount,
+                  eventsHadBeenAddedToRetryQueue,
+                  sealedFile,
+                  null,
+                  false));
+        }
+      } catch (final Throwable t) {
+        LOGGER.warn("Failed to transfer tsfile batch ({}).", sealedFiles, t);
+        if (eventsHadBeenAddedToRetryQueue.compareAndSet(false, true)) {
+          addFailureEventsToRetryQueue(events);
+        }
+      }
+    } else {
+      LOGGER.warn(
+          "Unsupported batch type {} when transferring tablet insertion event.", batch.getClass());
+    }
+
+    endPointAndBatch.getRight().onSuccess();
+  }
+
+  private boolean transferInEventWithoutCheck(final TabletInsertionEvent tabletInsertionEvent)
+      throws Exception {
+    if (tabletInsertionEvent instanceof PipeInsertNodeTabletInsertionEvent) {
+      final PipeInsertNodeTabletInsertionEvent pipeInsertNodeTabletInsertionEvent =
+          (PipeInsertNodeTabletInsertionEvent) tabletInsertionEvent;
+      // We increase the reference count for this event to determine if the event may be released.
+      if (!pipeInsertNodeTabletInsertionEvent.increaseReferenceCount(
+          IoTDBDataRegionAsyncSink.class.getName())) {
+        return false;
+      }
+
+      final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode();
+      final TPipeTransferReq pipeTransferReq =
+          compressIfNeeded(
+              Objects.isNull(insertNode)
+                  ? PipeTransferTabletBinaryReq.toTPipeTransferReq(
+                      pipeInsertNodeTabletInsertionEvent.getByteBuffer())
+                  : PipeTransferTabletInsertNodeReq.toTPipeTransferReq(insertNode));
+      final PipeTransferTabletInsertNodeEventHandler pipeTransferInsertNodeReqHandler =
+          new PipeTransferTabletInsertNodeEventHandler(
+              pipeInsertNodeTabletInsertionEvent, pipeTransferReq, this);
+
+      transfer(
+          // getDeviceId() may return null for InsertRowsNode
+          pipeInsertNodeTabletInsertionEvent.getDeviceId(), pipeTransferInsertNodeReqHandler);
+    } else { // tabletInsertionEvent instanceof PipeRawTabletInsertionEvent
+      final PipeRawTabletInsertionEvent pipeRawTabletInsertionEvent =
+          (PipeRawTabletInsertionEvent) tabletInsertionEvent;
+      // We increase the reference count for this event to determine if the event may be released.
+      if (!pipeRawTabletInsertionEvent.increaseReferenceCount(
+          IoTDBDataRegionAsyncSink.class.getName())) {
+        return false;
+      }
+
+      final TPipeTransferReq pipeTransferTabletRawReq =
+          compressIfNeeded(
+              PipeTransferTabletRawReq.toTPipeTransferReq(
+                  pipeRawTabletInsertionEvent.convertToTablet(),
+                  pipeRawTabletInsertionEvent.isAligned()));
+      final PipeTransferTabletRawEventHandler pipeTransferTabletReqHandler =
+          new PipeTransferTabletRawEventHandler(
+              pipeRawTabletInsertionEvent, pipeTransferTabletRawReq, this);
+
+      transfer(pipeRawTabletInsertionEvent.getDeviceId(), pipeTransferTabletReqHandler);
+    }
+
+    return true;
+  }
+
+  private void transfer(
+      final TEndPoint endPoint,
+      final PipeTransferTabletBatchEventHandler pipeTransferTabletBatchEventHandler) {
+    AsyncPipeDataTransferServiceClient client = null;
+    try {
+      client = clientManager.borrowClient(endPoint);
+      pipeTransferTabletBatchEventHandler.transfer(client);
+    } catch (final Exception ex) {
+      logOnClientException(client, ex);
+      pipeTransferTabletBatchEventHandler.onError(ex);
+    }
+  }
+
+  private void transfer(
+      final String deviceId,
+      final PipeTransferTabletInsertNodeEventHandler pipeTransferInsertNodeReqHandler) {
+    AsyncPipeDataTransferServiceClient client = null;
+    try {
+      client = clientManager.borrowClient(deviceId);
+      pipeTransferInsertNodeReqHandler.transfer(client);
+    } catch (final Exception ex) {
+      logOnClientException(client, ex);
+      pipeTransferInsertNodeReqHandler.onError(ex);
+    }
+  }
+
+  private void transfer(
+      final String deviceId, final PipeTransferTabletRawEventHandler pipeTransferTabletReqHandler) {
+    AsyncPipeDataTransferServiceClient client = null;
+    try {
+      client = clientManager.borrowClient(deviceId);
+      pipeTransferTabletReqHandler.transfer(client);
+    } catch (final Exception ex) {
+      logOnClientException(client, ex);
+      pipeTransferTabletReqHandler.onError(ex);
+    }
+  }
+
+  @Override
+  public void transfer(final TsFileInsertionEvent tsFileInsertionEvent) throws Exception {
+    transferQueuedEventsIfNecessary(false);
+    transferBatchedEventsIfNecessary();
+
+    if (!(tsFileInsertionEvent instanceof PipeTsFileInsertionEvent)) {
+      LOGGER.warn(
+          "IoTDBThriftAsyncConnector only supports PipeTsFileInsertionEvent. Current event: {}.",
Current event: {}.", + tsFileInsertionEvent); + return; + } + + transferWithoutCheck(tsFileInsertionEvent); + } + + private boolean transferWithoutCheck(final TsFileInsertionEvent tsFileInsertionEvent) + throws Exception { + final PipeTsFileInsertionEvent pipeTsFileInsertionEvent = + (PipeTsFileInsertionEvent) tsFileInsertionEvent; + // We increase the reference count for this event to determine if the event may be released. + if (!pipeTsFileInsertionEvent.increaseReferenceCount( + IoTDBDataRegionAsyncSink.class.getName())) { + return false; + } + + // We assume that no exceptions will be thrown after reference count is increased. + try { + // Just in case. To avoid the case that exception occurred when constructing the handler. + if (!pipeTsFileInsertionEvent.getTsFile().exists()) { + throw new FileNotFoundException(pipeTsFileInsertionEvent.getTsFile().getAbsolutePath()); + } + + final PipeTransferTsFileHandler pipeTransferTsFileHandler = + new PipeTransferTsFileHandler( + this, + Collections.singletonMap( + new Pair<>( + pipeTsFileInsertionEvent.getPipeName(), + pipeTsFileInsertionEvent.getCreationTime()), + 1.0), + Collections.singletonList(pipeTsFileInsertionEvent), + new AtomicInteger(1), + new AtomicBoolean(false), + pipeTsFileInsertionEvent.getTsFile(), + pipeTsFileInsertionEvent.getModFile(), + pipeTsFileInsertionEvent.isWithMod() + && clientManager.supportModsIfIsDataNodeReceiver()); + + transfer(pipeTransferTsFileHandler); + return true; + } catch (final Exception e) { + // Just in case. To avoid the case that exception occurred when constructing the handler. + pipeTsFileInsertionEvent.decreaseReferenceCount( + IoTDBDataRegionAsyncSink.class.getName(), false); + throw e; + } + } + + private void transfer(final PipeTransferTsFileHandler pipeTransferTsFileHandler) { + transferTsFileCounter.incrementAndGet(); + CompletableFuture completableFuture = + CompletableFuture.supplyAsync( + () -> { + AsyncPipeDataTransferServiceClient client = null; + try { + client = transferTsFileClientManager.borrowClient(); + pipeTransferTsFileHandler.transfer(transferTsFileClientManager, client); + } catch (final Exception ex) { + logOnClientException(client, ex); + pipeTransferTsFileHandler.onError(ex); + } finally { + transferTsFileCounter.decrementAndGet(); + } + return null; + }, + transferTsFileClientManager.getExecutor()); + + if (PipeConfig.getInstance().isTransferTsFileSync() || !isRealtimeFirst) { + try { + completableFuture.get(); + } catch (final Exception e) { + if (e instanceof InterruptedException) { + Thread.currentThread().interrupt(); + LOGGER.warn( + "Transfer tsfile event {} asynchronously was interrupted.", + pipeTransferTsFileHandler.getTsFile(), + e); + } + + pipeTransferTsFileHandler.onError(e); + LOGGER.warn( + "Failed to transfer tsfile event {} asynchronously.", + pipeTransferTsFileHandler.getTsFile(), + e); + } + } + } + + @Override + public void transfer(final Event event) throws Exception { + transferQueuedEventsIfNecessary(true); + transferBatchedEventsIfNecessary(); + + if (!(event instanceof PipeHeartbeatEvent + || event instanceof PipeSchemaRegionWritePlanEvent + || event instanceof PipeTerminateEvent)) { + LOGGER.warn( + "IoTDBThriftAsyncConnector does not support transferring generic event: {}.", event); + return; + } + + syncConnector.transfer(event); + } + + /** Try its best to commit data in order. Flush can also be a trigger to transfer batched data. 
+  private void transferBatchedEventsIfNecessary() throws IOException, WriteProcessException {
+    if (!isTabletBatchModeEnabled || tabletBatchBuilder.isEmpty()) {
+      return;
+    }
+
+    for (final Pair<TEndPoint, PipeTabletEventBatch> endPointAndBatch :
+        tabletBatchBuilder.getAllNonEmptyAndShouldEmitBatches()) {
+      transferInBatchWithoutCheck(endPointAndBatch);
+    }
+  }
+
+  @Override
+  public TPipeTransferReq compressIfNeeded(final TPipeTransferReq req) throws IOException {
+    if (Objects.isNull(compressionTimer) && Objects.nonNull(attributeSortedString)) {
+      compressionTimer =
+          PipeDataRegionSinkMetrics.getInstance().getCompressionTimer(attributeSortedString);
+    }
+    return super.compressIfNeeded(req);
+  }
+
+  //////////////////////////// Leader cache update ////////////////////////////
+
+  public void updateLeaderCache(final String deviceId, final TEndPoint endPoint) {
+    clientManager.updateLeaderCache(deviceId, endPoint);
+  }
+
+  //////////////////////////// Exception handlers ////////////////////////////
+
+  private void logOnClientException(
+      final AsyncPipeDataTransferServiceClient client, final Exception e) {
+    if (client == null) {
+      LOGGER.warn(THRIFT_ERROR_FORMATTER_WITHOUT_ENDPOINT);
+    } else {
+      client.resetMethodStateIfStopped();
+      LOGGER.warn(
+          String.format(THRIFT_ERROR_FORMATTER_WITH_ENDPOINT, client.getIp(), client.getPort()), e);
+    }
+  }
+
+  /**
+   * Transfer queued {@link Event}s which are waiting for retry.
+   *
+   * @see PipeConnector#transfer(Event) for more details.
+   * @see PipeConnector#transfer(TabletInsertionEvent) for more details.
+   * @see PipeConnector#transfer(TsFileInsertionEvent) for more details.
+   */
+  private void transferQueuedEventsIfNecessary(final boolean forced) {
+    if ((retryEventQueue.isEmpty() && retryTsFileQueue.isEmpty())
+        || (!forced
+            && retryEventQueueEventCounter.getTabletInsertionEventCount()
+                < PipeConfig.getInstance()
+                    .getPipeAsyncConnectorForcedRetryTabletEventQueueSizeThreshold()
+            && retryEventQueueEventCounter.getTsFileInsertionEventCount()
+                < PipeConfig.getInstance()
+                    .getPipeAsyncConnectorForcedRetryTsFileEventQueueSizeThreshold()
+            && retryEventQueue.size() + retryTsFileQueue.size()
+                < PipeConfig.getInstance()
+                    .getPipeAsyncConnectorForcedRetryTotalEventQueueSizeThreshold())) {
+      return;
+    }
+
+    final long retryStartTime = System.currentTimeMillis();
+    final int remainingEvents = retryEventQueue.size() + retryTsFileQueue.size();
+    while (!retryEventQueue.isEmpty() || !retryTsFileQueue.isEmpty()) {
+      synchronized (this) {
+        if (isClosed.get()) {
+          return;
+        }
+        if (retryEventQueue.isEmpty() && retryTsFileQueue.isEmpty()) {
+          break;
+        }
+
+        final Event peekedEvent;
+        final Event polledEvent;
+        if (!retryEventQueue.isEmpty()) {
+          peekedEvent = retryEventQueue.peek();
+
+          if (peekedEvent instanceof PipeInsertNodeTabletInsertionEvent) {
+            retryTransfer((PipeInsertNodeTabletInsertionEvent) peekedEvent);
+          } else if (peekedEvent instanceof PipeRawTabletInsertionEvent) {
+            retryTransfer((PipeRawTabletInsertionEvent) peekedEvent);
+          } else {
+            LOGGER.warn(
+                "IoTDBThriftAsyncConnector does not support transferring generic event: {}.",
+                peekedEvent);
+          }
+
+          polledEvent = retryEventQueue.poll();
+        } else {
+          if (transferTsFileCounter.get()
+              >= PipeConfig.getInstance().getPipeRealTimeQueueMaxWaitingTsFileSize()) {
+            return;
+          }
+          peekedEvent = retryTsFileQueue.peek();
+          retryTransfer((PipeTsFileInsertionEvent) peekedEvent);
+          polledEvent = retryTsFileQueue.poll();
+        }
+
+        retryEventQueueEventCounter.decreaseEventCount(polledEvent);
+        if (polledEvent != peekedEvent) {
LOGGER.error( + "The event polled from the queue is not the same as the event peeked from the queue. " + + "Peeked event: {}, polled event: {}.", + peekedEvent, + polledEvent); + } + if (polledEvent != null && LOGGER.isDebugEnabled()) { + LOGGER.debug("Polled event {} from retry queue.", polledEvent); + } + } + + // Stop retrying if the execution time exceeds the threshold for better realtime performance + if (System.currentTimeMillis() - retryStartTime + > PipeConfig.getInstance().getPipeAsyncConnectorMaxRetryExecutionTimeMsPerCall()) { + if (retryEventQueueEventCounter.getTabletInsertionEventCount() + < PipeConfig.getInstance() + .getPipeAsyncConnectorForcedRetryTabletEventQueueSizeThreshold() + && retryEventQueueEventCounter.getTsFileInsertionEventCount() + < PipeConfig.getInstance() + .getPipeAsyncConnectorForcedRetryTsFileEventQueueSizeThreshold() + && retryEventQueue.size() + retryTsFileQueue.size() + < PipeConfig.getInstance() + .getPipeAsyncConnectorForcedRetryTotalEventQueueSizeThreshold()) { + return; + } + + if (remainingEvents <= retryEventQueue.size() + retryTsFileQueue.size()) { + throw new PipeException( + "Failed to retry transferring events in the retry queue. Remaining events: " + + (retryEventQueue.size() + retryTsFileQueue.size()) + + " (tablet events: " + + retryEventQueueEventCounter.getTabletInsertionEventCount() + + ", tsfile events: " + + retryEventQueueEventCounter.getTsFileInsertionEventCount() + + ")."); + } + } + } + } + + private void retryTransfer(final TabletInsertionEvent tabletInsertionEvent) { + if (isTabletBatchModeEnabled) { + try { + tabletBatchBuilder.onEvent(tabletInsertionEvent); + transferBatchedEventsIfNecessary(); + if (tabletInsertionEvent instanceof EnrichedEvent) { + ((EnrichedEvent) tabletInsertionEvent) + .decreaseReferenceCount(IoTDBDataRegionAsyncSink.class.getName(), false); + } + } catch (final Exception e) { + addFailureEventToRetryQueue(tabletInsertionEvent); + } + return; + } + + // Tablet batch mode is not enabled, so we need to transfer the event directly. + try { + if (transferInEventWithoutCheck(tabletInsertionEvent)) { + if (tabletInsertionEvent instanceof EnrichedEvent) { + ((EnrichedEvent) tabletInsertionEvent) + .decreaseReferenceCount(IoTDBDataRegionAsyncSink.class.getName(), false); + } + } else { + addFailureEventToRetryQueue(tabletInsertionEvent); + } + } catch (final Exception e) { + if (tabletInsertionEvent instanceof EnrichedEvent) { + ((EnrichedEvent) tabletInsertionEvent) + .decreaseReferenceCount(IoTDBDataRegionAsyncSink.class.getName(), false); + } + addFailureEventToRetryQueue(tabletInsertionEvent); + } + } + + private void retryTransfer(final PipeTsFileInsertionEvent tsFileInsertionEvent) { + try { + if (transferWithoutCheck(tsFileInsertionEvent)) { + tsFileInsertionEvent.decreaseReferenceCount( + IoTDBDataRegionAsyncSink.class.getName(), false); + } else { + addFailureEventToRetryQueue(tsFileInsertionEvent); + } + } catch (final Exception e) { + tsFileInsertionEvent.decreaseReferenceCount(IoTDBDataRegionAsyncSink.class.getName(), false); + addFailureEventToRetryQueue(tsFileInsertionEvent); + } + } + + /** + * Add failure {@link Event} to retry queue. 
+   *
+   * @param event {@link Event} to retry
+   */
+  @SuppressWarnings("java:S899")
+  public void addFailureEventToRetryQueue(final Event event) {
+    if (event instanceof EnrichedEvent && ((EnrichedEvent) event).isReleased()) {
+      return;
+    }
+
+    if (isClosed.get()) {
+      if (event instanceof EnrichedEvent) {
+        ((EnrichedEvent) event).clearReferenceCount(IoTDBDataRegionAsyncSink.class.getName());
+      }
+      return;
+    }
+
+    if (event instanceof PipeTsFileInsertionEvent) {
+      retryTsFileQueue.offer((PipeTsFileInsertionEvent) event);
+      retryEventQueueEventCounter.increaseEventCount(event);
+    } else {
+      retryEventQueue.offer(event);
+      retryEventQueueEventCounter.increaseEventCount(event);
+    }
+
+    if (LOGGER.isDebugEnabled()) {
+      LOGGER.debug("Added event {} to retry queue.", event);
+    }
+
+    if (isClosed.get()) {
+      if (event instanceof EnrichedEvent) {
+        ((EnrichedEvent) event).clearReferenceCount(IoTDBDataRegionAsyncSink.class.getName());
+      }
+    }
+  }
+
+  /**
+   * Add failure {@link EnrichedEvent}s to retry queue.
+   *
+   * @param events {@link EnrichedEvent}s to retry
+   */
+  public void addFailureEventsToRetryQueue(final Iterable<Event> events) {
+    events.forEach(this::addFailureEventToRetryQueue);
+  }
+
+  public boolean isEnableSendTsFileLimit() {
+    return enableSendTsFileLimit;
+  }
+
+  //////////////////////////// Operations for close ////////////////////////////
+
+  @Override
+  public synchronized void discardEventsOfPipe(final String pipeNameToDrop, final int regionId) {
+    if (isTabletBatchModeEnabled) {
+      tabletBatchBuilder.discardEventsOfPipe(pipeNameToDrop, regionId);
+    }
+    retryEventQueue.removeIf(
+        event -> {
+          if (event instanceof EnrichedEvent
+              && pipeNameToDrop.equals(((EnrichedEvent) event).getPipeName())
+              && regionId == ((EnrichedEvent) event).getRegionId()) {
+            ((EnrichedEvent) event).clearReferenceCount(IoTDBDataRegionAsyncSink.class.getName());
+            retryEventQueueEventCounter.decreaseEventCount(event);
+            return true;
+          }
+          return false;
+        });
+
+    retryTsFileQueue.removeIf(
+        event -> {
+          if (event instanceof EnrichedEvent
+              && pipeNameToDrop.equals(((EnrichedEvent) event).getPipeName())
+              && regionId == ((EnrichedEvent) event).getRegionId()) {
+            ((EnrichedEvent) event).clearReferenceCount(IoTDBDataRegionAsyncSink.class.getName());
+            retryEventQueueEventCounter.decreaseEventCount(event);
+            return true;
+          }
+          return false;
+        });
+  }
+
+  @Override
+  // Synchronized to avoid closing the connector while an event is being transferred
+  public synchronized void close() {
+    isClosed.set(true);
+
+    syncConnector.close();
+
+    if (tabletBatchBuilder != null) {
+      tabletBatchBuilder.close();
+    }
+
+    // Ensure all on-the-fly handlers have been cleared
+    if (hasPendingHandlers()) {
+      ImmutableSet.copyOf(pendingHandlers.keySet())
+          .forEach(
+              handler -> {
+                handler.clearEventsReferenceCount();
+                eliminateHandler(handler, true);
+              });
+    }
+
+    try {
+      if (clientManager != null) {
+        clientManager.close();
+      }
+
+      if (transferTsFileClientManager != null) {
+        transferTsFileClientManager.close();
+      }
+    } catch (final Exception e) {
+      LOGGER.warn("Failed to close client manager.", e);
+    }
+
+    // Clear reference count of events in retry queue after closing the async client
+    clearRetryEventsReferenceCount();
+
+    super.close();
+  }
+
+  public synchronized void clearRetryEventsReferenceCount() {
+    while (!retryEventQueue.isEmpty() || !retryTsFileQueue.isEmpty()) {
+      final Event event =
+          retryTsFileQueue.isEmpty() ?
retryEventQueue.poll() : retryTsFileQueue.poll(); + retryEventQueueEventCounter.decreaseEventCount(event); + if (event instanceof EnrichedEvent) { + ((EnrichedEvent) event).clearReferenceCount(IoTDBDataRegionAsyncSink.class.getName()); + } + } + } + + //////////////////////// APIs provided for metric framework //////////////////////// + + public int getRetryEventQueueSize() { + return retryEventQueue.size() + retryTsFileQueue.size(); + } + + public int getBatchSize() { + return Objects.nonNull(tabletBatchBuilder) ? tabletBatchBuilder.size() : 0; + } + + public int getPendingHandlersSize() { + return pendingHandlers.size(); + } + + //////////////////////// APIs provided for PipeTransferTrackableHandler //////////////////////// + + public boolean isClosed() { + return isClosed.get(); + } + + public void trackHandler(final PipeTransferTrackableHandler handler) { + pendingHandlers.put(handler, handler); + } + + public void eliminateHandler( + final PipeTransferTrackableHandler handler, final boolean closeClient) { + if (closeClient) { + handler.closeClient(); + } + handler.close(); + pendingHandlers.remove(handler); + } + + public boolean hasPendingHandlers() { + return !pendingHandlers.isEmpty(); + } + + public void setTransferTsFileCounter(AtomicInteger transferTsFileCounter) { + this.transferTsFileCounter = transferTsFileCounter; + } + + @Override + public void setTabletBatchSizeHistogram(Histogram tabletBatchSizeHistogram) { + if (tabletBatchBuilder != null) { + tabletBatchBuilder.setTabletBatchSizeHistogram(tabletBatchSizeHistogram); + } + } + + @Override + public void setTsFileBatchSizeHistogram(Histogram tsFileBatchSizeHistogram) { + if (tabletBatchBuilder != null) { + tabletBatchBuilder.setTsFileBatchSizeHistogram(tsFileBatchSizeHistogram); + } + } + + @Override + public void setTabletBatchTimeIntervalHistogram(Histogram tabletBatchTimeIntervalHistogram) { + if (tabletBatchBuilder != null) { + tabletBatchBuilder.setTabletBatchTimeIntervalHistogram(tabletBatchTimeIntervalHistogram); + } + } + + @Override + public void setTsFileBatchTimeIntervalHistogram(Histogram tsFileBatchTimeIntervalHistogram) { + if (tabletBatchBuilder != null) { + tabletBatchBuilder.setTsFileBatchTimeIntervalHistogram(tsFileBatchTimeIntervalHistogram); + } + } + + @Override + public void setBatchEventSizeHistogram(Histogram eventSizeHistogram) { + if (tabletBatchBuilder != null) { + tabletBatchBuilder.setEventSizeHistogram(eventSizeHistogram); + } + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletBatchEventHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTabletBatchEventHandler.java similarity index 75% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletBatchEventHandler.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTabletBatchEventHandler.java index fb879c7b1dce4..b8f1f3961a261 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletBatchEventHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTabletBatchEventHandler.java @@ -17,23 +17,21 @@ * under the License. 
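
The retry behavior of this sink is easier to follow in isolation. Below is a minimal, self-contained sketch of the same drain-with-budget pattern; every name in it is illustrative and none of it is IoTDB API:

```java
// Sketch only: a retry queue drained under a time budget, as in
// IoTDBDataRegionAsyncSink#transferQueuedEventsIfNecessary (assumed simplification).
import java.util.Queue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.function.Predicate;

public final class RetryDrainSketch {
    private final Queue<String> retryQueue = new LinkedBlockingQueue<>();
    private final long maxDrainTimeMs; // per-call time budget
    private final int forcedThreshold; // queue size that forces a drain

    public RetryDrainSketch(final long maxDrainTimeMs, final int forcedThreshold) {
        this.maxDrainTimeMs = maxDrainTimeMs;
        this.forcedThreshold = forcedThreshold;
    }

    public void offer(final String event) {
        retryQueue.offer(event);
    }

    /** Drains queued events; throws if a drain over budget has made no progress. */
    public void drainIfNecessary(final boolean forced, final Predicate<String> transfer) {
        if (retryQueue.isEmpty() || (!forced && retryQueue.size() < forcedThreshold)) {
            return; // nothing to do, or below the forced-retry threshold
        }
        final long start = System.currentTimeMillis();
        final int before = retryQueue.size();
        while (!retryQueue.isEmpty()) {
            final String event = retryQueue.poll();
            if (!transfer.test(event)) {
                retryQueue.offer(event); // rotate the failed event back to the tail
            }
            if (System.currentTimeMillis() - start > maxDrainTimeMs) {
                if (retryQueue.size() >= before) { // no progress within the budget
                    throw new IllegalStateException(
                        "Failed to retry transferring events; remaining: " + retryQueue.size());
                }
                return; // budget exhausted but progress was made: yield to realtime traffic
            }
        }
    }
}
```

The design choice mirrored here is that a drain that makes no progress fails loudly instead of spinning, while a drain that merely runs out of budget yields to realtime traffic.
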
*/ -package org.apache.iotdb.db.pipe.connector.protocol.thrift.async.handler; +package org.apache.iotdb.db.pipe.sink.protocol.thrift.async.handler; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.client.async.AsyncPipeDataTransferServiceClient; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferCompressedReq; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.batch.PipeTabletEventPlainBatch; -import org.apache.iotdb.db.pipe.connector.protocol.thrift.async.IoTDBDataRegionAsyncConnector; -import org.apache.iotdb.db.pipe.connector.util.LeaderCacheUtils; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.batch.PipeTabletEventPlainBatch; +import org.apache.iotdb.db.pipe.sink.protocol.thrift.async.IoTDBDataRegionAsyncSink; +import org.apache.iotdb.db.pipe.sink.util.LeaderCacheUtils; import org.apache.iotdb.pipe.api.exception.PipeException; import org.apache.iotdb.rpc.TSStatusCode; import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq; import org.apache.iotdb.service.rpc.thrift.TPipeTransferResp; import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; import org.apache.tsfile.utils.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,7 +41,7 @@ import java.util.Map; import java.util.stream.Collectors; -public class PipeTransferTabletBatchEventHandler implements AsyncMethodCallback { +public class PipeTransferTabletBatchEventHandler extends PipeTransferTrackableHandler { private static final Logger LOGGER = LoggerFactory.getLogger(PipeTransferTabletBatchEventHandler.class); @@ -54,24 +52,18 @@ public class PipeTransferTabletBatchEventHandler implements AsyncMethodCallback< private final TPipeTransferReq req; private final double reqCompressionRatio; - private final IoTDBDataRegionAsyncConnector connector; - public PipeTransferTabletBatchEventHandler( - final PipeTabletEventPlainBatch batch, final IoTDBDataRegionAsyncConnector connector) + final PipeTabletEventPlainBatch batch, final IoTDBDataRegionAsyncSink connector) throws IOException { + super(connector); + // Deep copy to keep events' reference events = batch.deepCopyEvents(); pipeName2BytesAccumulated = batch.deepCopyPipeName2BytesAccumulated(); final TPipeTransferReq uncompressedReq = batch.toTPipeTransferReq(); - req = - connector.isRpcCompressionEnabled() - ? 
PipeTransferCompressedReq.toTPipeTransferReq( - uncompressedReq, connector.getCompressors()) - : uncompressedReq; + req = connector.compressIfNeeded(uncompressedReq); reqCompressionRatio = (double) req.getBody().length / uncompressedReq.getBody().length; - - this.connector = connector; } public void transfer(final AsyncPipeDataTransferServiceClient client) throws TException { @@ -83,15 +75,15 @@ public void transfer(final AsyncPipeDataTransferServiceClient client) throws TEx (long) (entry.getValue() * reqCompressionRatio)); } - client.pipeTransfer(req, this); + tryTransfer(client, req); } @Override - public void onComplete(final TPipeTransferResp response) { + protected boolean onCompleteInternal(final TPipeTransferResp response) { // Just in case if (response == null) { onError(new PipeException("TPipeTransferResp is null")); - return; + return false; } try { @@ -114,19 +106,35 @@ public void onComplete(final TPipeTransferResp response) { PipeTransferTabletBatchEventHandler.class.getName(), true)); } catch (final Exception e) { onError(e); + return false; } + + return true; } @Override - public void onError(final Exception exception) { + protected void onErrorInternal(final Exception exception) { try { LOGGER.warn( - "Failed to transfer TabletInsertionEvent batch {} (request commit ids={}).", - events.stream().map(EnrichedEvent::coreReportMessage).collect(Collectors.toList()), - events.stream().map(EnrichedEvent::getCommitId).collect(Collectors.toList()), + "Failed to transfer TabletInsertionEvent batch. Total failed events: {}, related pipe names: {}", + events.size(), + events.stream().map(EnrichedEvent::getPipeName).collect(Collectors.toSet()), exception); } finally { connector.addFailureEventsToRetryQueue(events); } } + + @Override + protected void doTransfer( + final AsyncPipeDataTransferServiceClient client, final TPipeTransferReq req) + throws TException { + client.pipeTransfer(req, this); + } + + @Override + public void clearEventsReferenceCount() { + events.forEach( + event -> event.clearReferenceCount(PipeTransferTabletBatchEventHandler.class.getName())); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertNodeEventHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTabletInsertNodeEventHandler.java similarity index 72% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertNodeEventHandler.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTabletInsertNodeEventHandler.java index 43dfb6aad29bc..70ba7f4cfc5b7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertNodeEventHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTabletInsertNodeEventHandler.java @@ -17,35 +17,35 @@ * under the License. 
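
The `reqCompressionRatio` bookkeeping introduced above scales each pipe's accumulated byte count by the achieved compression ratio, so rate limiting operates on on-wire sizes. A hedged, standalone illustration using JDK DEFLATE (the actual codec, request types, and rate-limiter APIs in IoTDB differ):

```java
// Sketch only: compress once, derive the ratio, and scale per-pipe byte accounting by it.
import java.util.HashMap;
import java.util.Map;
import java.util.zip.Deflater;

public final class CompressAccountingSketch {

    /** Compresses body with DEFLATE; stands in for connector.compressIfNeeded(req). */
    static byte[] compress(final byte[] body) {
        final Deflater deflater = new Deflater();
        deflater.setInput(body);
        deflater.finish();
        final byte[] buffer = new byte[body.length + 64]; // large enough for this demo input
        final int n = deflater.deflate(buffer);
        deflater.end();
        final byte[] out = new byte[n];
        System.arraycopy(buffer, 0, out, 0, n);
        return out;
    }

    public static void main(final String[] args) {
        final byte[] uncompressed = new byte[4096]; // zeros compress very well
        final byte[] compressed = compress(uncompressed);
        final double ratio = (double) compressed.length / uncompressed.length;

        // Per-pipe byte accounting, scaled the same way reqCompressionRatio is applied.
        final Map<String, Long> bytesAccumulated = new HashMap<>();
        bytesAccumulated.put("pipe-a", 3000L);
        bytesAccumulated.put("pipe-b", 1096L);
        bytesAccumulated.forEach(
            (pipe, bytes) ->
                System.out.printf("%s -> %d on-wire bytes%n", pipe, (long) (bytes * ratio)));
    }
}
```
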
*/ -package org.apache.iotdb.db.pipe.connector.protocol.thrift.async.handler; +package org.apache.iotdb.db.pipe.sink.protocol.thrift.async.handler; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.client.async.AsyncPipeDataTransferServiceClient; -import org.apache.iotdb.db.pipe.connector.protocol.thrift.async.IoTDBDataRegionAsyncConnector; import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; +import org.apache.iotdb.db.pipe.sink.protocol.thrift.async.IoTDBDataRegionAsyncSink; import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq; -import org.apache.iotdb.service.rpc.thrift.TPipeTransferResp; import org.apache.thrift.TException; public class PipeTransferTabletInsertNodeEventHandler - extends PipeTransferTabletInsertionEventHandler { + extends PipeTransferTabletInsertionEventHandler { public PipeTransferTabletInsertNodeEventHandler( - PipeInsertNodeTabletInsertionEvent event, - TPipeTransferReq req, - IoTDBDataRegionAsyncConnector connector) { + final PipeInsertNodeTabletInsertionEvent event, + final TPipeTransferReq req, + final IoTDBDataRegionAsyncSink connector) { super(event, req, connector); } @Override - protected void doTransfer(AsyncPipeDataTransferServiceClient client, TPipeTransferReq req) + protected void doTransfer( + final AsyncPipeDataTransferServiceClient client, final TPipeTransferReq req) throws TException { client.pipeTransfer(req, this); } @Override - protected void updateLeaderCache(TSStatus status) { + protected void updateLeaderCache(final TSStatus status) { connector.updateLeaderCache( ((PipeInsertNodeTabletInsertionEvent) event).getDeviceId(), status.getRedirectNode()); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java similarity index 81% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java index 40561121724bc..093b739bd0058 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java @@ -17,12 +17,12 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.connector.protocol.thrift.async.handler; +package org.apache.iotdb.db.pipe.sink.protocol.thrift.async.handler; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.client.async.AsyncPipeDataTransferServiceClient; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.db.pipe.connector.protocol.thrift.async.IoTDBDataRegionAsyncConnector; +import org.apache.iotdb.db.pipe.sink.protocol.thrift.async.IoTDBDataRegionAsyncSink; import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; import org.apache.iotdb.pipe.api.exception.PipeException; import org.apache.iotdb.rpc.TSStatusCode; @@ -30,12 +30,10 @@ import org.apache.iotdb.service.rpc.thrift.TPipeTransferResp; import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public abstract class PipeTransferTabletInsertionEventHandler - implements AsyncMethodCallback { +public abstract class PipeTransferTabletInsertionEventHandler extends PipeTransferTrackableHandler { private static final Logger LOGGER = LoggerFactory.getLogger(PipeTransferTabletInsertionEventHandler.class); @@ -43,15 +41,14 @@ public abstract class PipeTransferTabletInsertionEventHandler { +public class PipeTransferTabletRawEventHandler extends PipeTransferTabletInsertionEventHandler { public PipeTransferTabletRawEventHandler( - PipeRawTabletInsertionEvent event, - TPipeTransferReq req, - IoTDBDataRegionAsyncConnector connector) { + final PipeRawTabletInsertionEvent event, + final TPipeTransferReq req, + final IoTDBDataRegionAsyncSink connector) { super(event, req, connector); } @Override - protected void doTransfer(AsyncPipeDataTransferServiceClient client, TPipeTransferReq req) + protected void doTransfer( + final AsyncPipeDataTransferServiceClient client, final TPipeTransferReq req) throws TException { client.pipeTransfer(req, this); } @Override - protected void updateLeaderCache(TSStatus status) { + protected void updateLeaderCache(final TSStatus status) { connector.updateLeaderCache( ((PipeRawTabletInsertionEvent) event).getDeviceId(), status.getRedirectNode()); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTrackableHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTrackableHandler.java new file mode 100644 index 0000000000000..6ea07b0c278fc --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTrackableHandler.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.iotdb.db.pipe.sink.protocol.thrift.async.handler;
+
+import org.apache.iotdb.commons.client.async.AsyncPipeDataTransferServiceClient;
+import org.apache.iotdb.db.pipe.sink.protocol.thrift.async.IoTDBDataRegionAsyncSink;
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
+import org.apache.iotdb.service.rpc.thrift.TPipeTransferResp;
+
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Objects;
+
+public abstract class PipeTransferTrackableHandler
+    implements AsyncMethodCallback<TPipeTransferResp>, AutoCloseable {
+
+  private static final Logger LOGGER =
+      LoggerFactory.getLogger(PipeTransferTrackableHandler.class);
+
+  protected final IoTDBDataRegionAsyncSink connector;
+  protected volatile AsyncPipeDataTransferServiceClient client;
+
+  public PipeTransferTrackableHandler(final IoTDBDataRegionAsyncSink connector) {
+    this.connector = connector;
+  }
+
+  @Override
+  public void onComplete(final TPipeTransferResp response) {
+    if (connector.isClosed()) {
+      clearEventsReferenceCount();
+      connector.eliminateHandler(this, true);
+      return;
+    }
+
+    if (onCompleteInternal(response)) {
+      // Eliminate the handler only when all transmissions corresponding to it have been completed.
+      // NOTE: We should not clear the reference count of events, as this would cause the
+      // `org.apache.iotdb.pipe.it.dual.tablemodel.manual.basic.IoTDBPipeDataSinkIT#testSinkTsFileFormat3` test to fail.
+      connector.eliminateHandler(this, false);
+    }
+  }
+
+  @Override
+  public void onError(final Exception exception) {
+    if (connector.isClosed()) {
+      clearEventsReferenceCount();
+      connector.eliminateHandler(this, true);
+      return;
+    }
+
+    onErrorInternal(exception);
+    connector.eliminateHandler(this, false);
+  }
+
+  /**
+   * Attempts to transfer data using the provided client and request.
+   *
+   * @param client the client used for data transfer
+   * @param req the request containing transfer details
+   * @return {@code true} if the transfer was initiated successfully, {@code false} if the connector
+   *     is closed
+   * @throws TException if an error occurs during the transfer
+   */
+  protected boolean tryTransfer(
+      final AsyncPipeDataTransferServiceClient client, final TPipeTransferReq req)
+      throws TException {
+    if (Objects.isNull(this.client)) {
+      this.client = client;
+    }
+    // Track the handler before checking whether the connector is closed
+    connector.trackHandler(this);
+    if (connector.isClosed()) {
+      clearEventsReferenceCount();
+      connector.eliminateHandler(this, true);
+      client.setShouldReturnSelf(true);
+      try {
+        client.returnSelf();
+      } catch (final IllegalStateException e) {
+        LOGGER.info(
+            "Illegal state when returning the client to the object pool, maybe the pool is already cleared. 
Will ignore."); + } + this.client = null; + return false; + } + doTransfer(client, req); + return true; + } + + /** + * @return {@code true} if all transmissions corresponding to the handler have been completed, + * {@code false} otherwise + */ + protected abstract boolean onCompleteInternal(final TPipeTransferResp response); + + protected abstract void onErrorInternal(final Exception exception); + + protected abstract void doTransfer( + final AsyncPipeDataTransferServiceClient client, final TPipeTransferReq req) + throws TException; + + public abstract void clearEventsReferenceCount(); + + public void closeClient() { + if (Objects.isNull(client)) { + return; + } + try { + client.close(); + client.invalidateAll(); + } catch (final Exception e) { + LOGGER.warn( + "Failed to close or invalidate client when connector is closed. Client: {}, Exception: {}", + client, + e.getMessage(), + e); + } + } + + @Override + public void close() { + // Do nothing + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTsFileHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTsFileHandler.java similarity index 57% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTsFileHandler.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTsFileHandler.java index a5f126d712301..1854a47e66709 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTsFileHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTsFileHandler.java @@ -17,34 +17,38 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.connector.protocol.thrift.async.handler; +package org.apache.iotdb.db.pipe.sink.protocol.thrift.async.handler; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.client.async.AsyncPipeDataTransferServiceClient; import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferCompressedReq; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.response.PipeTransferFilePieceResp; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.db.pipe.connector.client.IoTDBDataNodeAsyncClientManager; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTsFilePieceReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTsFilePieceWithModReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTsFileSealReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTsFileSealWithModReq; -import org.apache.iotdb.db.pipe.connector.protocol.thrift.async.IoTDBDataRegionAsyncConnector; +import org.apache.iotdb.commons.pipe.sink.limiter.TsFileSendRateLimiter; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.response.PipeTransferFilePieceResp; +import org.apache.iotdb.commons.utils.RetryUtils; import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; +import org.apache.iotdb.db.pipe.metric.overview.PipeResourceMetrics; +import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; +import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager; +import org.apache.iotdb.db.pipe.resource.memory.PipeTsFileMemoryBlock; +import org.apache.iotdb.db.pipe.sink.client.IoTDBDataNodeAsyncClientManager; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFilePieceReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFilePieceWithModReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFileSealReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFileSealWithModReq; +import org.apache.iotdb.db.pipe.sink.protocol.thrift.async.IoTDBDataRegionAsyncSink; +import org.apache.iotdb.pipe.api.exception.PipeException; import org.apache.iotdb.rpc.TSStatusCode; import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq; import org.apache.iotdb.service.rpc.thrift.TPipeTransferResp; import org.apache.commons.io.FileUtils; import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; import org.apache.tsfile.utils.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; -import java.io.FileNotFoundException; import java.io.IOException; import java.io.RandomAccessFile; import java.util.Arrays; @@ -55,13 +59,10 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; -public class PipeTransferTsFileHandler implements AsyncMethodCallback { +public class PipeTransferTsFileHandler extends PipeTransferTrackableHandler { private static final Logger LOGGER = LoggerFactory.getLogger(PipeTransferTsFileHandler.class); - // Used to transfer the file - private final IoTDBDataRegionAsyncConnector connector; - // Used to rate limit the transfer private final Map, Double> pipeName2WeightMap; @@ -78,7 +79,8 @@ public class PipeTransferTsFileHandler implements AsyncMethodCallback, Double> pipeName2WeightMap, final List events, final 
AtomicInteger eventsReferenceCount,
@@ -97,8 +98,8 @@ public PipeTransferTsFileHandler(
       final File tsFile,
       final File modFile,
       final boolean transferMod)
-      throws FileNotFoundException {
-    this.connector = connector;
+      throws InterruptedException {
+    super(connector);

     this.pipeName2WeightMap = pipeName2WeightMap;
@@ -111,28 +112,67 @@ public PipeTransferTsFileHandler(
     this.transferMod = transferMod;
     currentFile = transferMod ? modFile : tsFile;

-    readFileBufferSize = PipeConfig.getInstance().getPipeConnectorReadFileBufferSize();
-    readBuffer = new byte[readFileBufferSize];
+    // NOTE: Waiting here for enough memory to slice the file may cause a deadlock!
+    // TsFile events are produced and consumed at the same time, and the memory held by a TsFile
+    // event is not released until the TsFile is sealed. So if memory is insufficient for slicing,
+    // the TsFile event blocks waiting for memory to be released; meanwhile, its own memory is
+    // never released, so memory never becomes sufficient for slicing. This causes a deadlock.
+    waitForResourceEnough4Slicing((long) ((1 + Math.random()) * 20 * 1000)); // 20 - 40 seconds
+    readFileBufferSize =
+        (int)
+            Math.min(
+                PipeConfig.getInstance().getPipeConnectorReadFileBufferSize(),
+                transferMod ? Math.max(tsFile.length(), modFile.length()) : tsFile.length());

     position = 0;

-    reader =
-        Objects.nonNull(modFile)
-            ? new RandomAccessFile(modFile, "r")
-            : new RandomAccessFile(tsFile, "r");

-    isSealSignalSent = new AtomicBoolean(false);
   }

+  public File getTsFile() {
+    return tsFile;
+  }
+
   public void transfer(
       final IoTDBDataNodeAsyncClientManager clientManager,
       final AsyncPipeDataTransferServiceClient client)
       throws TException, IOException {
+    // Delay creation of resources to avoid OOM or too many open files
+    if (readBuffer == null) {
+      memoryBlock =
+          PipeDataNodeResourceManager.memory()
+              .forceAllocateForTsFileWithRetry(
+                  PipeConfig.getInstance().isPipeConnectorReadFileBufferMemoryControlEnabled()
+                      ? readFileBufferSize
+                      : 0);
+      readBuffer = new byte[readFileBufferSize];
+    }
+
+    if (reader == null) {
+      reader =
+          Objects.nonNull(modFile)
+              ? new RandomAccessFile(modFile, "r")
+              : new RandomAccessFile(tsFile, "r");
+    }
+
     this.clientManager = clientManager;
     this.client = client;
+    if (client == null) {
+      LOGGER.warn(
+          "Client has been returned to the pool. Current handler status is {}. Will not transfer {}.",
+          connector.isClosed() ? "CLOSED" : "NOT CLOSED",
+          tsFile);
+      return;
+    }
+
+    client.setShouldReturnSelf(false);
+    client.setTimeoutDynamically(clientManager.getConnectionTimeout());
+
+    PipeResourceMetrics.getInstance().recordDiskIO(readFileBufferSize);
+    if (connector.isEnableSendTsFileLimit()) {
+      TsFileSendRateLimiter.getInstance().acquire(readFileBufferSize);
+    }

     final int readLength = reader.read(readBuffer);

     if (readLength == -1) {
@@ -154,11 +194,7 @@ public void transfer(
               ? PipeTransferTsFileSealWithModReq.toTPipeTransferReq(
                   modFile.getName(), modFile.length(), tsFile.getName(), tsFile.length())
               : PipeTransferTsFileSealReq.toTPipeTransferReq(tsFile.getName(), tsFile.length());
-      final TPipeTransferReq req =
-          connector.isRpcCompressionEnabled()
-              ?
PipeTransferCompressedReq.toTPipeTransferReq( - uncompressedReq, connector.getCompressors()) - : uncompressedReq; + final TPipeTransferReq req = connector.compressIfNeeded(uncompressedReq); pipeName2WeightMap.forEach( (pipePair, weight) -> @@ -168,7 +204,9 @@ public void transfer( client.getEndPoint(), (long) (req.getBody().length * weight))); - client.pipeTransfer(req, this); + if (!tryTransfer(client, req)) { + return; + } } return; } @@ -183,11 +221,7 @@ public void transfer( currentFile.getName(), position, payload) : PipeTransferTsFilePieceReq.toTPipeTransferReq( currentFile.getName(), position, payload); - final TPipeTransferReq req = - connector.isRpcCompressionEnabled() - ? PipeTransferCompressedReq.toTPipeTransferReq( - uncompressedReq, connector.getCompressors()) - : uncompressedReq; + final TPipeTransferReq req = connector.compressIfNeeded(uncompressedReq); pipeName2WeightMap.forEach( (pipePair, weight) -> @@ -197,13 +231,26 @@ public void transfer( client.getEndPoint(), (long) (req.getBody().length * weight))); - client.pipeTransfer(req, this); + if (!tryTransfer(client, req)) { + return; + } position += readLength; } @Override public void onComplete(final TPipeTransferResp response) { + try { + super.onComplete(response); + } finally { + if (connector.isClosed()) { + returnClientIfNecessary(); + } + } + } + + @Override + protected boolean onCompleteInternal(final TPipeTransferResp response) { if (isSealSignalSent.get()) { try { final TSStatus status = response.getStatus(); @@ -220,7 +267,7 @@ public void onComplete(final TPipeTransferResp response) { } } catch (final Exception e) { onError(e); - return; + return false; } try { @@ -230,7 +277,11 @@ public void onComplete(final TPipeTransferResp response) { // Delete current file when using tsFile as batch if (events.stream().anyMatch(event -> !(event instanceof PipeTsFileInsertionEvent))) { - FileUtils.delete(currentFile); + RetryUtils.retryOnException( + () -> { + FileUtils.delete(currentFile); + return null; + }); } } catch (final IOException e) { LOGGER.warn( @@ -248,7 +299,7 @@ public void onComplete(final TPipeTransferResp response) { "Successfully transferred file {} (committer key={}, commit id={}, reference count={}).", tsFile, events.stream().map(EnrichedEvent::getCommitterKey).collect(Collectors.toList()), - events.stream().map(EnrichedEvent::getCommitId).collect(Collectors.toList()), + events.stream().map(EnrichedEvent::getCommitIds).collect(Collectors.toList()), referenceCount); } else { LOGGER.info( @@ -257,12 +308,10 @@ public void onComplete(final TPipeTransferResp response) { referenceCount); } - if (client != null) { - client.setShouldReturnSelf(true); - client.returnSelf(); - } + returnClientIfNecessary(); } - return; + + return true; } // If the isSealSignalSent is false, then the response must be a PipeTransferFilePieceResp @@ -292,18 +341,30 @@ public void onComplete(final TPipeTransferResp response) { transfer(clientManager, client); } catch (final Exception e) { onError(e); + return false; } + + return false; // due to seal transfer not yet completed } @Override public void onError(final Exception exception) { + try { + super.onError(exception); + } finally { + returnClientIfNecessary(); + } + } + + @Override + protected void onErrorInternal(final Exception exception) { try { if (events.size() <= 1 || LOGGER.isDebugEnabled()) { LOGGER.warn( "Failed to transfer TsFileInsertionEvent {} (committer key {}, commit id {}).", tsFile, 
events.stream().map(EnrichedEvent::getCommitterKey).collect(Collectors.toList()), - events.stream().map(EnrichedEvent::getCommitId).collect(Collectors.toList()), + events.stream().map(EnrichedEvent::getCommitIds).collect(Collectors.toList()), exception); } else { LOGGER.warn( @@ -330,16 +391,17 @@ public void onError(final Exception exception) { // Delete current file when using tsFile as batch if (events.stream().anyMatch(event -> !(event instanceof PipeTsFileInsertionEvent))) { - FileUtils.delete(currentFile); + RetryUtils.retryOnException( + () -> { + FileUtils.delete(currentFile); + return null; + }); } } catch (final IOException e) { LOGGER.warn("Failed to close file reader or delete tsFile when failed to transfer file.", e); } finally { try { - if (client != null) { - client.setShouldReturnSelf(true); - client.returnSelf(); - } + returnClientIfNecessary(); } finally { if (eventsHadBeenAddedToRetryQueue.compareAndSet(false, true)) { connector.addFailureEventsToRetryQueue(events); @@ -347,4 +409,103 @@ public void onError(final Exception exception) { } } } + + private void returnClientIfNecessary() { + if (client == null) { + return; + } + + if (connector.isClosed()) { + closeClient(); + } + + client.setShouldReturnSelf(true); + try { + client.returnSelf(); + } catch (final IllegalStateException e) { + LOGGER.info( + "Illegal state when return the client to object pool, maybe the pool is already cleared. Will ignore."); + } + client = null; + } + + @Override + protected void doTransfer( + final AsyncPipeDataTransferServiceClient client, final TPipeTransferReq req) + throws TException { + if (client == null) { + LOGGER.warn( + "Client has been returned to the pool. Current handler status is {}. Will not transfer {}.", + connector.isClosed() ? "CLOSED" : "NOT CLOSED", + tsFile); + return; + } + + client.pipeTransfer(req, this); + } + + @Override + public void clearEventsReferenceCount() { + events.forEach(event -> event.clearReferenceCount(PipeTransferTsFileHandler.class.getName())); + } + + @Override + public void close() { + super.close(); + + if (memoryBlock != null) { + memoryBlock.close(); + memoryBlock = null; + } + } + + /** + * @param timeoutMs CAN NOT BE UNLIMITED, otherwise it may cause deadlock. 
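+   *     (An unbounded wait would reproduce the deadlock described in the constructor's NOTE:
+   *     the waiting TsFile event itself holds the memory that slicing is waiting for.)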
+   */
+  private void waitForResourceEnough4Slicing(final long timeoutMs) throws InterruptedException {
+    if (!PipeConfig.getInstance().isPipeConnectorReadFileBufferMemoryControlEnabled()) {
+      return;
+    }
+
+    final PipeMemoryManager memoryManager = PipeDataNodeResourceManager.memory();
+    if (memoryManager.isEnough4TsFileSlicing()) {
+      return;
+    }
+
+    final long startTime = System.currentTimeMillis();
+    long lastRecordTime = startTime;
+
+    final long memoryCheckIntervalMs =
+        PipeConfig.getInstance().getPipeCheckMemoryEnoughIntervalMs();
+    while (!memoryManager.isEnough4TsFileSlicing()) {
+      Thread.sleep(memoryCheckIntervalMs);
+
+      final long currentTime = System.currentTimeMillis();
+      final double elapsedRecordTimeSeconds = (currentTime - lastRecordTime) / 1000.0;
+      final double waitTimeSeconds = (currentTime - startTime) / 1000.0;
+      if (elapsedRecordTimeSeconds > 10.0) {
+        LOGGER.info(
+            "Waiting for enough memory to slice TsFile {}; {} seconds elapsed.",
+            tsFile,
+            waitTimeSeconds);
+        lastRecordTime = currentTime;
+      } else if (LOGGER.isDebugEnabled()) {
+        LOGGER.debug(
+            "Waiting for enough memory to slice TsFile {}; {} seconds elapsed.",
+            tsFile,
+            waitTimeSeconds);
+      }
+
+      if (waitTimeSeconds * 1000 > timeoutMs) {
+        // The message must contain 'TimeoutException' so that callers can recognize the timeout.
+        throw new PipeException(
+            String.format("TimeoutException: Waited %s seconds", waitTimeSeconds));
+      }
+    }
+
+    final long currentTime = System.currentTimeMillis();
+    final double waitTimeSeconds = (currentTime - startTime) / 1000.0;
+    LOGGER.info(
+        "Memory became sufficient for slicing TsFile {} after {} seconds.", tsFile, waitTimeSeconds);
+  }
 }
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/sync/IoTDBDataNodeSyncConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBDataNodeSyncSink.java
similarity index 61%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/sync/IoTDBDataNodeSyncConnector.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBDataNodeSyncSink.java
index 9ad83ee84e286..3e6c0ff83ead4 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/sync/IoTDBDataNodeSyncConnector.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBDataNodeSyncSink.java
@@ -17,20 +17,17 @@
  * under the License.
*/ -package org.apache.iotdb.db.pipe.connector.protocol.thrift.sync; +package org.apache.iotdb.db.pipe.sink.protocol.thrift.sync; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.pipe.connector.client.IoTDBSyncClient; -import org.apache.iotdb.commons.pipe.connector.client.IoTDBSyncClientManager; -import org.apache.iotdb.commons.pipe.connector.protocol.IoTDBSslSyncConnector; -import org.apache.iotdb.commons.utils.NodeUrlUtils; +import org.apache.iotdb.commons.pipe.sink.client.IoTDBSyncClient; +import org.apache.iotdb.commons.pipe.sink.client.IoTDBSyncClientManager; +import org.apache.iotdb.commons.pipe.sink.protocol.IoTDBSslSyncSink; import org.apache.iotdb.db.conf.IoTDBConfig; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.pipe.connector.client.IoTDBDataNodeSyncClientManager; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferPlanNodeReq; import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent; -import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator; +import org.apache.iotdb.db.pipe.sink.client.IoTDBDataNodeSyncClientManager; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferPlanNodeReq; import org.apache.iotdb.pipe.api.exception.PipeConnectionException; import org.apache.iotdb.pipe.api.exception.PipeException; import org.apache.iotdb.rpc.TSStatusCode; @@ -41,70 +38,58 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.net.UnknownHostException; import java.util.List; -import java.util.Set; -import java.util.stream.Collectors; +import java.util.Objects; -public abstract class IoTDBDataNodeSyncConnector extends IoTDBSslSyncConnector { +public abstract class IoTDBDataNodeSyncSink extends IoTDBSslSyncSink { - private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBDataNodeSyncConnector.class); + private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBDataNodeSyncSink.class); protected IoTDBDataNodeSyncClientManager clientManager; - @Override - public void validate(final PipeParameterValidator validator) throws Exception { - super.validate(validator); - - final IoTDBConfig iotdbConfig = IoTDBDescriptor.getInstance().getConfig(); - final Set givenNodeUrls = parseNodeUrls(validator.getParameters()); - - validator.validate( - empty -> { - try { - // Ensure the sink doesn't point to the thrift receiver on DataNode itself - return !NodeUrlUtils.containsLocalAddress( - givenNodeUrls.stream() - .filter(tEndPoint -> tEndPoint.getPort() == iotdbConfig.getRpcPort()) - .map(TEndPoint::getIp) - .collect(Collectors.toList())); - } catch (final UnknownHostException e) { - LOGGER.warn("Unknown host when checking pipe sink IP.", e); - return false; - } - }, - String.format( - "One of the endpoints %s of the receivers is pointing back to the thrift receiver %s on sender itself, " - + "or unknown host when checking pipe sink IP.", - givenNodeUrls, new TEndPoint(iotdbConfig.getRpcAddress(), iotdbConfig.getRpcPort()))); - } - @Override protected IoTDBSyncClientManager constructClient( final List nodeUrls, + final String username, + final String password, final boolean useSSL, final String trustStorePath, final String trustStorePwd, final boolean useLeaderCache, - final String loadBalanceStrategy) { + final String loadBalanceStrategy, + final boolean shouldReceiverConvertOnTypeMismatch, + final String loadTsFileStrategy, + final boolean validateTsFile, 
+ final boolean shouldMarkAsPipeRequest) { clientManager = new IoTDBDataNodeSyncClientManager( - nodeUrls, useSSL, trustStorePath, trustStorePwd, useLeaderCache, loadBalanceStrategy); + nodeUrls, + username, + password, + useSSL, + Objects.nonNull(trustStorePath) ? IoTDBConfig.addDataHomeDir(trustStorePath) : null, + trustStorePwd, + useLeaderCache, + loadBalanceStrategy, + shouldReceiverConvertOnTypeMismatch, + loadTsFileStrategy, + validateTsFile, + shouldMarkAsPipeRequest); return clientManager; } protected void doTransferWrapper( final PipeSchemaRegionWritePlanEvent pipeSchemaRegionWritePlanEvent) throws PipeException { + // We increase the reference count for this event to determine if the event may be released. + if (!pipeSchemaRegionWritePlanEvent.increaseReferenceCount( + IoTDBDataNodeSyncSink.class.getName())) { + return; + } try { - // We increase the reference count for this event to determine if the event may be released. - if (!pipeSchemaRegionWritePlanEvent.increaseReferenceCount( - IoTDBDataNodeSyncConnector.class.getName())) { - return; - } doTransfer(pipeSchemaRegionWritePlanEvent); } finally { pipeSchemaRegionWritePlanEvent.decreaseReferenceCount( - IoTDBDataNodeSyncConnector.class.getName(), false); + IoTDBDataNodeSyncSink.class.getName(), false); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/sync/IoTDBDataRegionSyncConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBDataRegionSyncSink.java similarity index 75% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/sync/IoTDBDataRegionSyncConnector.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBDataRegionSyncSink.java index fca40f21d6470..a40074392f483 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/sync/IoTDBDataRegionSyncConnector.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBDataRegionSyncSink.java @@ -17,32 +17,38 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.connector.protocol.thrift.sync; +package org.apache.iotdb.db.pipe.sink.protocol.thrift.sync; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.pipe.connector.client.IoTDBSyncClient; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferFilePieceReq; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.batch.PipeTabletEventBatch; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.batch.PipeTabletEventPlainBatch; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.batch.PipeTabletEventTsFileBatch; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.batch.PipeTransferBatchReqBuilder; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletBinaryReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletInsertNodeReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletRawReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTsFilePieceReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTsFilePieceWithModReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTsFileSealReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTsFileSealWithModReq; -import org.apache.iotdb.db.pipe.connector.util.LeaderCacheUtils; +import org.apache.iotdb.commons.pipe.sink.client.IoTDBSyncClient; +import org.apache.iotdb.commons.pipe.sink.limiter.TsFileSendRateLimiter; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFilePieceReq; +import org.apache.iotdb.commons.utils.RetryUtils; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.terminate.PipeTerminateEvent; import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; +import org.apache.iotdb.db.pipe.metric.overview.PipeResourceMetrics; +import org.apache.iotdb.db.pipe.metric.sink.PipeDataRegionSinkMetrics; +import org.apache.iotdb.db.pipe.sink.client.IoTDBDataNodeSyncClientManager; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.batch.PipeTabletEventBatch; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.batch.PipeTabletEventPlainBatch; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.batch.PipeTabletEventTsFileBatch; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.batch.PipeTransferBatchReqBuilder; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTabletBinaryReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTabletInsertNodeReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTabletRawReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFilePieceReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFilePieceWithModReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFileSealReq; +import 
org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTsFileSealWithModReq; +import org.apache.iotdb.db.pipe.sink.util.LeaderCacheUtils; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; +import org.apache.iotdb.metrics.type.Histogram; import org.apache.iotdb.pipe.api.customizer.configuration.PipeConnectorRuntimeConfiguration; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; import org.apache.iotdb.pipe.api.event.Event; @@ -63,16 +69,22 @@ import java.io.File; import java.io.IOException; import java.nio.file.NoSuchFileException; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; -public class IoTDBDataRegionSyncConnector extends IoTDBDataNodeSyncConnector { +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_ENABLE_SEND_TSFILE_LIMIT; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.CONNECTOR_ENABLE_SEND_TSFILE_LIMIT_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant.SINK_ENABLE_SEND_TSFILE_LIMIT; - private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBDataRegionSyncConnector.class); +public class IoTDBDataRegionSyncSink extends IoTDBDataNodeSyncSink { + + private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBDataRegionSyncSink.class); private PipeTransferBatchReqBuilder tabletBatchBuilder; + private boolean enableSendTsFileLimit; @Override public void customize( @@ -84,6 +96,11 @@ public void customize( if (isTabletBatchModeEnabled) { tabletBatchBuilder = new PipeTransferBatchReqBuilder(parameters); } + + enableSendTsFileLimit = + parameters.getBooleanOrDefault( + Arrays.asList(SINK_ENABLE_SEND_TSFILE_LIMIT, CONNECTOR_ENABLE_SEND_TSFILE_LIMIT), + CONNECTOR_ENABLE_SEND_TSFILE_LIMIT_DEFAULT_VALUE); } @Override @@ -98,6 +115,14 @@ protected PipeTransferFilePieceReq getTransferMultiFilePieceReq( return PipeTransferTsFilePieceWithModReq.toTPipeTransferReq(fileName, position, payLoad); } + @Override + protected void mayLimitRateAndRecordIO(final long requiredBytes) { + PipeResourceMetrics.getInstance().recordDiskIO(requiredBytes); + if (enableSendTsFileLimit) { + TsFileSendRateLimiter.getInstance().acquire(requiredBytes); + } + } + @Override public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exception { // PipeProcessor can change the type of TabletInsertionEvent @@ -113,11 +138,8 @@ public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exc try { if (isTabletBatchModeEnabled) { - final Pair endPointAndBatch = - tabletBatchBuilder.onEvent(tabletInsertionEvent); - if (Objects.nonNull(endPointAndBatch)) { - doTransferWrapper(endPointAndBatch); - } + tabletBatchBuilder.onEvent(tabletInsertionEvent); + doTransferWrapper(); } else { if (tabletInsertionEvent instanceof PipeInsertNodeTabletInsertionEvent) { doTransferWrapper((PipeInsertNodeTabletInsertionEvent) tabletInsertionEvent); @@ -180,9 +202,9 @@ public void transfer(final Event event) throws Exception { } private void doTransferWrapper() throws IOException, WriteProcessException { - for (final Pair nonEmptyBatch : - tabletBatchBuilder.getAllNonEmptyBatches()) { - doTransferWrapper(nonEmptyBatch); + for (final Pair nonEmptyAndShouldEmitBatch : + tabletBatchBuilder.getAllNonEmptyAndShouldEmitBatches()) { + doTransferWrapper(nonEmptyAndShouldEmitBatch); } } @@ -196,7 +218,7 @@ private void doTransferWrapper(final Pair endPo } else { 
LOGGER.warn("Unsupported batch type {}.", batch.getClass()); } - batch.decreaseEventsReferenceCount(IoTDBDataRegionSyncConnector.class.getName(), true); + batch.decreaseEventsReferenceCount(IoTDBDataRegionSyncSink.class.getName(), true); batch.onSuccess(); } @@ -255,7 +277,11 @@ private void doTransfer(final PipeTabletEventTsFileBatch batchToTransfer) for (final File tsFile : sealedFiles) { doTransfer(pipe2WeightMap, tsFile, null); try { - FileUtils.delete(tsFile); + RetryUtils.retryOnException( + () -> { + FileUtils.delete(tsFile); + return null; + }); } catch (final NoSuchFileException e) { LOGGER.info("The file {} is not found, may already be deleted.", tsFile); } catch (final Exception e) { @@ -268,16 +294,16 @@ private void doTransfer(final PipeTabletEventTsFileBatch batchToTransfer) private void doTransferWrapper( final PipeInsertNodeTabletInsertionEvent pipeInsertNodeTabletInsertionEvent) throws PipeException { + // We increase the reference count for this event to determine if the event may be released. + if (!pipeInsertNodeTabletInsertionEvent.increaseReferenceCount( + IoTDBDataRegionSyncSink.class.getName())) { + return; + } try { - // We increase the reference count for this event to determine if the event may be released. - if (!pipeInsertNodeTabletInsertionEvent.increaseReferenceCount( - IoTDBDataRegionSyncConnector.class.getName())) { - return; - } doTransfer(pipeInsertNodeTabletInsertionEvent); } finally { pipeInsertNodeTabletInsertionEvent.decreaseReferenceCount( - IoTDBDataRegionSyncConnector.class.getName(), false); + IoTDBDataRegionSyncSink.class.getName(), false); } } @@ -291,8 +317,7 @@ private void doTransfer( // getDeviceId() may return null for InsertRowsNode, will be equal to getClient(null) clientAndStatus = clientManager.getClient(pipeInsertNodeTabletInsertionEvent.getDeviceId()); - final InsertNode insertNode = - pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible(); + final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode(); final TPipeTransferReq req = compressIfNeeded( insertNode != null @@ -336,16 +361,16 @@ private void doTransfer( private void doTransferWrapper(final PipeRawTabletInsertionEvent pipeRawTabletInsertionEvent) throws PipeException { + // We increase the reference count for this event to determine if the event may be released. + if (!pipeRawTabletInsertionEvent.increaseReferenceCount( + IoTDBDataRegionSyncSink.class.getName())) { + return; + } try { - // We increase the reference count for this event to determine if the event may be released. - if (!pipeRawTabletInsertionEvent.increaseReferenceCount( - IoTDBDataRegionSyncConnector.class.getName())) { - return; - } doTransfer(pipeRawTabletInsertionEvent); } finally { pipeRawTabletInsertionEvent.decreaseReferenceCount( - IoTDBDataRegionSyncConnector.class.getName(), false); + IoTDBDataRegionSyncSink.class.getName(), false); } } @@ -395,12 +420,11 @@ private void doTransfer(final PipeRawTabletInsertionEvent pipeRawTabletInsertion private void doTransferWrapper(final PipeTsFileInsertionEvent pipeTsFileInsertionEvent) throws PipeException, IOException { + // We increase the reference count for this event to determine if the event may be released. + if (!pipeTsFileInsertionEvent.increaseReferenceCount(IoTDBDataRegionSyncSink.class.getName())) { + return; + } try { - // We increase the reference count for this event to determine if the event may be released. 
- if (!pipeTsFileInsertionEvent.increaseReferenceCount( - IoTDBDataRegionSyncConnector.class.getName())) { - return; - } doTransfer( Collections.singletonMap( new Pair<>( @@ -411,7 +435,7 @@ private void doTransferWrapper(final PipeTsFileInsertionEvent pipeTsFileInsertio pipeTsFileInsertionEvent.isWithMod() ? pipeTsFileInsertionEvent.getModFile() : null); } finally { pipeTsFileInsertionEvent.decreaseReferenceCount( - IoTDBDataRegionSyncConnector.class.getName(), false); + IoTDBDataRegionSyncSink.class.getName(), false); } } @@ -492,6 +516,26 @@ private void doTransfer( LOGGER.info("Successfully transferred file {}.", tsFile); } + @Override + public TPipeTransferReq compressIfNeeded(final TPipeTransferReq req) throws IOException { + if (Objects.isNull(compressionTimer) && Objects.nonNull(attributeSortedString)) { + compressionTimer = + PipeDataRegionSinkMetrics.getInstance().getCompressionTimer(attributeSortedString); + } + return super.compressIfNeeded(req); + } + + @Override + public synchronized void discardEventsOfPipe(final String pipeNameToDrop, final int regionId) { + if (Objects.nonNull(tabletBatchBuilder)) { + tabletBatchBuilder.discardEventsOfPipe(pipeNameToDrop, regionId); + } + } + + public int getBatchSize() { + return Objects.nonNull(tabletBatchBuilder) ? tabletBatchBuilder.size() : 0; + } + @Override public void close() { if (tabletBatchBuilder != null) { @@ -500,4 +544,43 @@ public void close() { super.close(); } + + public IoTDBDataNodeSyncClientManager getClientManager() { + return clientManager; + } + + @Override + public void setTabletBatchSizeHistogram(Histogram tabletBatchSizeHistogram) { + if (tabletBatchBuilder != null) { + tabletBatchBuilder.setTabletBatchSizeHistogram(tabletBatchSizeHistogram); + } + } + + @Override + public void setTsFileBatchSizeHistogram(Histogram tsFileBatchSizeHistogram) { + if (tabletBatchBuilder != null) { + tabletBatchBuilder.setTsFileBatchSizeHistogram(tsFileBatchSizeHistogram); + } + } + + @Override + public void setTabletBatchTimeIntervalHistogram(Histogram tabletBatchTimeIntervalHistogram) { + if (tabletBatchBuilder != null) { + tabletBatchBuilder.setTabletBatchTimeIntervalHistogram(tabletBatchTimeIntervalHistogram); + } + } + + @Override + public void setTsFileBatchTimeIntervalHistogram(Histogram tsFileBatchTimeIntervalHistogram) { + if (tabletBatchBuilder != null) { + tabletBatchBuilder.setTsFileBatchTimeIntervalHistogram(tsFileBatchTimeIntervalHistogram); + } + } + + @Override + public void setBatchEventSizeHistogram(Histogram eventSizeHistogram) { + if (tabletBatchBuilder != null) { + tabletBatchBuilder.setEventSizeHistogram(eventSizeHistogram); + } + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/sync/IoTDBSchemaRegionConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBSchemaRegionSink.java similarity index 87% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/sync/IoTDBSchemaRegionConnector.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBSchemaRegionSink.java index 878430e027be2..94333e824d6c5 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/sync/IoTDBSchemaRegionConnector.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBSchemaRegionSink.java @@ -17,15 +17,15 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.connector.protocol.thrift.sync; +package org.apache.iotdb.db.pipe.sink.protocol.thrift.sync; -import org.apache.iotdb.commons.pipe.connector.client.IoTDBSyncClient; -import org.apache.iotdb.commons.pipe.connector.payload.thrift.request.PipeTransferFilePieceReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferSchemaSnapshotPieceReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferSchemaSnapshotSealReq; +import org.apache.iotdb.commons.pipe.sink.client.IoTDBSyncClient; +import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFilePieceReq; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionSnapshotEvent; import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferSchemaSnapshotPieceReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferSchemaSnapshotSealReq; import org.apache.iotdb.pipe.api.event.Event; import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent; @@ -44,9 +44,9 @@ import java.util.Collections; import java.util.Objects; -public class IoTDBSchemaRegionConnector extends IoTDBDataNodeSyncConnector { +public class IoTDBSchemaRegionSink extends IoTDBDataNodeSyncSink { - private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBSchemaRegionConnector.class); + private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBSchemaRegionSink.class); @Override public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exception { @@ -74,16 +74,16 @@ public void transfer(final Event event) throws Exception { private void doTransferWrapper(final PipeSchemaRegionSnapshotEvent pipeSchemaRegionSnapshotEvent) throws PipeException, IOException { + // We increase the reference count for this event to determine if the event may be released. + if (!pipeSchemaRegionSnapshotEvent.increaseReferenceCount( + IoTDBSchemaRegionSink.class.getName())) { + return; + } try { - // We increase the reference count for this event to determine if the event may be released. 
- if (!pipeSchemaRegionSnapshotEvent.increaseReferenceCount( - IoTDBSchemaRegionConnector.class.getName())) { - return; - } doTransfer(pipeSchemaRegionSnapshotEvent); } finally { pipeSchemaRegionSnapshotEvent.decreaseReferenceCount( - IoTDBSchemaRegionConnector.class.getName(), false); + IoTDBSchemaRegionSink.class.getName(), false); } } @@ -163,4 +163,9 @@ protected PipeTransferFilePieceReq getTransferMultiFilePieceReq( final String fileName, final long position, final byte[] payLoad) throws IOException { return PipeTransferSchemaSnapshotPieceReq.toTPipeTransferReq(fileName, position, payLoad); } + + @Override + protected void mayLimitRateAndRecordIO(final long requiredBytes) { + // Do nothing + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/websocket/WebSocketConnectorServer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/websocket/WebSocketConnectorServer.java similarity index 96% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/websocket/WebSocketConnectorServer.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/websocket/WebSocketConnectorServer.java index 2db12e0a10eba..940d6ff63dbe7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/websocket/WebSocketConnectorServer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/websocket/WebSocketConnectorServer.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.db.pipe.connector.protocol.websocket; +package org.apache.iotdb.db.pipe.sink.protocol.websocket; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; @@ -73,14 +73,14 @@ public static synchronized WebSocketConnectorServer getOrCreateInstance(int port return instance.get(); } - public synchronized void register(WebSocketConnector connector) { + public synchronized void register(WebSocketSink connector) { eventsWaitingForTransfer.putIfAbsent( connector.getPipeName(), new PriorityBlockingQueue<>(11, Comparator.comparing(o -> o.eventId))); eventsWaitingForAck.putIfAbsent(connector.getPipeName(), new ConcurrentHashMap<>()); } - public synchronized void unregister(WebSocketConnector connector) { + public synchronized void unregister(WebSocketSink connector) { final String pipeName = connector.getPipeName(); // close invoked in validation stage if (pipeName == null) { @@ -291,7 +291,7 @@ public void onError(WebSocket webSocket, Exception e) { } } - public void addEvent(Event event, WebSocketConnector connector) { + public void addEvent(Event event, WebSocketSink connector) { final PriorityBlockingQueue queue = eventsWaitingForTransfer.get(connector.getPipeName()); @@ -363,7 +363,7 @@ public void run() { private void transfer(String pipeName, EventWaitingForTransfer element) { final Long eventId = element.eventId; final Event event = element.event; - final WebSocketConnector connector = element.connector; + final WebSocketSink connector = element.connector; try { ByteBuffer tabletBuffer; @@ -434,10 +434,10 @@ private boolean sleepIfNecessary() { private static class EventWaitingForTransfer { private final Long eventId; - private final WebSocketConnector connector; + private final WebSocketSink connector; private final Event event; - public EventWaitingForTransfer(Long eventId, WebSocketConnector connector, Event event) { + public EventWaitingForTransfer(Long 
eventId, WebSocketSink connector, Event event) {
      this.eventId = eventId;
      this.connector = connector;
      this.event = event;
@@ -446,10 +446,10 @@ public EventWaitingForTransfer(Long eventId, WebSocketConnector connector, Event
   private static class EventWaitingForAck {
-    private final WebSocketConnector connector;
+    private final WebSocketSink connector;
     private final Event event;
-    public EventWaitingForAck(WebSocketConnector connector, Event event) {
+    public EventWaitingForAck(WebSocketSink connector, Event event) {
       this.connector = connector;
       this.event = event;
     }
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/websocket/WebSocketConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/websocket/WebSocketSink.java
similarity index 82%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/websocket/WebSocketConnector.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/websocket/WebSocketSink.java
index e1e5a5dd44c85..c89486bc2c81d 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/websocket/WebSocketConnector.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/websocket/WebSocketSink.java
@@ -17,9 +17,9 @@
  * under the License.
  */
-package org.apache.iotdb.db.pipe.connector.protocol.websocket;
+package org.apache.iotdb.db.pipe.sink.protocol.websocket;
-import org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant;
+import org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant;
 import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
 import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent;
 import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
@@ -39,9 +39,9 @@
 import java.util.Arrays;
 import java.util.Optional;
-public class WebSocketConnector implements PipeConnector {
+public class WebSocketSink implements PipeConnector {
-  private static final Logger LOGGER = LoggerFactory.getLogger(WebSocketConnector.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(WebSocketSink.class);
   private Integer port;
   private WebSocketConnectorServer server;
@@ -54,9 +54,9 @@ public void validate(PipeParameterValidator validator) throws Exception {
     port =
         parameters.getIntOrDefault(
             Arrays.asList(
-                PipeConnectorConstant.CONNECTOR_WEBSOCKET_PORT_KEY,
-                PipeConnectorConstant.SINK_WEBSOCKET_PORT_KEY),
-            PipeConnectorConstant.CONNECTOR_WEBSOCKET_PORT_DEFAULT_VALUE);
+                PipeSinkConstant.CONNECTOR_WEBSOCKET_PORT_KEY,
+                PipeSinkConstant.SINK_WEBSOCKET_PORT_KEY),
+            PipeSinkConstant.CONNECTOR_WEBSOCKET_PORT_DEFAULT_VALUE);
     server = WebSocketConnectorServer.getOrCreateInstance(port);
     if (server.getPort() != port) {
@@ -118,8 +118,13 @@ public void transfer(final TabletInsertionEvent tabletInsertionEvent) {
       return;
     }
-    ((EnrichedEvent) tabletInsertionEvent)
-        .increaseReferenceCount(WebSocketConnector.class.getName());
+    if (!((EnrichedEvent) tabletInsertionEvent)
+        .increaseReferenceCount(WebSocketSink.class.getName())) {
+      LOGGER.warn(
+          "WebSocketSink failed to increase the reference count of the event. Ignoring it.
Current event: {}.", + tabletInsertionEvent); + return; + } server.addEvent(tabletInsertionEvent, this); } @@ -134,11 +139,14 @@ public void transfer(TsFileInsertionEvent tsFileInsertionEvent) throws Exception } try { - for (TabletInsertionEvent event : tsFileInsertionEvent.toTabletInsertionEvents()) { - // Skip report if any tablet events is added - ((PipeTsFileInsertionEvent) tsFileInsertionEvent).skipReportOnCommit(); - transfer(event); - } + ((PipeTsFileInsertionEvent) tsFileInsertionEvent) + .consumeTabletInsertionEventsWithRetry( + event -> { + // Skip report if any tablet events is added + ((PipeTsFileInsertionEvent) tsFileInsertionEvent).skipReportOnCommit(); + transfer(event); + }, + "WebSocketConnector::transfer"); } finally { tsFileInsertionEvent.close(); } @@ -158,7 +166,7 @@ public void close() throws Exception { public void commit(EnrichedEvent enrichedEvent) { Optional.ofNullable(enrichedEvent) - .ifPresent(event -> event.decreaseReferenceCount(WebSocketConnector.class.getName(), true)); + .ifPresent(event -> event.decreaseReferenceCount(WebSocketSink.class.getName(), true)); } public String getPipeName() { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/writeback/WriteBackConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/writeback/WriteBackSink.java similarity index 86% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/writeback/WriteBackConnector.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/writeback/WriteBackSink.java index 68789bd3ef564..7aa54f1120d76 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/writeback/WriteBackConnector.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/writeback/WriteBackSink.java @@ -17,19 +17,19 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.connector.protocol.writeback; +package org.apache.iotdb.db.pipe.sink.protocol.writeback; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.db.auth.AuthorityChecker; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletBinaryReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletInsertNodeReq; -import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletRawReq; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.terminate.PipeTerminateEvent; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTabletBinaryReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTabletInsertNodeReq; +import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferTabletRawReq; import org.apache.iotdb.db.protocol.session.SessionManager; import org.apache.iotdb.db.queryengine.common.SessionInfo; import org.apache.iotdb.db.queryengine.plan.Coordinator; @@ -55,9 +55,9 @@ import java.time.ZoneId; import java.util.Objects; -public class WriteBackConnector implements PipeConnector { +public class WriteBackSink implements PipeConnector { - private static final Logger LOGGER = LoggerFactory.getLogger(WriteBackConnector.class); + private static final Logger LOGGER = LoggerFactory.getLogger(WriteBackSink.class); @Override public void validate(final PipeParameterValidator validator) throws Exception { @@ -111,16 +111,15 @@ public void transfer(final Event event) throws Exception { private void doTransferWrapper( final PipeInsertNodeTabletInsertionEvent pipeInsertNodeTabletInsertionEvent) throws PipeException, WALPipeException { + // We increase the reference count for this event to determine if the event may be released. + if (!pipeInsertNodeTabletInsertionEvent.increaseReferenceCount(WriteBackSink.class.getName())) { + return; + } try { - // We increase the reference count for this event to determine if the event may be released. - if (!pipeInsertNodeTabletInsertionEvent.increaseReferenceCount( - WriteBackConnector.class.getName())) { - return; - } doTransfer(pipeInsertNodeTabletInsertionEvent); } finally { pipeInsertNodeTabletInsertionEvent.decreaseReferenceCount( - WriteBackConnector.class.getName(), false); + WriteBackSink.class.getName(), false); } } @@ -129,8 +128,7 @@ private void doTransfer( throws PipeException, WALPipeException { final TSStatus status; - final InsertNode insertNode = - pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible(); + final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode(); if (Objects.isNull(insertNode)) { status = PipeDataNodeAgent.receiver() @@ -155,14 +153,14 @@ private void doTransfer( private void doTransferWrapper(final PipeRawTabletInsertionEvent pipeRawTabletInsertionEvent) throws PipeException { + // We increase the reference count for this event to determine if the event may be released. 
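+    // If the count cannot be increased, the event may already have been released,
+    // so we skip the transfer rather than touch a possibly freed event.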
+ if (!pipeRawTabletInsertionEvent.increaseReferenceCount(WriteBackSink.class.getName())) { + return; + } try { - // We increase the reference count for this event to determine if the event may be released. - if (!pipeRawTabletInsertionEvent.increaseReferenceCount(WriteBackConnector.class.getName())) { - return; - } doTransfer(pipeRawTabletInsertionEvent); } finally { - pipeRawTabletInsertionEvent.decreaseReferenceCount(WriteBackConnector.class.getName(), false); + pipeRawTabletInsertionEvent.decreaseReferenceCount(WriteBackSink.class.getName(), false); } } @@ -193,7 +191,8 @@ private TSStatus executeStatement(final InsertBaseStatement statement) { "", ClusterPartitionFetcher.getInstance(), ClusterSchemaFetcher.getInstance(), - IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold()) + IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold(), + false) .status; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/util/LeaderCacheUtils.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/LeaderCacheUtils.java similarity index 98% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/util/LeaderCacheUtils.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/LeaderCacheUtils.java index e31fec9b57b65..c026fb9fc3760 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/util/LeaderCacheUtils.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/LeaderCacheUtils.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.db.pipe.connector.util; +package org.apache.iotdb.db.pipe.sink.util; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TSStatus; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/PipeTabletEventSorter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/PipeTabletEventSorter.java new file mode 100644 index 0000000000000..9c1dec9da331e --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/PipeTabletEventSorter.java @@ -0,0 +1,240 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.pipe.sink.util; + +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.utils.Binary; +import org.apache.tsfile.utils.BitMap; +import org.apache.tsfile.write.UnSupportedDataTypeException; +import org.apache.tsfile.write.record.Tablet; +import org.apache.tsfile.write.schema.IMeasurementSchema; + +import java.time.LocalDate; +import java.util.Arrays; +import java.util.Comparator; + +public class PipeTabletEventSorter { + + private final Tablet tablet; + + private boolean isSorted = true; + private boolean isDeDuplicated = true; + + private Integer[] index; + private int[] deDuplicatedIndex; + private int deDuplicatedSize; + + public PipeTabletEventSorter(final Tablet tablet) { + this.tablet = tablet; + deDuplicatedSize = tablet == null ? 0 : tablet.rowSize; + } + + public void deduplicateAndSortTimestampsIfNecessary() { + if (tablet == null || tablet.rowSize == 0) { + return; + } + + for (int i = 1, size = tablet.rowSize; i < size; ++i) { + final long currentTimestamp = tablet.timestamps[i]; + final long previousTimestamp = tablet.timestamps[i - 1]; + + if (currentTimestamp < previousTimestamp) { + isSorted = false; + } + if (currentTimestamp == previousTimestamp) { + isDeDuplicated = false; + } + + if (!isSorted && !isDeDuplicated) { + break; + } + } + + if (isSorted && isDeDuplicated) { + return; + } + + index = new Integer[tablet.rowSize]; + deDuplicatedIndex = new int[tablet.rowSize]; + for (int i = 0, size = tablet.rowSize; i < size; i++) { + index[i] = i; + } + + if (!isSorted) { + sortTimestamps(); + + // Do deduplicate anyway. + // isDeDuplicated may be false positive when isSorted is false. + deduplicateTimestamps(); + isDeDuplicated = true; + } + + if (!isDeDuplicated) { + deduplicateTimestamps(); + } + + sortAndMayDeduplicateValuesAndBitMaps(); + } + + private void sortTimestamps() { + // Index is sorted stably because it is Integer[] + Arrays.sort(index, Comparator.comparingLong(i -> tablet.timestamps[i])); + Arrays.sort(tablet.timestamps, 0, tablet.rowSize); + } + + private void deduplicateTimestamps() { + deDuplicatedSize = 0; + long[] timestamps = tablet.timestamps; + for (int i = 1, size = tablet.rowSize; i < size; i++) { + if (timestamps[i] != timestamps[i - 1]) { + deDuplicatedIndex[deDuplicatedSize] = i - 1; + timestamps[deDuplicatedSize] = timestamps[i - 1]; + + ++deDuplicatedSize; + } + } + + deDuplicatedIndex[deDuplicatedSize] = tablet.rowSize - 1; + timestamps[deDuplicatedSize] = timestamps[tablet.rowSize - 1]; + tablet.rowSize = ++deDuplicatedSize; + } + + // Input: + // Col: [1, null, 3, 6, null] + // Timestamp: [2, 1, 1, 1, 1] + // Intermediate: + // Index: [1, 2, 3, 4, 0] + // SortedTimestamp: [1, 2] + // DeduplicateIndex: [3, 4] + // Output: + // (Used index: [2(3), 4(0)]) + // Col: [6, 1] + private void sortAndMayDeduplicateValuesAndBitMaps() { + int columnIndex = 0; + for (int i = 0, size = tablet.getSchemas().size(); i < size; i++) { + final IMeasurementSchema schema = tablet.getSchemas().get(i); + if (schema != null) { + BitMap deDuplicatedBitMap = null; + BitMap originalBitMap = null; + if (tablet.bitMaps != null && tablet.bitMaps[columnIndex] != null) { + originalBitMap = tablet.bitMaps[columnIndex]; + deDuplicatedBitMap = new BitMap(originalBitMap.getSize()); + } + + tablet.values[columnIndex] = + reorderValueListAndBitMap( + tablet.values[columnIndex], schema.getType(), originalBitMap, deDuplicatedBitMap); + + if (tablet.bitMaps != null && tablet.bitMaps[columnIndex] != null) { + 
tablet.bitMaps[columnIndex] = deDuplicatedBitMap; + } + columnIndex++; + } + } + } + + private Object reorderValueListAndBitMap( + final Object valueList, + final TSDataType dataType, + final BitMap originalBitMap, + final BitMap deDuplicatedBitMap) { + switch (dataType) { + case BOOLEAN: + final boolean[] boolValues = (boolean[]) valueList; + final boolean[] deDuplicatedBoolValues = new boolean[boolValues.length]; + for (int i = 0; i < deDuplicatedSize; i++) { + deDuplicatedBoolValues[i] = + boolValues[getLastNonnullIndex(i, originalBitMap, deDuplicatedBitMap)]; + } + return deDuplicatedBoolValues; + case INT32: + final int[] intValues = (int[]) valueList; + final int[] deDuplicatedIntValues = new int[intValues.length]; + for (int i = 0; i < deDuplicatedSize; i++) { + deDuplicatedIntValues[i] = + intValues[getLastNonnullIndex(i, originalBitMap, deDuplicatedBitMap)]; + } + return deDuplicatedIntValues; + case DATE: + final LocalDate[] dateValues = (LocalDate[]) valueList; + final LocalDate[] deDuplicatedDateValues = new LocalDate[dateValues.length]; + for (int i = 0; i < deDuplicatedSize; i++) { + deDuplicatedDateValues[i] = + dateValues[getLastNonnullIndex(i, originalBitMap, deDuplicatedBitMap)]; + } + return deDuplicatedDateValues; + case INT64: + case TIMESTAMP: + final long[] longValues = (long[]) valueList; + final long[] deDuplicatedLongValues = new long[longValues.length]; + for (int i = 0; i < deDuplicatedSize; i++) { + deDuplicatedLongValues[i] = + longValues[getLastNonnullIndex(i, originalBitMap, deDuplicatedBitMap)]; + } + return deDuplicatedLongValues; + case FLOAT: + final float[] floatValues = (float[]) valueList; + final float[] deDuplicatedFloatValues = new float[floatValues.length]; + for (int i = 0; i < deDuplicatedSize; i++) { + deDuplicatedFloatValues[i] = + floatValues[getLastNonnullIndex(i, originalBitMap, deDuplicatedBitMap)]; + } + return deDuplicatedFloatValues; + case DOUBLE: + final double[] doubleValues = (double[]) valueList; + final double[] deDuplicatedDoubleValues = new double[doubleValues.length]; + for (int i = 0; i < deDuplicatedSize; i++) { + deDuplicatedDoubleValues[i] = + doubleValues[getLastNonnullIndex(i, originalBitMap, deDuplicatedBitMap)]; + } + return deDuplicatedDoubleValues; + case TEXT: + case BLOB: + case STRING: + final Binary[] binaryValues = (Binary[]) valueList; + final Binary[] deDuplicatedBinaryValues = new Binary[binaryValues.length]; + for (int i = 0; i < deDuplicatedSize; i++) { + deDuplicatedBinaryValues[i] = + binaryValues[getLastNonnullIndex(i, originalBitMap, deDuplicatedBitMap)]; + } + return deDuplicatedBinaryValues; + default: + throw new UnSupportedDataTypeException( + String.format("Data type %s is not supported.", dataType)); + } + } + + private int getLastNonnullIndex( + final int i, final BitMap originalBitMap, final BitMap deDuplicatedBitMap) { + if (originalBitMap == null) { + return index[deDuplicatedIndex[i]]; + } + int lastNonnullIndex = deDuplicatedIndex[i]; + final int lastIndex = i > 0 ? 
deDuplicatedIndex[i - 1] : -1; + while (originalBitMap.isMarked(index[lastNonnullIndex])) { + --lastNonnullIndex; + if (lastNonnullIndex == lastIndex) { + deDuplicatedBitMap.mark(i); + return index[lastNonnullIndex + 1]; + } + } + return index[lastNonnullIndex]; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/DataRegionListeningFilter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/DataRegionListeningFilter.java similarity index 75% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/DataRegionListeningFilter.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/DataRegionListeningFilter.java index fa617334d6af6..49652ee67e188 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/DataRegionListeningFilter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/DataRegionListeningFilter.java @@ -17,11 +17,14 @@ * under the License. */ -package org.apache.iotdb.db.pipe.extractor.dataregion; +package org.apache.iotdb.db.pipe.source.dataregion; +import org.apache.iotdb.commons.consensus.DataRegionId; import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.pipe.task.PipeTask; +import org.apache.iotdb.commons.pipe.agent.task.PipeTask; +import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern; +import org.apache.iotdb.db.storageengine.StorageEngine; import org.apache.iotdb.db.storageengine.dataregion.DataRegion; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; @@ -32,12 +35,12 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_EXCLUSION_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_EXCLUSION_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_INCLUSION_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_INCLUSION_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_EXCLUSION_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_INCLUSION_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_EXCLUSION_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_EXCLUSION_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_INCLUSION_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_INCLUSION_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_EXCLUSION_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_INCLUSION_KEY; import static org.apache.iotdb.commons.pipe.datastructure.options.PipeInclusionOptions.parseOptions; /** @@ -57,12 +60,24 @@ public class DataRegionListeningFilter { } } - public static boolean shouldDataRegionBeListened(PipeParameters parameters) - throws IllegalPathException { + public static boolean shouldDataRegionBeListened( + PipeParameters parameters, DataRegionId dataRegionId) throws IllegalPathException { final Pair 
insertionDeletionListeningOptionPair = parseInsertionDeletionListeningOptionPair(parameters); - return insertionDeletionListeningOptionPair.getLeft() - || insertionDeletionListeningOptionPair.getRight(); + final boolean hasSpecificListeningOption = + insertionDeletionListeningOptionPair.getLeft() + || insertionDeletionListeningOptionPair.getRight(); + if (!hasSpecificListeningOption) { + return false; + } + + final DataRegion dataRegion = StorageEngine.getInstance().getDataRegion(dataRegionId); + if (dataRegion == null) { + return true; + } + + return PipePattern.parsePipePatternFromSourceParameters(parameters) + .mayOverlapWithDb(dataRegion.getDatabaseName()); } public static Pair parseInsertionDeletionListeningOptionPair( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/DataRegionWatermarkInjector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/DataRegionWatermarkInjector.java similarity index 95% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/DataRegionWatermarkInjector.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/DataRegionWatermarkInjector.java index a8d95d4481037..625c6156a2b30 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/DataRegionWatermarkInjector.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/DataRegionWatermarkInjector.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.db.pipe.extractor.dataregion; +package org.apache.iotdb.db.pipe.source.dataregion; import org.apache.iotdb.db.pipe.event.common.watermark.PipeWatermarkEvent; @@ -48,10 +48,6 @@ public long getInjectionIntervalInMs() { return injectionIntervalInMs; } - public long getNextInjectionTime() { - return nextInjectionTime; - } - public PipeWatermarkEvent inject() { if (System.currentTimeMillis() < nextInjectionTime) { return null; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/IoTDBDataRegionExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/IoTDBDataRegionSource.java similarity index 74% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/IoTDBDataRegionExtractor.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/IoTDBDataRegionSource.java index 0b447ffe7a4fa..aebd39d320c5e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/IoTDBDataRegionExtractor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/IoTDBDataRegionSource.java @@ -17,27 +17,26 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.extractor.dataregion; +package org.apache.iotdb.db.pipe.source.dataregion; import org.apache.iotdb.commons.consensus.DataRegionId; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.pipe.extractor.IoTDBExtractor; -import org.apache.iotdb.commons.pipe.pattern.IoTDBPipePattern; -import org.apache.iotdb.commons.pipe.pattern.PipePattern; +import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBPipePattern; +import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern; +import org.apache.iotdb.commons.pipe.source.IoTDBSource; import org.apache.iotdb.consensus.ConsensusFactory; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; -import org.apache.iotdb.db.pipe.extractor.dataregion.historical.PipeHistoricalDataRegionExtractor; -import org.apache.iotdb.db.pipe.extractor.dataregion.historical.PipeHistoricalDataRegionTsFileExtractor; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionExtractor; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionHeartbeatExtractor; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionHybridExtractor; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionLogExtractor; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionTsFileExtractor; -import org.apache.iotdb.db.pipe.metric.PipeDataNodeRemainingEventAndTimeMetrics; -import org.apache.iotdb.db.pipe.metric.PipeDataRegionExtractorMetrics; +import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics; +import org.apache.iotdb.db.pipe.metric.overview.PipeTsFileToTabletsMetrics; +import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionSourceMetrics; +import org.apache.iotdb.db.pipe.source.dataregion.historical.PipeHistoricalDataRegionSource; +import org.apache.iotdb.db.pipe.source.dataregion.historical.PipeHistoricalDataRegionTsFileSource; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.PipeRealtimeDataRegionHeartbeatSource; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.PipeRealtimeDataRegionHybridSource; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.PipeRealtimeDataRegionLogSource; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.PipeRealtimeDataRegionSource; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.PipeRealtimeDataRegionTsFileSource; import org.apache.iotdb.db.storageengine.StorageEngine; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALMode; import org.apache.iotdb.pipe.api.customizer.configuration.PipeExtractorRuntimeConfiguration; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; @@ -54,52 +53,51 @@ import java.util.Objects; import java.util.concurrent.atomic.AtomicReference; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_END_TIME_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_ENABLE_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_ENABLE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_END_TIME_KEY; -import static 
org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_START_TIME_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_MODE_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_MODE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_MODE_QUERY_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_MODE_SNAPSHOT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_PATTERN_FORMAT_IOTDB_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_PATTERN_FORMAT_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_PATTERN_FORMAT_PREFIX_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_ENABLE_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_ENABLE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_MODE_BATCH_MODE_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_MODE_FILE_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_MODE_FORCED_LOG_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_MODE_HYBRID_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_MODE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_MODE_LOG_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_MODE_STREAM_MODE_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_START_TIME_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_WATERMARK_INTERVAL_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_WATERMARK_INTERVAL_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_END_TIME_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_HISTORY_ENABLE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_HISTORY_END_TIME_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_HISTORY_START_TIME_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_MODE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_PATTERN_FORMAT_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_REALTIME_ENABLE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_REALTIME_MODE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_START_TIME_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_WATERMARK_INTERVAL_KEY; - -public class IoTDBDataRegionExtractor extends IoTDBExtractor { - - private static final Logger LOGGER = 
LoggerFactory.getLogger(IoTDBDataRegionExtractor.class); - - private PipeHistoricalDataRegionExtractor historicalExtractor; - private PipeRealtimeDataRegionExtractor realtimeExtractor; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_END_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_HISTORY_ENABLE_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_HISTORY_ENABLE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_HISTORY_END_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_HISTORY_START_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_MODE_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_MODE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_MODE_QUERY_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_MODE_SNAPSHOT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_PATTERN_FORMAT_IOTDB_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_PATTERN_FORMAT_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_PATTERN_FORMAT_PREFIX_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_ENABLE_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_ENABLE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_MODE_BATCH_MODE_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_MODE_FILE_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_MODE_FORCED_LOG_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_MODE_HYBRID_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_MODE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_MODE_LOG_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_MODE_STREAM_MODE_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_START_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_WATERMARK_INTERVAL_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_WATERMARK_INTERVAL_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_END_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_HISTORY_ENABLE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_HISTORY_END_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_HISTORY_START_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_MODE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_PATTERN_FORMAT_KEY; 
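Throughout the validation and construction code in this file, every option is read through a pair of alias keys: an extractor.* key and its source.* twin, with a default that applies only when neither is present. A sketch of that lookup semantics, assuming a map-backed parameter store that scans the alias list in order; the class and method here are simplified stand-ins for the real PipeParameters API, not its actual implementation:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public final class AliasKeySketch {

  private final Map<String, String> attributes = new HashMap<>();

  // First alias that is present wins; the default applies only when all are absent.
  boolean getBooleanOrDefault(List<String> keys, boolean defaultValue) {
    for (String key : keys) {
      String value = attributes.get(key);
      if (value != null) {
        return Boolean.parseBoolean(value);
      }
    }
    return defaultValue;
  }

  public static void main(String[] args) {
    AliasKeySketch parameters = new AliasKeySketch();
    parameters.attributes.put("source.realtime.enable", "false");
    // The source.* alias is found even though the extractor.* key is absent.
    System.out.println(
        parameters.getBooleanOrDefault(
            Arrays.asList("extractor.realtime.enable", "source.realtime.enable"), true)); // false
  }
}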
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_REALTIME_ENABLE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_REALTIME_MODE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_START_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_WATERMARK_INTERVAL_KEY; + +public class IoTDBDataRegionSource extends IoTDBSource { + + private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBDataRegionSource.class); + + private PipeHistoricalDataRegionSource historicalExtractor; + private PipeRealtimeDataRegionSource realtimeExtractor; private DataRegionWatermarkInjector watermarkInjector; private boolean hasNoExtractionNeed = true; - private boolean shouldExtractInsertion = false; private boolean shouldExtractDeletion = false; @Override @@ -114,7 +112,6 @@ public void validate(final PipeParameterValidator validator) throws Exception { return; } hasNoExtractionNeed = false; - shouldExtractInsertion = insertionDeletionListeningOptionPair.getLeft(); shouldExtractDeletion = insertionDeletionListeningOptionPair.getRight(); if (insertionDeletionListeningOptionPair.getLeft().equals(true) @@ -200,11 +197,13 @@ public void validate(final PipeParameterValidator validator) throws Exception { SOURCE_HISTORY_ENABLE_KEY, SOURCE_REALTIME_ENABLE_KEY)) { LOGGER.warn( - "When {}, {}, {} or {} is specified, specifying {}, {}, {} and {} is invalid.", + "When {}, {}, {} or {} is specified, specifying {}, {}, {}, {}, {} and {} is invalid.", SOURCE_START_TIME_KEY, EXTRACTOR_START_TIME_KEY, SOURCE_END_TIME_KEY, EXTRACTOR_END_TIME_KEY, + SOURCE_HISTORY_ENABLE_KEY, + EXTRACTOR_HISTORY_ENABLE_KEY, SOURCE_HISTORY_START_TIME_KEY, EXTRACTOR_HISTORY_START_TIME_KEY, SOURCE_HISTORY_END_TIME_KEY, @@ -235,17 +234,15 @@ private void validatePattern(final PipePattern pattern) { } private void constructHistoricalExtractor() { - // Enable historical extractor by default - historicalExtractor = new PipeHistoricalDataRegionTsFileExtractor(); + historicalExtractor = new PipeHistoricalDataRegionTsFileSource(); } - private void constructRealtimeExtractor(final PipeParameters parameters) - throws IllegalPathException { + private void constructRealtimeExtractor(final PipeParameters parameters) { // Use heartbeat only extractor if disable realtime extractor if (!parameters.getBooleanOrDefault( Arrays.asList(EXTRACTOR_REALTIME_ENABLE_KEY, SOURCE_REALTIME_ENABLE_KEY), EXTRACTOR_REALTIME_ENABLE_DEFAULT_VALUE)) { - realtimeExtractor = new PipeRealtimeDataRegionHeartbeatExtractor(); + realtimeExtractor = new PipeRealtimeDataRegionHeartbeatSource(); LOGGER.info( "Pipe: '{}' is set to false, use heartbeat realtime extractor.", EXTRACTOR_REALTIME_ENABLE_KEY); @@ -258,7 +255,7 @@ private void constructRealtimeExtractor(final PipeParameters parameters) Arrays.asList(EXTRACTOR_MODE_KEY, SOURCE_MODE_KEY), EXTRACTOR_MODE_DEFAULT_VALUE); if (extractorModeValue.equalsIgnoreCase(EXTRACTOR_MODE_QUERY_VALUE) || extractorModeValue.equalsIgnoreCase(EXTRACTOR_MODE_SNAPSHOT_VALUE)) { - realtimeExtractor = new PipeRealtimeDataRegionHeartbeatExtractor(); + realtimeExtractor = new PipeRealtimeDataRegionHeartbeatSource(); LOGGER.info( "Pipe: '{}' is set to {}, use heartbeat realtime extractor.", EXTRACTOR_MODE_KEY, @@ -268,8 +265,7 @@ private void constructRealtimeExtractor(final PipeParameters parameters) // Use hybrid mode by default if (!parameters.hasAnyAttributes(EXTRACTOR_REALTIME_MODE_KEY, 
SOURCE_REALTIME_MODE_KEY)) { - checkWalEnable(parameters); - realtimeExtractor = new PipeRealtimeDataRegionHybridExtractor(); + realtimeExtractor = new PipeRealtimeDataRegionHybridSource(); LOGGER.info( "Pipe: '{}' is not set, use hybrid mode by default.", EXTRACTOR_REALTIME_MODE_KEY); return; @@ -278,21 +274,18 @@ private void constructRealtimeExtractor(final PipeParameters parameters) switch (parameters.getStringByKeys(EXTRACTOR_REALTIME_MODE_KEY, SOURCE_REALTIME_MODE_KEY)) { case EXTRACTOR_REALTIME_MODE_FILE_VALUE: case EXTRACTOR_REALTIME_MODE_BATCH_MODE_VALUE: - realtimeExtractor = new PipeRealtimeDataRegionTsFileExtractor(); + realtimeExtractor = new PipeRealtimeDataRegionTsFileSource(); break; case EXTRACTOR_REALTIME_MODE_HYBRID_VALUE: case EXTRACTOR_REALTIME_MODE_LOG_VALUE: case EXTRACTOR_REALTIME_MODE_STREAM_MODE_VALUE: - checkWalEnable(parameters); - realtimeExtractor = new PipeRealtimeDataRegionHybridExtractor(); + realtimeExtractor = new PipeRealtimeDataRegionHybridSource(); break; case EXTRACTOR_REALTIME_MODE_FORCED_LOG_VALUE: - checkWalEnable(parameters); - realtimeExtractor = new PipeRealtimeDataRegionLogExtractor(); + realtimeExtractor = new PipeRealtimeDataRegionLogSource(); break; default: - checkWalEnable(parameters); - realtimeExtractor = new PipeRealtimeDataRegionHybridExtractor(); + realtimeExtractor = new PipeRealtimeDataRegionHybridSource(); if (LOGGER.isWarnEnabled()) { LOGGER.warn( "Pipe: Unsupported extractor realtime mode: {}, create a hybrid extractor.", @@ -301,16 +294,6 @@ private void constructRealtimeExtractor(final PipeParameters parameters) } } - private void checkWalEnable(final PipeParameters parameters) throws IllegalPathException { - if (Boolean.TRUE.equals( - DataRegionListeningFilter.parseInsertionDeletionListeningOptionPair(parameters) - .getLeft()) - && IoTDBDescriptor.getInstance().getConfig().getWalMode().equals(WALMode.DISABLE)) { - throw new PipeException( - "The pipe cannot transfer realtime insertion if data region disables wal. 
Please set 'realtime.mode'='batch' in source parameters when enabling realtime transmission."); - } - } - @Override public void customize( final PipeParameters parameters, final PipeExtractorRuntimeConfiguration configuration) @@ -342,8 +325,9 @@ public void customize( } // register metric after generating taskID - PipeDataRegionExtractorMetrics.getInstance().register(this); - PipeDataNodeRemainingEventAndTimeMetrics.getInstance().register(this); + PipeDataRegionSourceMetrics.getInstance().register(this); + PipeTsFileToTabletsMetrics.getInstance().register(this); + PipeDataNodeSinglePipeMetrics.getInstance().register(this); } @Override @@ -374,8 +358,7 @@ public void start() throws Exception { dataRegionIdObject, (dataRegion -> { dataRegion.writeLock( - String.format( - "Pipe: starting %s", IoTDBDataRegionExtractor.class.getName())); + String.format("Pipe: starting %s", IoTDBDataRegionSource.class.getName())); try { startHistoricalExtractorAndRealtimeExtractor(exceptionHolder); } finally { @@ -449,11 +432,11 @@ public Event supply() throws Exception { if (Objects.nonNull(event)) { if (event instanceof TabletInsertionEvent) { - PipeDataRegionExtractorMetrics.getInstance().markTabletEvent(taskID); + PipeDataRegionSourceMetrics.getInstance().markTabletEvent(taskID); } else if (event instanceof TsFileInsertionEvent) { - PipeDataRegionExtractorMetrics.getInstance().markTsFileEvent(taskID); + PipeDataRegionSourceMetrics.getInstance().markTsFileEvent(taskID); } else if (event instanceof PipeHeartbeatEvent) { - PipeDataRegionExtractorMetrics.getInstance().markPipeHeartbeatEvent(taskID); + PipeDataRegionSourceMetrics.getInstance().markPipeHeartbeatEvent(taskID); } } @@ -469,29 +452,16 @@ public void close() throws Exception { historicalExtractor.close(); realtimeExtractor.close(); if (Objects.nonNull(taskID)) { - PipeDataRegionExtractorMetrics.getInstance().deregister(taskID); + PipeDataRegionSourceMetrics.getInstance().deregister(taskID); } } - //////////////////////////// APIs provided for detecting stuck //////////////////////////// - - public boolean shouldExtractInsertion() { - return shouldExtractInsertion; - } - - public boolean isStreamMode() { - return realtimeExtractor instanceof PipeRealtimeDataRegionHybridExtractor - || realtimeExtractor instanceof PipeRealtimeDataRegionLogExtractor; - } - - public boolean hasConsumedAllHistoricalTsFiles() { - return historicalExtractor.hasConsumedAll(); - } - //////////////////////////// APIs provided for metric framework //////////////////////////// public int getHistoricalTsFileInsertionEventCount() { - return hasBeenStarted.get() ? historicalExtractor.getPendingQueueSize() : 0; + return hasBeenStarted.get() && Objects.nonNull(historicalExtractor) + ? historicalExtractor.getPendingQueueSize() + : 0; } public int getTabletInsertionEventCount() { @@ -505,10 +475,4 @@ public int getRealtimeTsFileInsertionEventCount() { public int getPipeHeartbeatEventCount() { return hasBeenStarted.get() ? realtimeExtractor.getPipeHeartbeatEventCount() : 0; } - - public int getEventCount() { - return hasBeenStarted.get() - ? 
(historicalExtractor.getPendingQueueSize() + realtimeExtractor.getEventCount()) - : 0; - } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/historical/PipeHistoricalDataRegionSource.java similarity index 86% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionExtractor.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/historical/PipeHistoricalDataRegionSource.java index 067c5143c783b..c3aba61336366 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionExtractor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/historical/PipeHistoricalDataRegionSource.java @@ -17,11 +17,11 @@ * under the License. */ -package org.apache.iotdb.db.pipe.extractor.dataregion.historical; +package org.apache.iotdb.db.pipe.source.dataregion.historical; import org.apache.iotdb.pipe.api.PipeExtractor; -public interface PipeHistoricalDataRegionExtractor extends PipeExtractor { +public interface PipeHistoricalDataRegionSource extends PipeExtractor { boolean hasConsumedAll(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/historical/PipeHistoricalDataRegionTsFileSource.java similarity index 62% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileExtractor.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/historical/PipeHistoricalDataRegionTsFileSource.java index ca1888b0200e5..e32789776479e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileExtractor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/historical/PipeHistoricalDataRegionTsFileSource.java @@ -17,28 +17,28 @@ * under the License. 
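The realtime-mode dispatch earlier in this file boils down to a string-to-implementation mapping. A compact sketch of that mapping follows; the literal mode strings ("file", "batch", "hybrid", "log", "stream", "forced-log") are my reading of the EXTRACTOR_REALTIME_MODE_*_VALUE constants and should be treated as assumptions, and the enum abbreviates the PipeRealtimeDataRegion*Source classes the real code instantiates:

public final class RealtimeModeSketch {

  enum SourceKind { TSFILE, HYBRID, LOG, HEARTBEAT }

  static SourceKind chooseRealtimeSource(String realtimeMode) {
    switch (realtimeMode) {
      case "file":
      case "batch":
        return SourceKind.TSFILE;
      case "hybrid":
      case "log":
      case "stream":
        return SourceKind.HYBRID;
      case "forced-log":
        return SourceKind.LOG;
      default:
        // Unknown modes fall back to hybrid; the real code logs a warning here.
        return SourceKind.HYBRID;
    }
  }

  public static void main(String[] args) {
    System.out.println(chooseRealtimeSource("batch")); // TSFILE
    System.out.println(chooseRealtimeSource("stream")); // HYBRID
    System.out.println(chooseRealtimeSource("forced-log")); // LOG
  }
}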
*/ -package org.apache.iotdb.db.pipe.extractor.dataregion.historical; +package org.apache.iotdb.db.pipe.source.dataregion.historical; import org.apache.iotdb.commons.consensus.DataRegionId; import org.apache.iotdb.commons.consensus.index.ProgressIndex; import org.apache.iotdb.commons.consensus.index.impl.StateProgressIndex; import org.apache.iotdb.commons.consensus.index.impl.TimeWindowStateProgressIndex; import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; import org.apache.iotdb.commons.pipe.config.constant.SystemConstant; -import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskExtractorRuntimeEnvironment; -import org.apache.iotdb.commons.pipe.pattern.PipePattern; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskSourceRuntimeEnvironment; +import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern; +import org.apache.iotdb.commons.pipe.event.ProgressReportEvent; import org.apache.iotdb.db.pipe.event.common.terminate.PipeTerminateEvent; import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; -import org.apache.iotdb.db.pipe.extractor.dataregion.DataRegionListeningFilter; import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; -import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileResourceManager; +import org.apache.iotdb.db.pipe.source.dataregion.DataRegionListeningFilter; import org.apache.iotdb.db.storageengine.StorageEngine; import org.apache.iotdb.db.storageengine.dataregion.DataRegion; +import org.apache.iotdb.db.storageengine.dataregion.memtable.TsFileProcessor; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileManager; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; -import org.apache.iotdb.db.storageengine.dataregion.tsfile.generator.TsFileNameGenerator; import org.apache.iotdb.db.utils.DateTimeUtils; import org.apache.iotdb.pipe.api.customizer.configuration.PipeExtractorRuntimeConfiguration; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator; @@ -57,42 +57,48 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Queue; import java.util.Set; import java.util.stream.Collectors; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_END_TIME_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_ENABLE_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_ENABLE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_END_TIME_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_LOOSE_RANGE_ALL_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_LOOSE_RANGE_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_LOOSE_RANGE_KEY; -import static 
org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_LOOSE_RANGE_PATH_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_LOOSE_RANGE_TIME_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_START_TIME_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_MODS_ENABLE_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_MODS_ENABLE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_START_TIME_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_END_TIME_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_HISTORY_ENABLE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_HISTORY_END_TIME_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_HISTORY_LOOSE_RANGE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_HISTORY_START_TIME_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_MODS_ENABLE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_START_TIME_KEY; - -public class PipeHistoricalDataRegionTsFileExtractor implements PipeHistoricalDataRegionExtractor { +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_END_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_FORWARDING_PIPE_REQUESTS_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_FORWARDING_PIPE_REQUESTS_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_HISTORY_ENABLE_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_HISTORY_ENABLE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_HISTORY_END_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_HISTORY_LOOSE_RANGE_ALL_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_HISTORY_LOOSE_RANGE_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_HISTORY_LOOSE_RANGE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_HISTORY_LOOSE_RANGE_PATH_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_HISTORY_LOOSE_RANGE_TIME_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_HISTORY_START_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_MODE_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_MODE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_MODE_QUERY_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_MODE_SNAPSHOT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_MODS_ENABLE_DEFAULT_VALUE; +import static 
org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_MODS_ENABLE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_START_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_END_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_FORWARDING_PIPE_REQUESTS_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_HISTORY_ENABLE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_HISTORY_END_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_HISTORY_LOOSE_RANGE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_HISTORY_START_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_MODE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_MODS_ENABLE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_START_TIME_KEY; + +public class PipeHistoricalDataRegionTsFileSource implements PipeHistoricalDataRegionSource { private static final Logger LOGGER = - LoggerFactory.getLogger(PipeHistoricalDataRegionTsFileExtractor.class); - - private static final Map<Integer, Long> DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP = new HashMap<>(); - private static final long PIPE_MIN_FLUSH_INTERVAL_IN_MS = 2000; + LoggerFactory.getLogger(PipeHistoricalDataRegionTsFileSource.class); private String pipeName; private long creationTime; @@ -108,7 +114,6 @@ public class PipeHistoricalDataRegionTsFileExtractor implements PipeHistoricalDa private boolean isHistoricalExtractorEnabled = false; private long historicalDataExtractionStartTime = Long.MIN_VALUE; // Event time private long historicalDataExtractionEndTime = Long.MAX_VALUE; // Event time - private long historicalDataExtractionTimeLowerBound; // Arrival time private boolean sloppyTimeRange; // true to disable time range filter after extraction private boolean sloppyPattern; // true to disable pattern filter after extraction @@ -120,7 +125,12 @@ public class PipeHistoricalDataRegionTsFileExtractor implements PipeHistoricalDa private boolean shouldTerminatePipeOnAllHistoricalEventsConsumed; private boolean isTerminateSignalSent = false; + private boolean isForwardingPipeRequests; + + private volatile boolean hasBeenStarted = false; + private Queue<TsFileResource> pendingQueue; + private final Set<TsFileResource> filteredTsFileResources = new HashSet<>(); @Override public void validate(final PipeParameterValidator validator) { @@ -213,17 +223,14 @@ public void validate(final PipeParameterValidator validator) { try { historicalDataExtractionStartTime = - isHistoricalExtractorEnabled - && parameters.hasAnyAttributes( - EXTRACTOR_HISTORY_START_TIME_KEY, SOURCE_HISTORY_START_TIME_KEY) + parameters.hasAnyAttributes( + EXTRACTOR_HISTORY_START_TIME_KEY, SOURCE_HISTORY_START_TIME_KEY) ? DateTimeUtils.convertTimestampOrDatetimeStrToLongWithDefaultZone( parameters.getStringByKeys( EXTRACTOR_HISTORY_START_TIME_KEY, SOURCE_HISTORY_START_TIME_KEY)) : Long.MIN_VALUE; historicalDataExtractionEndTime = - isHistoricalExtractorEnabled - && parameters.hasAnyAttributes( - EXTRACTOR_HISTORY_END_TIME_KEY, SOURCE_HISTORY_END_TIME_KEY) + parameters.hasAnyAttributes(EXTRACTOR_HISTORY_END_TIME_KEY, SOURCE_HISTORY_END_TIME_KEY) ?
DateTimeUtils.convertTimestampOrDatetimeStrToLongWithDefaultZone( parameters.getStringByKeys( EXTRACTOR_HISTORY_END_TIME_KEY, SOURCE_HISTORY_END_TIME_KEY)) @@ -255,8 +262,8 @@ public void customize( return; } - final PipeTaskExtractorRuntimeEnvironment environment = - (PipeTaskExtractorRuntimeEnvironment) configuration.getRuntimeEnvironment(); + final PipeTaskSourceRuntimeEnvironment environment = + (PipeTaskSourceRuntimeEnvironment) configuration.getRuntimeEnvironment(); pipeName = environment.getPipeName(); creationTime = environment.getCreationTime(); @@ -264,10 +271,6 @@ public void customize( startIndex = environment.getPipeTaskMeta().getProgressIndex(); dataRegionId = environment.getRegionId(); - synchronized (DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP) { - DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP.putIfAbsent(dataRegionId, 0L); - } - pipePattern = PipePattern.parsePipePatternFromSourceParameters(parameters); final DataRegion dataRegion = @@ -279,46 +282,6 @@ public void customize( } } - // Enable historical extractor by default - historicalDataExtractionTimeLowerBound = - isHistoricalExtractorEnabled - ? Long.MIN_VALUE - // We define the realtime data as the data generated after the creation time - // of the pipe from user's perspective. But we still need to use - // PipeHistoricalDataRegionExtractor to extract the realtime data generated between the - // creation time of the pipe and the time when the pipe starts, because those data - // can not be listened by PipeRealtimeDataRegionExtractor, and should be extracted by - // PipeHistoricalDataRegionExtractor from implementation perspective. - : environment.getCreationTime(); - - // Only invoke flushDataRegionAllTsFiles() when the pipe runs in the realtime only mode. - // realtime only mode -> (historicalDataExtractionTimeLowerBound != Long.MIN_VALUE) - // - // Ensure that all data in the data region is flushed to disk before extracting data. - // This ensures the generation time of all newly generated TsFiles (realtime data) after the - // invocation of flushDataRegionAllTsFiles() is later than the creationTime of the pipe - // (historicalDataExtractionTimeLowerBound). - // - // Note that: the generation time of the TsFile is the time when the TsFile is created, not - // the time when the data is flushed to the TsFile. - // - // Then we can use the generation time of the TsFile to determine whether the data in the - // TsFile should be extracted by comparing the generation time of the TsFile with the - // historicalDataExtractionTimeLowerBound when starting the pipe in realtime only mode. - // - // If we don't invoke flushDataRegionAllTsFiles() in the realtime only mode, the data generated - // between the creation time of the pipe the time when the pipe starts will be lost. 
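The throttled per-region flush being deleted here (the PIPE_MIN_FLUSH_INTERVAL_IN_MS gate) is replaced later in this diff by a consensus-aware flush: consensus pipes flush synchronously, everything else asynchronously. A sketch of that policy under stated assumptions; the Region interface is invented for illustration (the real code calls the DataRegion methods of the same names), and the prefix constant stands in for PipeStaticMeta.CONSENSUS_PIPE_PREFIX, whose actual value I am not certain of:

public final class FlushPolicySketch {

  interface Region {
    void syncCloseAllWorkingTsFileProcessors();

    void asyncCloseAllWorkingTsFileProcessors();
  }

  static final String CONSENSUS_PIPE_PREFIX = "__consensus."; // assumed value

  static void flushBeforeHistoricalExtraction(String pipeName, Region region) {
    if (pipeName.startsWith(CONSENSUS_PIPE_PREFIX)) {
      // Consensus pipes must observe a consistent on-disk state, so wait for the flush.
      region.syncCloseAllWorkingTsFileProcessors();
    } else {
      // Ordinary pipes only need the flush to be triggered; realtime extraction covers
      // whatever is still in memory.
      region.asyncCloseAllWorkingTsFileProcessors();
    }
  }

  public static void main(String[] args) {
    Region region =
        new Region() {
          @Override
          public void syncCloseAllWorkingTsFileProcessors() {
            System.out.println("sync flush");
          }

          @Override
          public void asyncCloseAllWorkingTsFileProcessors() {
            System.out.println("async flush");
          }
        };
    flushBeforeHistoricalExtraction("__consensus.group-1", region); // prints "sync flush"
    flushBeforeHistoricalExtraction("userPipe", region); // prints "async flush"
  }
}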
- if (historicalDataExtractionTimeLowerBound != Long.MIN_VALUE) { - synchronized (DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP) { - final long lastFlushedByPipeTime = - DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP.get(dataRegionId); - if (System.currentTimeMillis() - lastFlushedByPipeTime >= PIPE_MIN_FLUSH_INTERVAL_IN_MS) { - flushDataRegionAllTsFiles(); - DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP.replace(dataRegionId, System.currentTimeMillis()); - } - } - } - shouldTransferModFile = parameters.getBooleanOrDefault( Arrays.asList(SOURCE_MODS_ENABLE_KEY, EXTRACTOR_MODS_ENABLE_KEY), @@ -328,17 +291,20 @@ public void customize( final String extractorModeValue = parameters.getStringOrDefault( - Arrays.asList( - PipeExtractorConstant.EXTRACTOR_MODE_KEY, PipeExtractorConstant.SOURCE_MODE_KEY), - PipeExtractorConstant.EXTRACTOR_MODE_DEFAULT_VALUE); + Arrays.asList(EXTRACTOR_MODE_KEY, SOURCE_MODE_KEY), EXTRACTOR_MODE_DEFAULT_VALUE); shouldTerminatePipeOnAllHistoricalEventsConsumed = - extractorModeValue.equalsIgnoreCase(PipeExtractorConstant.EXTRACTOR_MODE_QUERY_VALUE) - || extractorModeValue.equalsIgnoreCase( - PipeExtractorConstant.EXTRACTOR_MODE_SNAPSHOT_VALUE); + extractorModeValue.equalsIgnoreCase(EXTRACTOR_MODE_SNAPSHOT_VALUE) + || extractorModeValue.equalsIgnoreCase(EXTRACTOR_MODE_QUERY_VALUE); + + isForwardingPipeRequests = + parameters.getBooleanOrDefault( + Arrays.asList( + EXTRACTOR_FORWARDING_PIPE_REQUESTS_KEY, SOURCE_FORWARDING_PIPE_REQUESTS_KEY), + EXTRACTOR_FORWARDING_PIPE_REQUESTS_DEFAULT_VALUE); if (LOGGER.isInfoEnabled()) { LOGGER.info( - "Pipe {}@{}: historical data extraction time range, start time {}({}), end time {}({}), sloppy pattern {}, sloppy time range {}, should transfer mod file {}, should terminate pipe on all historical events consumed {}", + "Pipe {}@{}: historical data extraction time range, start time {}({}), end time {}({}), sloppy pattern {}, sloppy time range {}, should transfer mod file {}, is forwarding pipe requests: {}", pipeName, dataRegionId, DateTimeUtils.convertLongToDate(historicalDataExtractionStartTime), @@ -348,30 +314,24 @@ public void customize( sloppyPattern, sloppyTimeRange, shouldTransferModFile, - shouldTerminatePipeOnAllHistoricalEventsConsumed); - } - } - - private void flushDataRegionAllTsFiles() { - final DataRegion dataRegion = - StorageEngine.getInstance().getDataRegion(new DataRegionId(dataRegionId)); - if (Objects.isNull(dataRegion)) { - return; - } - - dataRegion.writeLock("Pipe: create historical TsFile extractor"); - try { - dataRegion.syncCloseAllWorkingTsFileProcessors(); - } finally { - dataRegion.writeUnlock(); + isForwardingPipeRequests); } } @Override public synchronized void start() { if (!shouldExtractInsertion) { + hasBeenStarted = true; return; } + if (!StorageEngine.getInstance().isReadyForNonReadWriteFunctions()) { + LOGGER.info( + "Pipe {}@{}: failed to start to extract historical TsFile, storage engine is not ready. 
Will retry later.", + pipeName, + dataRegionId); + return; + } + hasBeenStarted = true; final DataRegion dataRegion = StorageEngine.getInstance().getDataRegion(new DataRegionId(dataRegionId)); @@ -384,88 +344,111 @@ public synchronized void start() { final long startHistoricalExtractionTime = System.currentTimeMillis(); try { LOGGER.info("Pipe {}@{}: start to flush data region", pipeName, dataRegionId); - synchronized (DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP) { - final long lastFlushedByPipeTime = - DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP.get(dataRegionId); - if (System.currentTimeMillis() - lastFlushedByPipeTime >= PIPE_MIN_FLUSH_INTERVAL_IN_MS) { - dataRegion.syncCloseAllWorkingTsFileProcessors(); - DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP.replace(dataRegionId, System.currentTimeMillis()); - LOGGER.info( - "Pipe {}@{}: finish to flush data region, took {} ms", - pipeName, - dataRegionId, - System.currentTimeMillis() - startHistoricalExtractionTime); - } else { - LOGGER.info( - "Pipe {}@{}: skip to flush data region, last flushed time {} ms ago", - pipeName, - dataRegionId, - System.currentTimeMillis() - lastFlushedByPipeTime); - } + + // Consider the scenario: a consensus pipe reaches a region and another pipe follows + // **immediately**; with a throttled flush, the latter pipe would skip the flush operation. + // Because consensus pipes are generally not created at the same time, the synchronous flush + // causes no serious waiting for locks. Therefore, the flush operation is always performed for + // the consensus pipe, and the lastFlushed timestamp is not updated here. + if (pipeName.startsWith(PipeStaticMeta.CONSENSUS_PIPE_PREFIX)) { + dataRegion.syncCloseAllWorkingTsFileProcessors(); + } else { + dataRegion.asyncCloseAllWorkingTsFileProcessors(); } + LOGGER.info( + "Pipe {}@{}: finish to flush data region, took {} ms", + pipeName, + dataRegionId, + System.currentTimeMillis() - startHistoricalExtractionTime); final TsFileManager tsFileManager = dataRegion.getTsFileManager(); tsFileManager.readLock(); try { final int originalSequenceTsFileCount = tsFileManager.size(true); - final int originalUnsequenceTsFileCount = tsFileManager.size(false); - final List<TsFileResource> resourceList = - new ArrayList<>(originalSequenceTsFileCount + originalUnsequenceTsFileCount); + final int originalUnSequenceTsFileCount = tsFileManager.size(false); + final List<TsFileResource> originalResourceList = + new ArrayList<>(originalSequenceTsFileCount + originalUnSequenceTsFileCount); LOGGER.info( "Pipe {}@{}: start to extract historical TsFile, original sequence file count {}, " - + "original unsequence file count {}, start progress index {}", + + "original unSequence file count {}, start progress index {}", pipeName, dataRegionId, originalSequenceTsFileCount, - originalUnsequenceTsFileCount, + originalUnSequenceTsFileCount, startIndex); final Collection<TsFileResource> sequenceTsFileResources = tsFileManager.getTsFileList(true).stream() + .peek(originalResourceList::add) .filter( resource -> - // Some resource may not be closed due to the control of - // PIPE_MIN_FLUSH_INTERVAL_IN_MS. We simply ignore them. - !resource.isClosed() - || mayTsFileContainUnprocessedData(resource) - && isTsFileResourceOverlappedWithTimeRange(resource) - && isTsFileGeneratedAfterExtractionTimeLowerBound(resource) - && mayTsFileResourceOverlappedWithPattern(resource)) + isHistoricalExtractorEnabled + && + // Some resource is marked as deleted but not removed from the list. + !resource.isDeleted() + // Some resource is generated by pipe.
We ignore them if the pipe should + // not transfer pipe requests. + && (!resource.isGeneratedByPipe() || isForwardingPipeRequests) + && ( + // If the tsFile is not already marked closing, it is not captured by + // the pipe realtime module. Thus, we can wait for the realtime sync + // module to handle this, to avoid blocking the pipe sync process. + !resource.isClosed() + && Optional.ofNullable(resource.getProcessor()) + .map(TsFileProcessor::alreadyMarkedClosing) + .orElse(true) + || mayTsFileContainUnprocessedData(resource) + && isTsFileResourceOverlappedWithTimeRange(resource) + && mayTsFileResourceOverlappedWithPattern(resource))) .collect(Collectors.toList()); - resourceList.addAll(sequenceTsFileResources); + filteredTsFileResources.addAll(sequenceTsFileResources); - final Collection<TsFileResource> unsequenceTsFileResources = + final Collection<TsFileResource> unSequenceTsFileResources = tsFileManager.getTsFileList(false).stream() + .peek(originalResourceList::add) .filter( resource -> - // Some resource may not be closed due to the control of - // PIPE_MIN_FLUSH_INTERVAL_IN_MS. We simply ignore them. - !resource.isClosed() - || mayTsFileContainUnprocessedData(resource) - && isTsFileResourceOverlappedWithTimeRange(resource) - && isTsFileGeneratedAfterExtractionTimeLowerBound(resource) - && mayTsFileResourceOverlappedWithPattern(resource)) + isHistoricalExtractorEnabled + && + // Some resource is marked as deleted but not removed from the list. + !resource.isDeleted() + // Some resource is generated by pipe. We ignore them if the pipe should + // not transfer pipe requests. + && (!resource.isGeneratedByPipe() || isForwardingPipeRequests) + && ( + // If the tsFile is not already marked closing, it is not captured by + // the pipe realtime module. Thus, we can wait for the realtime sync + // module to handle this, to avoid blocking the pipe sync process. + !resource.isClosed() + && Optional.ofNullable(resource.getProcessor()) + .map(TsFileProcessor::alreadyMarkedClosing) + .orElse(true) + || mayTsFileContainUnprocessedData(resource) + && isTsFileResourceOverlappedWithTimeRange(resource) + && mayTsFileResourceOverlappedWithPattern(resource))) .collect(Collectors.toList()); - resourceList.addAll(unsequenceTsFileResources); + filteredTsFileResources.addAll(unSequenceTsFileResources); - resourceList.forEach( + filteredTsFileResources.removeIf( resource -> { // Pin the resource, in case the file is removed by compaction or anything. // Will unpin it after the PipeTsFileInsertionEvent is created and pinned. try { PipeDataNodeResourceManager.tsfile() - .pinTsFileResource(resource, shouldTransferModFile); + .pinTsFileResource(resource, shouldTransferModFile, pipeName); + return false; } catch (final IOException e) { - LOGGER.warn("Pipe: failed to pin TsFileResource {}", resource.getTsFilePath()); + LOGGER.warn("Pipe: failed to pin TsFileResource {}", resource.getTsFilePath(), e); + return true; } }); - resourceList.sort( + originalResourceList.sort( (o1, o2) -> startIndex instanceof TimeWindowStateProgressIndex ?
Long.compare(o1.getFileStartTime(), o2.getFileStartTime()) : o1.getMaxProgressIndex().topologicalCompareTo(o2.getMaxProgressIndex())); - pendingQueue = new ArrayDeque<>(resourceList); + pendingQueue = new ArrayDeque<>(originalResourceList); LOGGER.info( "Pipe {}@{}: finish to extract historical TsFile, extracted sequence file count {}/{}, " @@ -474,10 +457,10 @@ && mayTsFileResourceOverlappedWithPattern(resource)) dataRegionId, sequenceTsFileResources.size(), originalSequenceTsFileCount, - unsequenceTsFileResources.size(), - originalUnsequenceTsFileCount, - resourceList.size(), - originalSequenceTsFileCount + originalUnsequenceTsFileCount, + unSequenceTsFileResources.size(), + originalUnSequenceTsFileCount, + filteredTsFileResources.size(), + originalSequenceTsFileCount + originalUnSequenceTsFileCount, System.currentTimeMillis() - startHistoricalExtractionTime); } finally { tsFileManager.readUnlock(); @@ -494,31 +477,29 @@ private boolean mayTsFileContainUnprocessedData(final TsFileResource resource) { } if (startIndex instanceof StateProgressIndex) { - // Some different tsFiles may share the same max progressIndex, thus tsFiles with an - // "equals" max progressIndex must be transmitted to avoid data loss - final ProgressIndex innerProgressIndex = - ((StateProgressIndex) startIndex).getInnerProgressIndex(); - return !innerProgressIndex.isAfter(resource.getMaxProgressIndexAfterClose()) - && !innerProgressIndex.equals(resource.getMaxProgressIndexAfterClose()); + startIndex = ((StateProgressIndex) startIndex).getInnerProgressIndex(); } - // Some different tsFiles may share the same max progressIndex, thus tsFiles with an - // "equals" max progressIndex must be transmitted to avoid data loss - return !startIndex.isAfter(resource.getMaxProgressIndexAfterClose()); - } - - private boolean mayTsFileResourceOverlappedWithPattern(final TsFileResource resource) { - if (!sloppyPattern) { + if (!startIndex.isAfter(resource.getMaxProgressIndex()) + && !startIndex.equals(resource.getMaxProgressIndex())) { + LOGGER.info( + "Pipe {}@{}: file {} meets mayTsFileContainUnprocessedData condition, extractor progressIndex: {}, resource ProgressIndex: {}", + pipeName, + dataRegionId, + resource.getTsFilePath(), + startIndex, + resource.getMaxProgressIndex()); return true; } + return false; + } + private boolean mayTsFileResourceOverlappedWithPattern(final TsFileResource resource) { final Set<IDeviceID> deviceSet; try { final Map<IDeviceID, Boolean> deviceIsAlignedMap = PipeDataNodeResourceManager.tsfile() - .getDeviceIsAlignedMapFromCache( - PipeTsFileResourceManager.getHardlinkOrCopiedFileInPipeDir(resource.getTsFile()), - false); + .getDeviceIsAlignedMapFromCache(resource.getTsFile(), false); deviceSet = Objects.nonNull(deviceIsAlignedMap) ?
deviceIsAlignedMap.keySet() : resource.getDevices(); } catch (final IOException e) { @@ -533,7 +514,6 @@ private boolean mayTsFileResourceOverlappedWithPattern(final TsFileResource reso return deviceSet.stream() .anyMatch( - // TODO: use IDeviceID deviceID -> pipePattern.mayOverlapWithDevice(((PlainDeviceID) deviceID).toStringID())); } @@ -547,47 +527,60 @@ private boolean isTsFileResourceCoveredByTimeRange(final TsFileResource resource && historicalDataExtractionEndTime >= resource.getFileEndTime(); } - private boolean isTsFileGeneratedAfterExtractionTimeLowerBound(final TsFileResource resource) { - try { - return historicalDataExtractionTimeLowerBound - <= TsFileNameGenerator.getTsFileName(resource.getTsFile().getName()).getTime(); - } catch (final IOException e) { - LOGGER.warn( - "Pipe {}@{}: failed to get the generation time of TsFile {}, extract it anyway" - + " (historical data extraction time lower bound: {})", - pipeName, - dataRegionId, - resource.getTsFilePath(), - historicalDataExtractionTimeLowerBound, - e); - // If failed to get the generation time of the TsFile, we will extract the data in the TsFile - // anyway. - return true; - } - } - @Override public synchronized Event supply() { + if (!hasBeenStarted && StorageEngine.getInstance().isReadyForNonReadWriteFunctions()) { + start(); + } + if (Objects.isNull(pendingQueue)) { return null; } final TsFileResource resource = pendingQueue.poll(); + if (resource == null) { - isTerminateSignalSent = true; final PipeTerminateEvent terminateEvent = - new PipeTerminateEvent(pipeName, creationTime, pipeTaskMeta, dataRegionId); - terminateEvent.increaseReferenceCount( - PipeHistoricalDataRegionTsFileExtractor.class.getName()); + new PipeTerminateEvent( + pipeName, + creationTime, + pipeTaskMeta, + dataRegionId, + shouldTerminatePipeOnAllHistoricalEventsConsumed); + if (!terminateEvent.increaseReferenceCount( + PipeHistoricalDataRegionTsFileSource.class.getName())) { + LOGGER.warn( + "Pipe {}@{}: failed to increase reference count for terminate event, will resend it", + pipeName, + dataRegionId); + return null; + } + isTerminateSignalSent = true; return terminateEvent; } + if (!filteredTsFileResources.contains(resource)) { + final ProgressReportEvent progressReportEvent = + new ProgressReportEvent(pipeName, creationTime, pipeTaskMeta); + progressReportEvent.bindProgressIndex(resource.getMaxProgressIndex()); + final boolean isReferenceCountIncreased = + progressReportEvent.increaseReferenceCount( + PipeHistoricalDataRegionTsFileSource.class.getName()); + if (!isReferenceCountIncreased) { + LOGGER.warn( + "The reference count of the event {} cannot be increased, skipping it.", + progressReportEvent); + } + return isReferenceCountIncreased ? 
progressReportEvent : null; + } + + filteredTsFileResources.remove(resource); final PipeTsFileInsertionEvent event = new PipeTsFileInsertionEvent( resource, + null, shouldTransferModFile, false, - false, true, pipeName, creationTime, @@ -603,27 +596,36 @@ public synchronized Event supply() { event.skipParsingTime(); } - event.increaseReferenceCount(PipeHistoricalDataRegionTsFileExtractor.class.getName()); try { - PipeDataNodeResourceManager.tsfile().unpinTsFileResource(resource); - } catch (final IOException e) { - LOGGER.warn( - "Pipe {}@{}: failed to unpin TsFileResource after creating event, original path: {}", - pipeName, - dataRegionId, - resource.getTsFilePath()); + final boolean isReferenceCountIncreased = + event.increaseReferenceCount(PipeHistoricalDataRegionTsFileSource.class.getName()); + if (!isReferenceCountIncreased) { + LOGGER.warn( + "Pipe {}@{}: failed to increase reference count for historical event {}, will discard it", + pipeName, + dataRegionId, + event); + } + return isReferenceCountIncreased ? event : null; + } finally { + try { + PipeDataNodeResourceManager.tsfile().unpinTsFileResource(resource, pipeName); + } catch (final IOException e) { + LOGGER.warn( + "Pipe {}@{}: failed to unpin TsFileResource after creating event, original path: {}", + pipeName, + dataRegionId, + resource.getTsFilePath()); + } } - - return event; } @Override public synchronized boolean hasConsumedAll() { // If the pendingQueue is null when the function is called, it implies that the extractor only // extracts deletion thus the historical event has nothing to consume. - return Objects.isNull(pendingQueue) - || pendingQueue.isEmpty() - && (!shouldTerminatePipeOnAllHistoricalEventsConsumed || isTerminateSignalSent); + return hasBeenStarted + && (Objects.isNull(pendingQueue) || pendingQueue.isEmpty() && isTerminateSignalSent); } @Override @@ -637,7 +639,7 @@ public synchronized void close() { pendingQueue.forEach( resource -> { try { - PipeDataNodeResourceManager.tsfile().unpinTsFileResource(resource); + PipeDataNodeResourceManager.tsfile().unpinTsFileResource(resource, pipeName); } catch (final IOException e) { LOGGER.warn( "Pipe {}@{}: failed to unpin TsFileResource after dropping pipe, original path: {}", diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHeartbeatExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/PipeRealtimeDataRegionHeartbeatSource.java similarity index 89% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHeartbeatExtractor.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/PipeRealtimeDataRegionHeartbeatSource.java index 1df62eecc9247..4aac0617f3f8b 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHeartbeatExtractor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/PipeRealtimeDataRegionHeartbeatSource.java @@ -17,14 +17,14 @@ * under the License. 
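The supply() path above follows one recurring discipline: retain the event before handing it out, return null when the retain fails (the data backing the event is no longer reliable), and unpin the backing TsFile resource no matter what. A sketch of that pattern with invented stand-in types; tryRetain and unpin abbreviate increaseReferenceCount and unpinTsFileResource respectively:

public final class SupplySketch {

  interface RefCounted {
    boolean tryRetain(); // stands in for increaseReferenceCount(holderName)
  }

  interface Pinned {
    void unpin() throws Exception; // stands in for unpinTsFileResource(resource, pipeName)
  }

  static <E extends RefCounted> E supply(E event, Pinned resource) {
    try {
      // Hand the event out only if its backing data is still reliable.
      return event.tryRetain() ? event : null;
    } finally {
      try {
        resource.unpin();
      } catch (Exception e) {
        // The real code logs and continues; a failed unpin must not fail the supply.
      }
    }
  }

  public static void main(String[] args) {
    Pinned resource = () -> System.out.println("unpinned");
    RefCounted live = () -> true;
    RefCounted dead = () -> false;
    System.out.println(supply(live, resource) != null); // "unpinned" then true
    System.out.println(supply(dead, resource) != null); // "unpinned" then false
  }
}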
*/ -package org.apache.iotdb.db.pipe.extractor.dataregion.realtime; +package org.apache.iotdb.db.pipe.source.dataregion.realtime; import org.apache.iotdb.commons.pipe.event.ProgressReportEvent; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent; import org.apache.iotdb.pipe.api.event.Event; -public class PipeRealtimeDataRegionHeartbeatExtractor extends PipeRealtimeDataRegionExtractor { +public class PipeRealtimeDataRegionHeartbeatSource extends PipeRealtimeDataRegionSource { @Override public Event supply() { @@ -41,7 +41,7 @@ public Event supply() { } realtimeEvent.decreaseReferenceCount( - PipeRealtimeDataRegionHeartbeatExtractor.class.getName(), false); + PipeRealtimeDataRegionHeartbeatSource.class.getName(), false); if (suppliedEvent != null) { return suppliedEvent; @@ -59,7 +59,7 @@ protected void doExtract(final PipeRealtimeEvent event) { if (event.getEvent() instanceof PipeHeartbeatEvent) { extractHeartbeat(event); } else { - event.decreaseReferenceCount(PipeRealtimeDataRegionHeartbeatExtractor.class.getName(), false); + event.decreaseReferenceCount(PipeRealtimeDataRegionHeartbeatSource.class.getName(), false); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/PipeRealtimeDataRegionHybridSource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/PipeRealtimeDataRegionHybridSource.java new file mode 100644 index 0000000000000..081f365204704 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/PipeRealtimeDataRegionHybridSource.java @@ -0,0 +1,305 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
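The hybrid source added below decides, per TsFileEpoch, whether an epoch is served from tablets, from the TsFile, or from both. A sketch of the tablet-side state transition as implemented in extractTabletInsertion below; the enum mirrors TsFileEpoch.State, and the memory check is abstracted into a boolean:

public final class EpochStateSketch {

  enum State { EMPTY, USING_TABLET, USING_TSFILE, USING_BOTH }

  static State onTabletEvent(State current, boolean canNotUseTabletAnymore) {
    if (canNotUseTabletAnymore) {
      // Under memory pressure the epoch is forced onto the TsFile path.
      return State.USING_TSFILE;
    }
    switch (current) {
      case USING_BOTH:
      case USING_TSFILE:
        // A TsFile is already in play; keep both paths so no data is dropped.
        return State.USING_BOTH;
      case EMPTY:
      case USING_TABLET:
      default:
        return State.USING_TABLET;
    }
  }

  public static void main(String[] args) {
    System.out.println(onTabletEvent(State.EMPTY, false)); // USING_TABLET
    System.out.println(onTabletEvent(State.USING_TSFILE, false)); // USING_BOTH
    System.out.println(onTabletEvent(State.USING_TABLET, true)); // USING_TSFILE
  }
}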
+ */ + +package org.apache.iotdb.db.pipe.source.dataregion.realtime; + +import org.apache.iotdb.commons.exception.pipe.PipeRuntimeNonCriticalException; +import org.apache.iotdb.commons.pipe.event.ProgressReportEvent; +import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; +import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; +import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent; +import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; +import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent; +import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeRemainingEventAndTimeOperator; +import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics; +import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.assigner.PipeTsFileEpochProgressIndexKeeper; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.epoch.TsFileEpoch; +import org.apache.iotdb.pipe.api.event.Event; +import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; +import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Optional; + +public class PipeRealtimeDataRegionHybridSource extends PipeRealtimeDataRegionSource { + + private static final Logger LOGGER = + LoggerFactory.getLogger(PipeRealtimeDataRegionHybridSource.class); + + @Override + protected void doExtract(final PipeRealtimeEvent event) { + final Event eventToExtract = event.getEvent(); + + if (eventToExtract instanceof TabletInsertionEvent) { + extractTabletInsertion(event); + } else if (eventToExtract instanceof TsFileInsertionEvent) { + extractTsFileInsertion(event); + } else if (eventToExtract instanceof PipeHeartbeatEvent) { + extractHeartbeat(event); + } else if (eventToExtract instanceof PipeSchemaRegionWritePlanEvent) { + extractDirectly(event); + } else { + throw new UnsupportedOperationException( + String.format( + "Unsupported event type %s for hybrid realtime extractor %s", + eventToExtract.getClass(), this)); + } + } + + @Override + public boolean isNeedListenToTsFile() { + return shouldExtractInsertion; + } + + @Override + public boolean isNeedListenToInsertNode() { + return shouldExtractInsertion; + } + + private void extractTabletInsertion(final PipeRealtimeEvent event) { + TsFileEpoch.State state; + + if (canNotUseTabletAnymore(event)) { + event.getTsFileEpoch().migrateState(this, curState -> TsFileEpoch.State.USING_TSFILE); + PipeTsFileEpochProgressIndexKeeper.getInstance() + .registerProgressIndex(dataRegionId, pipeName, event.getTsFileEpoch().getResource()); + } else { + event + .getTsFileEpoch() + .migrateState( + this, + curState -> { + switch (curState) { + case USING_BOTH: + case USING_TSFILE: + return TsFileEpoch.State.USING_BOTH; + case EMPTY: + case USING_TABLET: + default: + return TsFileEpoch.State.USING_TABLET; + } + }); + } + + state = event.getTsFileEpoch().getState(this); + switch (state) { + case USING_TSFILE: + // Ignore the tablet event. + event.decreaseReferenceCount(PipeRealtimeDataRegionHybridSource.class.getName(), false); + break; + case EMPTY: + case USING_TABLET: + case USING_BOTH: + // USING_BOTH indicates that there are discarded events previously. + // In this case, we need to delay the progress report to tsFile event, to avoid losing data. 
+ if (state == TsFileEpoch.State.USING_BOTH) { + event.skipReportOnCommit(); + } + if (!pendingQueue.waitedOffer(event)) { + // This would not happen, but just in case. + // pendingQueue is unbounded, so it should never reach capacity. + final String errorMessage = + String.format( + "extractTabletInsertion: pending queue of PipeRealtimeDataRegionHybridExtractor %s " + + "has reached capacity, discard tablet event %s, current state %s", + this, event, event.getTsFileEpoch().getState(this)); + LOGGER.error(errorMessage); + PipeDataNodeAgent.runtime() + .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage)); + + // Ignore the tablet event. + event.decreaseReferenceCount(PipeRealtimeDataRegionHybridSource.class.getName(), false); + } + break; + default: + throw new UnsupportedOperationException( + String.format( + "Unsupported state %s for hybrid realtime extractor %s", + state, PipeRealtimeDataRegionHybridSource.class.getName())); + } + } + + private void extractTsFileInsertion(final PipeRealtimeEvent event) { + // Notice that, if the tsFile is partially extracted because the pipe is not opened before, the + // former data won't be extracted + event + .getTsFileEpoch() + .migrateState( + this, + state -> { + switch (state) { + case EMPTY: + return ((PipeTsFileInsertionEvent) event.getEvent()).isLoaded() + ? TsFileEpoch.State.USING_TSFILE + : TsFileEpoch.State.USING_TABLET; + case USING_TABLET: + return TsFileEpoch.State.USING_TABLET; + case USING_TSFILE: + return TsFileEpoch.State.USING_TSFILE; + case USING_BOTH: + default: + return canNotUseTabletAnymore(event) + ? TsFileEpoch.State.USING_TSFILE + : TsFileEpoch.State.USING_BOTH; + } + }); + + final TsFileEpoch.State state = event.getTsFileEpoch().getState(this); + switch (state) { + case USING_TABLET: + // If the state is USING_TABLET, discard the event + PipeTsFileEpochProgressIndexKeeper.getInstance() + .eliminateProgressIndex(dataRegionId, pipeName, event.getTsFileEpoch().getFilePath()); + event.decreaseReferenceCount(PipeRealtimeDataRegionHybridSource.class.getName(), false); + return; + case EMPTY: + case USING_TSFILE: + case USING_BOTH: + if (!pendingQueue.waitedOffer(event)) { + // This would not happen, but just in case. + // pendingQueue is unbounded, so it should never reach capacity. + final String errorMessage = + String.format( + "extractTsFileInsertion: pending queue of PipeRealtimeDataRegionHybridExtractor %s " + + "has reached capacity, discard TsFile event %s, current state %s", + this, event, event.getTsFileEpoch().getState(this)); + LOGGER.error(errorMessage); + PipeDataNodeAgent.runtime() + .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage)); + + // Ignore the tsfile event. + event.decreaseReferenceCount(PipeRealtimeDataRegionHybridSource.class.getName(), false); + } + break; + default: + throw new UnsupportedOperationException( + String.format( + "Unsupported state %s for hybrid realtime extractor %s", + state, PipeRealtimeDataRegionHybridSource.class.getName())); + } + } + + // If the insertNode's memory has reached the dangerous threshold, we should not extract any + // tablets. 
+ private boolean canNotUseTabletAnymore(final PipeRealtimeEvent event) { + final long floatingMemoryUsageInByte = + PipeDataNodeAgent.task().getFloatingMemoryUsageInByte(pipeName); + final long pipeCount = PipeDataNodeAgent.task().getPipeCount(); + final long totalFloatingMemorySizeInBytes = + PipeMemoryManager.getTotalFloatingMemorySizeInBytes(); + final boolean mayInsertNodeMemoryReachDangerousThreshold = + floatingMemoryUsageInByte * pipeCount >= totalFloatingMemorySizeInBytes; + if (mayInsertNodeMemoryReachDangerousThreshold && event.mayExtractorUseTablets(this)) { + final PipeDataNodeRemainingEventAndTimeOperator operator = + PipeDataNodeSinglePipeMetrics.getInstance().remainingEventAndTimeOperatorMap.get(pipeID); + LOGGER.info( + "Pipe task {}@{} canNotUseTabletAnyMore for tsFile {}: The memory usage of the insert node {} has reached the dangerous threshold of single pipe {}, event count: {}", + pipeName, + dataRegionId, + event.getTsFileEpoch().getFilePath(), + floatingMemoryUsageInByte, + totalFloatingMemorySizeInBytes / pipeCount, + Optional.ofNullable(operator) + .map(PipeDataNodeRemainingEventAndTimeOperator::getInsertNodeEventCount) + .orElse(0)); + } + return mayInsertNodeMemoryReachDangerousThreshold; + } + + @Override + public Event supply() { + PipeRealtimeEvent realtimeEvent = (PipeRealtimeEvent) pendingQueue.directPoll(); + + while (realtimeEvent != null) { + final Event suppliedEvent; + + // Used to judge the type of the event, not directly for supplying. + final Event eventToSupply = realtimeEvent.getEvent(); + if (eventToSupply instanceof TabletInsertionEvent) { + suppliedEvent = supplyTabletInsertion(realtimeEvent); + } else if (eventToSupply instanceof TsFileInsertionEvent) { + suppliedEvent = supplyTsFileInsertion(realtimeEvent); + } else if (eventToSupply instanceof PipeHeartbeatEvent) { + suppliedEvent = supplyHeartbeat(realtimeEvent); + } else if (eventToSupply instanceof PipeSchemaRegionWritePlanEvent + || eventToSupply instanceof ProgressReportEvent) { + suppliedEvent = supplyDirectly(realtimeEvent); + } else { + throw new UnsupportedOperationException( + String.format( + "Unsupported event type %s for hybrid realtime extractor %s to supply.", + eventToSupply.getClass(), this)); + } + + realtimeEvent.decreaseReferenceCount( + PipeRealtimeDataRegionHybridSource.class.getName(), false); + + if (suppliedEvent != null) { + maySkipIndex4Event(realtimeEvent); + return suppliedEvent; + } + + realtimeEvent = (PipeRealtimeEvent) pendingQueue.directPoll(); + } + + // Means the pending queue is empty. + return null; + } + + private Event supplyTabletInsertion(final PipeRealtimeEvent event) { + if (event.increaseReferenceCount(PipeRealtimeDataRegionHybridSource.class.getName())) { + return event.getEvent(); + } else { + // If the event's reference count can not be increased, it means the data represented by + // this event is not reliable anymore. but the data represented by this event + // has been carried by the following tsfile event, so we can just discard this event. + event.getTsFileEpoch().migrateState(this, s -> TsFileEpoch.State.USING_BOTH); + LOGGER.warn( + "Discard tablet event {} because it is not reliable anymore. 
" + + "Change the state of TsFileEpoch to USING_BOTH.", + event); + return null; + } + } + + private Event supplyTsFileInsertion(final PipeRealtimeEvent event) { + if (event.increaseReferenceCount(PipeRealtimeDataRegionHybridSource.class.getName())) { + return event.getEvent(); + } else { + // If the event's reference count can not be increased, it means the data represented by + // this event is not reliable anymore. the data has been lost. we simply discard this + // event and report the exception to PipeRuntimeAgent. + final String errorMessage = + String.format( + "TsFile Event %s can not be supplied because " + + "the reference count can not be increased, " + + "the data represented by this event is lost", + event.getEvent()); + LOGGER.error(errorMessage); + PipeDataNodeAgent.runtime() + .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage)); + PipeTsFileEpochProgressIndexKeeper.getInstance() + .eliminateProgressIndex(dataRegionId, pipeName, event.getTsFileEpoch().getFilePath()); + return null; + } + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionLogExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/PipeRealtimeDataRegionLogSource.java similarity index 85% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionLogExtractor.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/PipeRealtimeDataRegionLogSource.java index 4b300355c80e4..ad40109d9fbe2 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionLogExtractor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/PipeRealtimeDataRegionLogSource.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.db.pipe.extractor.dataregion.realtime; +package org.apache.iotdb.db.pipe.source.dataregion.realtime; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeNonCriticalException; import org.apache.iotdb.commons.pipe.event.ProgressReportEvent; @@ -26,7 +26,7 @@ import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent; import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.epoch.TsFileEpoch; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.epoch.TsFileEpoch; import org.apache.iotdb.pipe.api.event.Event; import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent; @@ -34,10 +34,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class PipeRealtimeDataRegionLogExtractor extends PipeRealtimeDataRegionExtractor { +public class PipeRealtimeDataRegionLogSource extends PipeRealtimeDataRegionSource { private static final Logger LOGGER = - LoggerFactory.getLogger(PipeRealtimeDataRegionLogExtractor.class); + LoggerFactory.getLogger(PipeRealtimeDataRegionLogSource.class); @Override protected void doExtract(PipeRealtimeEvent event) { @@ -75,20 +75,17 @@ private void extractTabletInsertion(PipeRealtimeEvent event) { .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage)); // ignore this event. 
- event.decreaseReferenceCount(PipeRealtimeDataRegionLogExtractor.class.getName(), false); + event.decreaseReferenceCount(PipeRealtimeDataRegionLogSource.class.getName(), false); } } private void extractTsFileInsertion(PipeRealtimeEvent event) { final PipeTsFileInsertionEvent tsFileInsertionEvent = (PipeTsFileInsertionEvent) event.getEvent(); - if (!(tsFileInsertionEvent.isLoaded() - // some insert nodes in the tsfile epoch are not captured by pipe - || tsFileInsertionEvent.getFileStartTime() - < event.getTsFileEpoch().getInsertNodeMinTime())) { + if (!(tsFileInsertionEvent.isLoaded())) { // All data in the tsfile epoch has been extracted in tablet mode, so we should // simply ignore this event. - event.decreaseReferenceCount(PipeRealtimeDataRegionLogExtractor.class.getName(), false); + event.decreaseReferenceCount(PipeRealtimeDataRegionLogSource.class.getName(), false); return; } @@ -107,7 +104,7 @@ private void extractTsFileInsertion(PipeRealtimeEvent event) { .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage)); // ignore this event. - event.decreaseReferenceCount(PipeRealtimeDataRegionLogExtractor.class.getName(), false); + event.decreaseReferenceCount(PipeRealtimeDataRegionLogSource.class.getName(), false); } } @@ -135,7 +132,7 @@ public Event supply() { || realtimeEvent.getEvent() instanceof ProgressReportEvent) { suppliedEvent = supplyDirectly(realtimeEvent); } else if (realtimeEvent.increaseReferenceCount( - PipeRealtimeDataRegionLogExtractor.class.getName())) { + PipeRealtimeDataRegionLogSource.class.getName())) { suppliedEvent = realtimeEvent.getEvent(); } else { // if the event's reference count can not be increased, it means the data represented by @@ -152,8 +149,7 @@ public Event supply() { .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage)); } - realtimeEvent.decreaseReferenceCount( - PipeRealtimeDataRegionLogExtractor.class.getName(), false); + realtimeEvent.decreaseReferenceCount(PipeRealtimeDataRegionLogSource.class.getName(), false); if (suppliedEvent != null) { return suppliedEvent; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/PipeRealtimeDataRegionSource.java similarity index 80% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/PipeRealtimeDataRegionSource.java index 55faf2db59cae..4f30452fb8b23 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/PipeRealtimeDataRegionSource.java @@ -17,25 +17,29 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.extractor.dataregion.realtime; +package org.apache.iotdb.db.pipe.source.dataregion.realtime; import org.apache.iotdb.commons.consensus.DataRegionId; +import org.apache.iotdb.commons.consensus.index.ProgressIndex; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeNonCriticalException; -import org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant; -import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskExtractorRuntimeEnvironment; +import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant; +import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskSourceRuntimeEnvironment; +import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; import org.apache.iotdb.commons.pipe.event.ProgressReportEvent; -import org.apache.iotdb.commons.pipe.pattern.PipePattern; -import org.apache.iotdb.commons.pipe.task.connection.UnboundedBlockingPendingQueue; -import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; import org.apache.iotdb.commons.utils.TimePartitionUtils; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; +import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; +import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent; -import org.apache.iotdb.db.pipe.extractor.dataregion.DataRegionListeningFilter; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.listener.PipeInsertionDataNodeListener; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.listener.PipeTimePartitionListener; -import org.apache.iotdb.db.pipe.metric.PipeDataRegionEventCounter; +import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionEventCounter; +import org.apache.iotdb.db.pipe.source.dataregion.DataRegionListeningFilter; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.assigner.PipeTsFileEpochProgressIndexKeeper; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.listener.PipeInsertionDataNodeListener; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.listener.PipeTimePartitionListener; import org.apache.iotdb.db.storageengine.StorageEngine; import org.apache.iotdb.db.storageengine.dataregion.DataRegion; import org.apache.iotdb.db.utils.DateTimeUtils; @@ -60,24 +64,23 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_END_TIME_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_MODS_ENABLE_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_MODS_ENABLE_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_LOOSE_RANGE_ALL_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_LOOSE_RANGE_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_LOOSE_RANGE_KEY; -import static 
org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_LOOSE_RANGE_PATH_VALUE;
-import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_LOOSE_RANGE_TIME_VALUE;
-import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_START_TIME_KEY;
-import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_END_TIME_KEY;
-import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_MODS_ENABLE_KEY;
-import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_REALTIME_LOOSE_RANGE_KEY;
-import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_START_TIME_KEY;
-
-public abstract class PipeRealtimeDataRegionExtractor implements PipeExtractor {
-
-  private static final Logger LOGGER =
-      LoggerFactory.getLogger(PipeRealtimeDataRegionExtractor.class);
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_END_TIME_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_MODS_ENABLE_DEFAULT_VALUE;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_MODS_ENABLE_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_LOOSE_RANGE_ALL_VALUE;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_LOOSE_RANGE_DEFAULT_VALUE;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_LOOSE_RANGE_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_LOOSE_RANGE_PATH_VALUE;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_REALTIME_LOOSE_RANGE_TIME_VALUE;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_START_TIME_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_END_TIME_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_MODS_ENABLE_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_REALTIME_LOOSE_RANGE_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_START_TIME_KEY;
+
+public abstract class PipeRealtimeDataRegionSource implements PipeExtractor {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(PipeRealtimeDataRegionSource.class);
 
   protected String pipeName;
   protected long creationTime;
@@ -117,9 +120,10 @@ public abstract class PipeRealtimeDataRegionExtractor implements PipeExtractor {
 
   protected final AtomicBoolean isClosed = new AtomicBoolean(false);
 
+  protected String pipeID;
   private String taskID;
 
-  protected PipeRealtimeDataRegionExtractor() {
+  protected PipeRealtimeDataRegionSource() {
     // Do nothing
   }
 
@@ -187,8 +191,8 @@ public void validate(final PipeParameterValidator validator) throws Exception {
   public void customize(
       final PipeParameters parameters, final PipeExtractorRuntimeConfiguration configuration)
       throws Exception {
-    final PipeTaskExtractorRuntimeEnvironment environment =
-        (PipeTaskExtractorRuntimeEnvironment) configuration.getRuntimeEnvironment();
+    final PipeTaskSourceRuntimeEnvironment environment =
+        (PipeTaskSourceRuntimeEnvironment) configuration.getRuntimeEnvironment();
     final Pair<Boolean, Boolean> insertionDeletionListeningOptionPair =
DataRegionListeningFilter.parseInsertionDeletionListeningOptionPair(parameters); @@ -204,6 +208,7 @@ public void customize( // holding a reference to IoTDBDataRegionExtractor, the taskID should be constructed to // match that of IoTDBDataRegionExtractor. creationTime = environment.getCreationTime(); + pipeID = pipeName + "_" + creationTime; taskID = pipeName + "_" + dataRegionId + "_" + creationTime; pipePattern = PipePattern.parsePipePatternFromSourceParameters(parameters); @@ -229,9 +234,9 @@ public void customize( isForwardingPipeRequests = parameters.getBooleanOrDefault( Arrays.asList( - PipeExtractorConstant.EXTRACTOR_FORWARDING_PIPE_REQUESTS_KEY, - PipeExtractorConstant.SOURCE_FORWARDING_PIPE_REQUESTS_KEY), - PipeExtractorConstant.EXTRACTOR_FORWARDING_PIPE_REQUESTS_DEFAULT_VALUE); + PipeSourceConstant.EXTRACTOR_FORWARDING_PIPE_REQUESTS_KEY, + PipeSourceConstant.SOURCE_FORWARDING_PIPE_REQUESTS_KEY), + PipeSourceConstant.EXTRACTOR_FORWARDING_PIPE_REQUESTS_DEFAULT_VALUE); shouldTransferModFile = parameters.getBooleanOrDefault( @@ -279,7 +284,7 @@ private void clearPendingQueue() { event -> { if (event instanceof EnrichedEvent) { ((EnrichedEvent) event) - .clearReferenceCount(PipeRealtimeDataRegionExtractor.class.getName()); + .clearReferenceCount(PipeRealtimeDataRegionSource.class.getName()); } }); } @@ -290,7 +295,7 @@ private void clearPendingQueue() { public final void extract(final PipeRealtimeEvent event) { // The progress report event shall be directly extracted if (event.getEvent() instanceof ProgressReportEvent) { - extractDirectly(event); + extractProgressReportEvent(event); return; } @@ -331,7 +336,7 @@ public final void extract(final PipeRealtimeEvent event) { doExtract(event); } else { - event.decreaseReferenceCount(PipeRealtimeDataRegionExtractor.class.getName(), false); + event.decreaseReferenceCount(PipeRealtimeDataRegionSource.class.getName(), false); } synchronized (isClosed) { @@ -355,7 +360,7 @@ protected void extractHeartbeat(final PipeRealtimeEvent event) { // If the last event in the pending queue is a heartbeat event, we should not extract any more // heartbeat events to avoid OOM when the pipe is stopped. // Besides, the printable event has higher priority to stay in queue to enable metrics report. - event.decreaseReferenceCount(PipeRealtimeDataRegionExtractor.class.getName(), false); + event.decreaseReferenceCount(PipeRealtimeDataRegionSource.class.getName(), false); return; } @@ -372,8 +377,20 @@ protected void extractHeartbeat(final PipeRealtimeEvent event) { // the correction of pipe progress. // Ignore this event. - event.decreaseReferenceCount(PipeRealtimeDataRegionExtractor.class.getName(), false); + event.decreaseReferenceCount(PipeRealtimeDataRegionSource.class.getName(), false); + } + } + + protected void extractProgressReportEvent(final PipeRealtimeEvent event) { + if (pendingQueue.peekLast() instanceof ProgressReportEvent) { + final ProgressReportEvent oldEvent = (ProgressReportEvent) pendingQueue.peekLast(); + oldEvent.bindProgressIndex( + oldEvent + .getProgressIndex() + .updateToMinimumEqualOrIsAfterProgressIndex(event.getProgressIndex())); + return; } + extractDirectly(event); } protected void extractDirectly(final PipeRealtimeEvent event) { @@ -389,12 +406,19 @@ protected void extractDirectly(final PipeRealtimeEvent event) { .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage)); // Ignore the event. 
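      // Even an ignored event must release the reference taken for it; the decrease below lets
      // the underlying resources be reclaimed once all other holders are done with them.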
- event.decreaseReferenceCount(PipeRealtimeDataRegionExtractor.class.getName(), false); + event.decreaseReferenceCount(PipeRealtimeDataRegionSource.class.getName(), false); + } + } + + protected void maySkipIndex4Event(final PipeRealtimeEvent event) { + if (event.getEvent() instanceof PipeTsFileInsertionEvent + || event.getEvent() instanceof PipeInsertNodeTabletInsertionEvent) { + maySkipProgressIndexForRealtimeEvent(event); } } protected Event supplyHeartbeat(final PipeRealtimeEvent event) { - if (event.increaseReferenceCount(PipeRealtimeDataRegionExtractor.class.getName())) { + if (event.increaseReferenceCount(PipeRealtimeDataRegionSource.class.getName())) { return event.getEvent(); } else { // This would not happen, but just in case. @@ -411,7 +435,7 @@ protected Event supplyHeartbeat(final PipeRealtimeEvent event) { } protected Event supplyDirectly(final PipeRealtimeEvent event) { - if (event.increaseReferenceCount(PipeRealtimeDataRegionExtractor.class.getName())) { + if (event.increaseReferenceCount(PipeRealtimeDataRegionSource.class.getName())) { return event.getEvent(); } else { // If the event's reference count can not be increased, it means the data represented by @@ -494,6 +518,30 @@ public final boolean isShouldTransferModFile() { return shouldTransferModFile; } + private void maySkipProgressIndexForRealtimeEvent(final PipeRealtimeEvent event) { + if (PipeTsFileEpochProgressIndexKeeper.getInstance() + .isProgressIndexAfterOrEquals( + dataRegionId, + pipeName, + event.getTsFileEpoch().getFilePath(), + getProgressIndex4RealtimeEvent(event))) { + event.skipReportOnCommit(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug( + "Pipe {} on data region {} skip commit of event {} because it was flushed prematurely.", + pipeName, + dataRegionId, + event.coreReportMessage()); + } + } + } + + private ProgressIndex getProgressIndex4RealtimeEvent(final PipeRealtimeEvent event) { + return event.getEvent() instanceof PipeTsFileInsertionEvent + ? ((PipeTsFileInsertionEvent) event.getEvent()).forceGetProgressIndex() + : event.getProgressIndex(); + } + @Override public String toString() { return "PipeRealtimeDataRegionExtractor{" diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionTsFileExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/PipeRealtimeDataRegionTsFileSource.java similarity index 79% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionTsFileExtractor.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/PipeRealtimeDataRegionTsFileSource.java index 8072499b3daff..a4473bcfb27a0 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionTsFileExtractor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/PipeRealtimeDataRegionTsFileSource.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.extractor.dataregion.realtime; +package org.apache.iotdb.db.pipe.source.dataregion.realtime; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeNonCriticalException; import org.apache.iotdb.commons.pipe.event.ProgressReportEvent; @@ -25,17 +25,18 @@ import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent; import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.epoch.TsFileEpoch; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.assigner.PipeTsFileEpochProgressIndexKeeper; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.epoch.TsFileEpoch; import org.apache.iotdb.pipe.api.event.Event; import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class PipeRealtimeDataRegionTsFileExtractor extends PipeRealtimeDataRegionExtractor { +public class PipeRealtimeDataRegionTsFileSource extends PipeRealtimeDataRegionSource { private static final Logger LOGGER = - LoggerFactory.getLogger(PipeRealtimeDataRegionTsFileExtractor.class); + LoggerFactory.getLogger(PipeRealtimeDataRegionTsFileSource.class); @Override protected void doExtract(PipeRealtimeEvent event) { @@ -50,9 +51,11 @@ protected void doExtract(PipeRealtimeEvent event) { } event.getTsFileEpoch().migrateState(this, state -> TsFileEpoch.State.USING_TSFILE); + PipeTsFileEpochProgressIndexKeeper.getInstance() + .registerProgressIndex(dataRegionId, pipeName, event.getTsFileEpoch().getResource()); if (!(event.getEvent() instanceof TsFileInsertionEvent)) { - event.decreaseReferenceCount(PipeRealtimeDataRegionTsFileExtractor.class.getName(), false); + event.decreaseReferenceCount(PipeRealtimeDataRegionTsFileSource.class.getName(), false); return; } @@ -69,7 +72,7 @@ protected void doExtract(PipeRealtimeEvent event) { .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage)); // Ignore the event. 
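      // The progress index registered with the keeper in doExtract above is balanced by the
      // eliminateProgressIndex calls on the discard/failure paths, so stale entries do not
      // skew later skip-on-commit checks.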
- event.decreaseReferenceCount(PipeRealtimeDataRegionTsFileExtractor.class.getName(), false); + event.decreaseReferenceCount(PipeRealtimeDataRegionTsFileSource.class.getName(), false); } } @@ -96,7 +99,7 @@ public Event supply() { || realtimeEvent.getEvent() instanceof ProgressReportEvent) { suppliedEvent = supplyDirectly(realtimeEvent); } else if (realtimeEvent.increaseReferenceCount( - PipeRealtimeDataRegionTsFileExtractor.class.getName())) { + PipeRealtimeDataRegionTsFileSource.class.getName())) { suppliedEvent = realtimeEvent.getEvent(); } else { // if the event's reference count can not be increased, it means the data represented by @@ -111,12 +114,16 @@ public Event supply() { LOGGER.error(errorMessage); PipeDataNodeAgent.runtime() .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage)); + PipeTsFileEpochProgressIndexKeeper.getInstance() + .eliminateProgressIndex( + dataRegionId, pipeName, realtimeEvent.getTsFileEpoch().getFilePath()); } realtimeEvent.decreaseReferenceCount( - PipeRealtimeDataRegionTsFileExtractor.class.getName(), false); + PipeRealtimeDataRegionTsFileSource.class.getName(), false); if (suppliedEvent != null) { + maySkipIndex4Event(realtimeEvent); return suppliedEvent; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/DisruptorQueue.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/assigner/DisruptorQueue.java similarity index 74% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/DisruptorQueue.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/assigner/DisruptorQueue.java index 0a576142a3953..4c3daa4879a44 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/DisruptorQueue.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/assigner/DisruptorQueue.java @@ -17,15 +17,13 @@ * under the License. 
 */
-package org.apache.iotdb.db.pipe.extractor.dataregion.realtime.assigner;
+package org.apache.iotdb.db.pipe.source.dataregion.realtime.assigner;
 
 import org.apache.iotdb.commons.concurrent.IoTDBDaemonThreadFactory;
 import org.apache.iotdb.commons.pipe.config.PipeConfig;
 import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
-import org.apache.iotdb.commons.pipe.metric.PipeEventCounter;
 import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
 import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent;
-import org.apache.iotdb.db.pipe.metric.PipeDataRegionEventCounter;
 import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
 import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock;
@@ -35,6 +33,8 @@
 import com.lmax.disruptor.dsl.Disruptor;
 import com.lmax.disruptor.dsl.ProducerType;
 
+import java.util.function.Consumer;
+
 import static org.apache.iotdb.commons.concurrent.ThreadName.PIPE_EXTRACTOR_DISRUPTOR;
 
 public class DisruptorQueue {
@@ -46,9 +46,11 @@ public class DisruptorQueue {
 
   private final Disruptor<EventContainer> disruptor;
   private final RingBuffer<EventContainer> ringBuffer;
 
-  private final PipeEventCounter eventCounter = new PipeDataRegionEventCounter();
+  private volatile boolean isClosed = false;
 
-  public DisruptorQueue(final EventHandler<PipeRealtimeEvent> eventHandler) {
+  public DisruptorQueue(
+      final EventHandler<PipeRealtimeEvent> eventHandler,
+      final Consumer<PipeRealtimeEvent> onAssignedHook) {
     final PipeConfig config = PipeConfig.getInstance();
     final int ringBufferSize = config.getPipeExtractorAssignerDisruptorRingBufferSize();
     final long ringBufferEntrySizeInBytes =
@@ -71,9 +73,9 @@ public DisruptorQueue(final EventHandler<PipeRealtimeEvent> eventHandler) {
         new BlockingWaitStrategy());
     disruptor.handleEventsWith(
         (container, sequence, endOfBatch) -> {
-          eventHandler.onEvent(container.getEvent(), sequence, endOfBatch);
-          EnrichedEvent innerEvent = container.getEvent().getEvent();
-          eventCounter.decreaseEventCount(innerEvent);
+          final PipeRealtimeEvent realtimeEvent = container.getEvent();
+          eventHandler.onEvent(realtimeEvent, sequence, endOfBatch);
+          onAssignedHook.accept(realtimeEvent);
         });
     disruptor.setDefaultExceptionHandler(new DisruptorQueueExceptionHandler());
@@ -81,19 +83,24 @@
   }
 
   public void publish(final PipeRealtimeEvent event) {
-    final EnrichedEvent internalEvent = event.getEvent();
-    if (internalEvent instanceof PipeHeartbeatEvent) {
-      ((PipeHeartbeatEvent) internalEvent).recordDisruptorSize(ringBuffer);
+    final EnrichedEvent innerEvent = event.getEvent();
+    if (innerEvent instanceof PipeHeartbeatEvent) {
+      ((PipeHeartbeatEvent) innerEvent).recordDisruptorSize(ringBuffer);
     }
     ringBuffer.publishEvent((container, sequence, o) -> container.setEvent(event), event);
-    eventCounter.increaseEventCount(internalEvent);
   }
 
-  public void clear() {
-    disruptor.halt();
+  public void shutdown() {
+    isClosed = true;
+    // use shutdown instead of halt to ensure all published events have been handled
+    disruptor.shutdown();
     allocatedMemoryBlock.close();
   }
 
+  public boolean isClosed() {
+    return isClosed;
+  }
+
   private static class EventContainer {
 
     private PipeRealtimeEvent event;
@@ -108,16 +115,4 @@ public void setEvent(final PipeRealtimeEvent event) {
       this.event = event;
     }
   }
-
-  public int getTabletInsertionEventCount() {
-    return eventCounter.getTabletInsertionEventCount();
-  }
-
-  public int getTsFileInsertionEventCount() {
-    return eventCounter.getTsFileInsertionEventCount();
-  }
-
-  public int getPipeHeartbeatEventCount() {
-    return
eventCounter.getPipeHeartbeatEventCount(); - } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/DisruptorQueueExceptionHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/assigner/DisruptorQueueExceptionHandler.java similarity index 95% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/DisruptorQueueExceptionHandler.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/assigner/DisruptorQueueExceptionHandler.java index d7b1beaf092c5..91ad0224fc538 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/DisruptorQueueExceptionHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/assigner/DisruptorQueueExceptionHandler.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.db.pipe.extractor.dataregion.realtime.assigner; +package org.apache.iotdb.db.pipe.source.dataregion.realtime.assigner; import com.lmax.disruptor.ExceptionHandler; import org.slf4j.Logger; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/assigner/PipeDataRegionAssigner.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/assigner/PipeDataRegionAssigner.java new file mode 100644 index 0000000000000..6a4f877bf4705 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/assigner/PipeDataRegionAssigner.java @@ -0,0 +1,237 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.iotdb.db.pipe.source.dataregion.realtime.assigner;
+
+import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
+import org.apache.iotdb.commons.pipe.event.ProgressReportEvent;
+import org.apache.iotdb.commons.pipe.metric.PipeEventCounter;
+import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
+import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent;
+import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent;
+import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEventFactory;
+import org.apache.iotdb.db.pipe.metric.source.PipeAssignerMetrics;
+import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionEventCounter;
+import org.apache.iotdb.db.pipe.source.dataregion.realtime.PipeRealtimeDataRegionSource;
+import org.apache.iotdb.db.pipe.source.dataregion.realtime.matcher.CachedSchemaPatternMatcher;
+import org.apache.iotdb.db.pipe.source.dataregion.realtime.matcher.PipeDataRegionMatcher;
+import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
+import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent;
+
+import org.apache.tsfile.utils.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.util.Set;
+
+public class PipeDataRegionAssigner implements Closeable {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(PipeDataRegionAssigner.class);
+
+  /**
+   * The {@link PipeDataRegionMatcher} is used to match the event with the extractor based on the
+   * pattern.
+   */
+  private final PipeDataRegionMatcher matcher;
+
+  /** The {@link DisruptorQueue} is used to assign the event to the extractor. */
+  private final DisruptorQueue disruptor;
+
+  private final String dataRegionId;
+
+  private final PipeEventCounter eventCounter = new PipeDataRegionEventCounter();
+
+  public String getDataRegionId() {
+    return dataRegionId;
+  }
+
+  public PipeDataRegionAssigner(final String dataRegionId) {
+    this.matcher = new CachedSchemaPatternMatcher();
+    this.disruptor = new DisruptorQueue(this::assignToExtractor, this::onAssignedHook);
+    this.dataRegionId = dataRegionId;
+    PipeAssignerMetrics.getInstance().register(this);
+  }
+
+  public void publishToAssign(final PipeRealtimeEvent event) {
+    if (!event.increaseReferenceCount(PipeDataRegionAssigner.class.getName())) {
+      LOGGER.warn(
+          "The reference count of the realtime event {} cannot be increased, skipping it.", event);
+      return;
+    }
+
+    final EnrichedEvent innerEvent = event.getEvent();
+    eventCounter.increaseEventCount(innerEvent);
+    if (innerEvent instanceof PipeHeartbeatEvent) {
+      ((PipeHeartbeatEvent) innerEvent).onPublished();
+    }
+
+    // use synchronized here for completely preventing reference count leaks under extreme thread
+    // scheduling when closing
+    synchronized (this) {
+      if (!disruptor.isClosed()) {
+        disruptor.publish(event);
+      } else {
+        onAssignedHook(event);
+      }
+    }
+  }
+
+  private void onAssignedHook(final PipeRealtimeEvent realtimeEvent) {
+    realtimeEvent.gcSchemaInfo();
+    realtimeEvent.decreaseReferenceCount(PipeDataRegionAssigner.class.getName(), false);
+
+    final EnrichedEvent innerEvent = realtimeEvent.getEvent();
+    if (innerEvent instanceof PipeHeartbeatEvent) {
+      ((PipeHeartbeatEvent) innerEvent).onAssigned();
+    }
+
+    eventCounter.decreaseEventCount(innerEvent);
+  }
+
+  private void assignToExtractor(
+      final PipeRealtimeEvent event, final long sequence, final boolean endOfBatch) {
+    if (disruptor.isClosed()) {
+      return;
+    }
+
+    final Pair<Set<PipeRealtimeDataRegionSource>, Set<PipeRealtimeDataRegionSource>>
matchedAndUnmatched = matcher.match(event); + + matchedAndUnmatched + .getLeft() + .forEach( + extractor -> { + if (disruptor.isClosed()) { + return; + } + + if (event.getEvent().isGeneratedByPipe() && !extractor.isForwardingPipeRequests()) { + final ProgressReportEvent reportEvent = + new ProgressReportEvent( + extractor.getPipeName(), + extractor.getCreationTime(), + extractor.getPipeTaskMeta()); + reportEvent.bindProgressIndex(event.getProgressIndex()); + if (!reportEvent.increaseReferenceCount(PipeDataRegionAssigner.class.getName())) { + LOGGER.warn( + "The reference count of the event {} cannot be increased, skipping it.", + reportEvent); + return; + } + extractor.extract(PipeRealtimeEventFactory.createRealtimeEvent(reportEvent)); + return; + } + + final PipeRealtimeEvent copiedEvent = + event.shallowCopySelfAndBindPipeTaskMetaForProgressReport( + extractor.getPipeName(), + extractor.getCreationTime(), + extractor.getPipeTaskMeta(), + extractor.getPipePattern(), + extractor.getRealtimeDataExtractionStartTime(), + extractor.getRealtimeDataExtractionEndTime()); + final EnrichedEvent innerEvent = copiedEvent.getEvent(); + if (innerEvent instanceof PipeTsFileInsertionEvent) { + final PipeTsFileInsertionEvent tsFileInsertionEvent = + (PipeTsFileInsertionEvent) innerEvent; + tsFileInsertionEvent.disableMod4NonTransferPipes( + extractor.isShouldTransferModFile()); + } + + if (!copiedEvent.increaseReferenceCount(PipeDataRegionAssigner.class.getName())) { + LOGGER.warn( + "The reference count of the event {} cannot be increased, skipping it.", + copiedEvent); + return; + } + extractor.extract(copiedEvent); + }); + + matchedAndUnmatched + .getRight() + .forEach( + extractor -> { + if (disruptor.isClosed()) { + return; + } + + final EnrichedEvent innerEvent = event.getEvent(); + if (innerEvent instanceof TabletInsertionEvent + || innerEvent instanceof TsFileInsertionEvent) { + final ProgressReportEvent reportEvent = + new ProgressReportEvent( + extractor.getPipeName(), + extractor.getCreationTime(), + extractor.getPipeTaskMeta()); + reportEvent.bindProgressIndex(event.getProgressIndex()); + if (!reportEvent.increaseReferenceCount(PipeDataRegionAssigner.class.getName())) { + LOGGER.warn( + "The reference count of the event {} cannot be increased, skipping it.", + reportEvent); + return; + } + extractor.extract(PipeRealtimeEventFactory.createRealtimeEvent(reportEvent)); + } + }); + } + + public void startAssignTo(final PipeRealtimeDataRegionSource extractor) { + matcher.register(extractor); + } + + public void stopAssignTo(final PipeRealtimeDataRegionSource extractor) { + matcher.deregister(extractor); + } + + public boolean notMoreExtractorNeededToBeAssigned() { + return matcher.getRegisterCount() == 0; + } + + /** + * Clear the matcher and disruptor. The method {@link PipeDataRegionAssigner#publishToAssign} + * should not be used after calling this method. 
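+   *
+   * <p>Note that {@code close()} and the enqueue path in {@code publishToAssign} synchronize on
+   * the same assigner instance: a concurrent publisher either wins the race and enqueues before
+   * the disruptor shuts down, or loses it and releases its freshly taken reference through
+   * {@code onAssignedHook}, so no reference count leaks either way.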
+ */ + @Override + // use synchronized here for completely preventing reference count leaks under extreme thread + // scheduling when closing + public synchronized void close() { + PipeAssignerMetrics.getInstance().deregister(dataRegionId); + + final long startTime = System.currentTimeMillis(); + disruptor.shutdown(); + matcher.clear(); + LOGGER.info( + "Pipe: Assigner on data region {} shutdown internal disruptor within {} ms", + dataRegionId, + System.currentTimeMillis() - startTime); + } + + public int getTabletInsertionEventCount() { + return eventCounter.getTabletInsertionEventCount(); + } + + public int getTsFileInsertionEventCount() { + return eventCounter.getTsFileInsertionEventCount(); + } + + public int getPipeHeartbeatEventCount() { + return eventCounter.getPipeHeartbeatEventCount(); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/assigner/PipeTsFileEpochProgressIndexKeeper.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/assigner/PipeTsFileEpochProgressIndexKeeper.java new file mode 100644 index 0000000000000..ff7d90c377d10 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/assigner/PipeTsFileEpochProgressIndexKeeper.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.iotdb.db.pipe.source.dataregion.realtime.assigner;
+
+import org.apache.iotdb.commons.consensus.index.ProgressIndex;
+import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
+
+import javax.annotation.Nonnull;
+
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Objects;
+import java.util.concurrent.ConcurrentHashMap;
+
+public class PipeTsFileEpochProgressIndexKeeper {
+
+  // data region id -> pipeName -> tsFile path -> max progress index
+  private final Map<String, Map<String, Map<String, TsFileResource>>> progressIndexKeeper =
+      new ConcurrentHashMap<>();
+
+  public synchronized void registerProgressIndex(
+      final String dataRegionId, final String pipeName, final TsFileResource resource) {
+    progressIndexKeeper
+        .computeIfAbsent(dataRegionId, k -> new ConcurrentHashMap<>())
+        .computeIfAbsent(pipeName, k -> new ConcurrentHashMap<>())
+        .putIfAbsent(resource.getTsFilePath(), resource);
+  }
+
+  public synchronized void eliminateProgressIndex(
+      final String dataRegionId, final @Nonnull String pipeName, final String filePath) {
+    progressIndexKeeper
+        .computeIfAbsent(dataRegionId, k -> new ConcurrentHashMap<>())
+        .computeIfAbsent(pipeName, k -> new ConcurrentHashMap<>())
+        .remove(filePath);
+  }
+
+  public synchronized boolean isProgressIndexAfterOrEquals(
+      final String dataRegionId,
+      final String pipeName,
+      final String tsFilePath,
+      final ProgressIndex progressIndex) {
+    return progressIndexKeeper
+        .computeIfAbsent(dataRegionId, k -> new ConcurrentHashMap<>())
+        .computeIfAbsent(pipeName, k -> new ConcurrentHashMap<>())
+        .entrySet()
+        .stream()
+        .filter(entry -> !Objects.equals(entry.getKey(), tsFilePath))
+        .map(Entry::getValue)
+        .filter(Objects::nonNull)
+        .anyMatch(resource -> !resource.getMaxProgressIndex().isAfter(progressIndex));
+  }
+
+  //////////////////////////// singleton ////////////////////////////
+
+  private static class PipeTimePartitionProgressIndexKeeperHolder {
+
+    private static final PipeTsFileEpochProgressIndexKeeper INSTANCE =
+        new PipeTsFileEpochProgressIndexKeeper();
+
+    private PipeTimePartitionProgressIndexKeeperHolder() {
+      // empty constructor
+    }
+  }
+
+  public static PipeTsFileEpochProgressIndexKeeper getInstance() {
+    return PipeTsFileEpochProgressIndexKeeper.PipeTimePartitionProgressIndexKeeperHolder.INSTANCE;
+  }
+
+  private PipeTsFileEpochProgressIndexKeeper() {
+    // empty constructor
+  }
+}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/epoch/TsFileEpoch.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/epoch/TsFileEpoch.java
similarity index 69%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/epoch/TsFileEpoch.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/epoch/TsFileEpoch.java
index abef1db549e6e..b1ba0d3708404 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/epoch/TsFileEpoch.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/epoch/TsFileEpoch.java
@@ -17,10 +17,11 @@
  * under the License.
 */
-package org.apache.iotdb.db.pipe.extractor.dataregion.realtime.epoch;
+package org.apache.iotdb.db.pipe.source.dataregion.realtime.epoch;
 
-import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionExtractor;
-import org.apache.iotdb.db.pipe.metric.PipeDataRegionExtractorMetrics;
+import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionSourceMetrics;
+import org.apache.iotdb.db.pipe.source.dataregion.realtime.PipeRealtimeDataRegionSource;
+import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
 
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
@@ -29,25 +30,25 @@
 
 public class TsFileEpoch {
 
-  private final String filePath;
-  private final ConcurrentMap<PipeRealtimeDataRegionExtractor, AtomicReference<State>>
+  private final TsFileResource resource;
+  private final ConcurrentMap<PipeRealtimeDataRegionSource, AtomicReference<State>>
       dataRegionExtractor2State;
   private final AtomicLong insertNodeMinTime;
 
-  public TsFileEpoch(String filePath) {
-    this.filePath = filePath;
+  public TsFileEpoch(final TsFileResource resource) {
+    this.resource = resource;
     this.dataRegionExtractor2State = new ConcurrentHashMap<>();
     this.insertNodeMinTime = new AtomicLong(Long.MAX_VALUE);
   }
 
-  public TsFileEpoch.State getState(PipeRealtimeDataRegionExtractor extractor) {
+  public TsFileEpoch.State getState(final PipeRealtimeDataRegionSource extractor) {
     return dataRegionExtractor2State
         .computeIfAbsent(extractor, o -> new AtomicReference<>(State.EMPTY))
         .get();
   }
 
   public void migrateState(
-      PipeRealtimeDataRegionExtractor extractor, TsFileEpochStateMigrator visitor) {
+      final PipeRealtimeDataRegionSource extractor, final TsFileEpochStateMigrator visitor) {
     dataRegionExtractor2State
         .computeIfAbsent(extractor, o -> new AtomicReference<>(State.EMPTY))
         .getAndUpdate(visitor::migrate);
@@ -56,23 +57,27 @@ public void migrateState(
   public void setExtractorsRecentProcessedTsFileEpochState() {
     dataRegionExtractor2State.forEach(
         (extractor, state) ->
-            PipeDataRegionExtractorMetrics.getInstance()
+            PipeDataRegionSourceMetrics.getInstance()
                 .setRecentProcessedTsFileEpochState(extractor.getTaskID(), state.get()));
   }
 
-  public void updateInsertNodeMinTime(long newComingMinTime) {
+  public void updateInsertNodeMinTime(final long newComingMinTime) {
     insertNodeMinTime.updateAndGet(recordedMinTime -> Math.min(recordedMinTime, newComingMinTime));
   }
 
-  public long getInsertNodeMinTime() {
-    return insertNodeMinTime.get();
+  public TsFileResource getResource() {
+    return resource;
+  }
+
+  public String getFilePath() {
+    return resource.getTsFilePath();
   }
 
   @Override
   public String toString() {
     return "TsFileEpoch{"
-        + "filePath='"
-        + filePath
+        + "resource='"
+        + resource
        + '\''
        + ", dataRegionExtractor2State="
        + dataRegionExtractor2State
@@ -90,7 +95,7 @@ public enum State {
 
     private final int id;
 
-    State(int id) {
+    State(final int id) {
       this.id = id;
     }
 
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/epoch/TsFileEpochManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/epoch/TsFileEpochManager.java
similarity index 96%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/epoch/TsFileEpochManager.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/epoch/TsFileEpochManager.java
index 6ee0a73845265..675a5b385de4f 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/epoch/TsFileEpochManager.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/epoch/TsFileEpochManager.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.db.pipe.extractor.dataregion.realtime.epoch; +package org.apache.iotdb.db.pipe.source.dataregion.realtime.epoch; import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; @@ -56,7 +56,7 @@ public PipeRealtimeEvent bindPipeTsFileInsertionEvent( filePath, path -> { LOGGER.info("TsFileEpoch not found for TsFile {}, creating a new one", path); - return new TsFileEpoch(path); + return new TsFileEpoch(resource); }); final TsFileEpoch epoch = filePath2Epoch.remove(filePath); @@ -79,7 +79,7 @@ public PipeRealtimeEvent bindPipeTsFileInsertionEvent( public PipeRealtimeEvent bindPipeInsertNodeTabletInsertionEvent( PipeInsertNodeTabletInsertionEvent event, InsertNode node, TsFileResource resource) { final TsFileEpoch epoch = - filePath2Epoch.computeIfAbsent(resource.getTsFilePath(), TsFileEpoch::new); + filePath2Epoch.computeIfAbsent(resource.getTsFilePath(), k -> new TsFileEpoch(resource)); epoch.updateInsertNodeMinTime(node.getMinTime()); return new PipeRealtimeEvent( event, diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/epoch/TsFileEpochStateMigrator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/epoch/TsFileEpochStateMigrator.java similarity index 93% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/epoch/TsFileEpochStateMigrator.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/epoch/TsFileEpochStateMigrator.java index 356dc93d72797..f05f7619b4f0c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/epoch/TsFileEpochStateMigrator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/epoch/TsFileEpochStateMigrator.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.db.pipe.extractor.dataregion.realtime.epoch; +package org.apache.iotdb.db.pipe.source.dataregion.realtime.epoch; @FunctionalInterface public interface TsFileEpochStateMigrator { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/listener/PipeInsertionDataNodeListener.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/listener/PipeInsertionDataNodeListener.java similarity index 86% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/listener/PipeInsertionDataNodeListener.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/listener/PipeInsertionDataNodeListener.java index 3eb118701a78e..d6cfa6f6abc6f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/listener/PipeInsertionDataNodeListener.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/listener/PipeInsertionDataNodeListener.java @@ -17,17 +17,16 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.extractor.dataregion.realtime.listener; +package org.apache.iotdb.db.pipe.source.dataregion.realtime.listener; import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEventFactory; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionExtractor; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.assigner.PipeDataRegionAssigner; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.PipeRealtimeDataRegionSource; +import org.apache.iotdb.db.pipe.source.dataregion.realtime.assigner.PipeDataRegionAssigner; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -55,7 +54,7 @@ public class PipeInsertionDataNodeListener { //////////////////////////// start & stop //////////////////////////// public synchronized void startListenAndAssign( - String dataRegionId, PipeRealtimeDataRegionExtractor extractor) { + String dataRegionId, PipeRealtimeDataRegionSource extractor) { dataRegionId2Assigner .computeIfAbsent(dataRegionId, o -> new PipeDataRegionAssigner(dataRegionId)) .startAssignTo(extractor); @@ -69,7 +68,7 @@ public synchronized void startListenAndAssign( } public synchronized void stopListenAndAssign( - String dataRegionId, PipeRealtimeDataRegionExtractor extractor) { + String dataRegionId, PipeRealtimeDataRegionSource extractor) { final PipeDataRegionAssigner assigner = dataRegionId2Assigner.get(dataRegionId); if (assigner == null) { return; @@ -95,10 +94,7 @@ public synchronized void stopListenAndAssign( //////////////////////////// listen to events //////////////////////////// public void listenToTsFile( - String dataRegionId, - TsFileResource tsFileResource, - boolean isLoaded, - boolean isGeneratedByPipe) { + final String dataRegionId, final TsFileResource tsFileResource, final boolean isLoaded) { // We don't judge whether listenToTsFileExtractorCount.get() == 0 here on purpose // because extractors may use tsfile events when some exceptions occur in the // insert nodes listening process. 
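    // Put differently, the tsFile channel stays open as a fallback: a hybrid extractor that
    // degrades an epoch to USING_TSFILE after tablet extraction fails still needs the
    // corresponding tsFile event to be published here.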
@@ -111,14 +107,11 @@ public void listenToTsFile( } assigner.publishToAssign( - PipeRealtimeEventFactory.createRealtimeEvent(tsFileResource, isLoaded, isGeneratedByPipe)); + PipeRealtimeEventFactory.createRealtimeEvent(tsFileResource, isLoaded)); } public void listenToInsertNode( - String dataRegionId, - WALEntryHandler walEntryHandler, - InsertNode insertNode, - TsFileResource tsFileResource) { + String dataRegionId, InsertNode insertNode, TsFileResource tsFileResource) { if (listenToInsertNodeExtractorCount.get() == 0) { return; } @@ -131,7 +124,7 @@ public void listenToInsertNode( } assigner.publishToAssign( - PipeRealtimeEventFactory.createRealtimeEvent(walEntryHandler, insertNode, tsFileResource)); + PipeRealtimeEventFactory.createRealtimeEvent(insertNode, tsFileResource)); } public void listenToHeartbeat(boolean shouldPrintMessage) { @@ -146,6 +139,10 @@ public void listenToDeleteData(DeleteDataNode node) { (key, value) -> value.publishToAssign(PipeRealtimeEventFactory.createRealtimeEvent(node))); } + public boolean isEmpty() { + return dataRegionId2Assigner.isEmpty(); + } + /////////////////////////////// singleton /////////////////////////////// private PipeInsertionDataNodeListener() { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/listener/PipeTimePartitionListener.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/listener/PipeTimePartitionListener.java similarity index 87% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/listener/PipeTimePartitionListener.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/listener/PipeTimePartitionListener.java index b3cff5103ca07..ff66149e3d32a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/listener/PipeTimePartitionListener.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/listener/PipeTimePartitionListener.java @@ -17,9 +17,9 @@ * under the License. 
 */
-package org.apache.iotdb.db.pipe.extractor.dataregion.realtime.listener;
+package org.apache.iotdb.db.pipe.source.dataregion.realtime.listener;
 
-import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionExtractor;
+import org.apache.iotdb.db.pipe.source.dataregion.realtime.PipeRealtimeDataRegionSource;
 
 import org.apache.tsfile.utils.Pair;
 import org.checkerframework.checker.nullness.qual.NonNull;
@@ -31,7 +31,7 @@
 
 public class PipeTimePartitionListener {
 
-  private final Map<String, Map<String, PipeRealtimeDataRegionExtractor>> dataRegionId2Extractors =
+  private final Map<String, Map<String, PipeRealtimeDataRegionSource>> dataRegionId2Extractors =
       new ConcurrentHashMap<>();
 
   // This variable is used to record the upper and lower bounds that each data region's time
@@ -42,7 +42,7 @@
 
   //////////////////////////// start & stop ////////////////////////////
 
   public synchronized void startListen(
-      String dataRegionId, PipeRealtimeDataRegionExtractor extractor) {
+      String dataRegionId, PipeRealtimeDataRegionSource extractor) {
     dataRegionId2Extractors
         .computeIfAbsent(dataRegionId, o -> new HashMap<>())
         .put(extractor.getTaskID(), extractor);
@@ -54,9 +54,8 @@
     }
   }
 
-  public synchronized void stopListen(
-      String dataRegionId, PipeRealtimeDataRegionExtractor extractor) {
-    Map<String, PipeRealtimeDataRegionExtractor> extractors =
+  public synchronized void stopListen(String dataRegionId, PipeRealtimeDataRegionSource extractor) {
+    Map<String, PipeRealtimeDataRegionSource> extractors =
         dataRegionId2Extractors.get(dataRegionId);
     if (Objects.isNull(extractors)) {
       return;
@@ -88,7 +87,7 @@ public synchronized void listenToTimePartitionGrow(
     }
 
     if (shouldBroadcastTimePartitionChange) {
-      Map<String, PipeRealtimeDataRegionExtractor> extractors =
+      Map<String, PipeRealtimeDataRegionSource> extractors =
          dataRegionId2Extractors.get(dataRegionId);
      if (Objects.isNull(extractors)) {
        return;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/pattern/CachedSchemaPatternMatcher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/matcher/CachedSchemaPatternMatcher.java
similarity index 75%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/pattern/CachedSchemaPatternMatcher.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/matcher/CachedSchemaPatternMatcher.java
index 4be44a0fecc4f..2c68331705fb9 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/pattern/CachedSchemaPatternMatcher.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/matcher/CachedSchemaPatternMatcher.java
@@ -17,27 +17,28 @@
  * under the License.
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/pattern/CachedSchemaPatternMatcher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/matcher/CachedSchemaPatternMatcher.java
similarity index 75%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/pattern/CachedSchemaPatternMatcher.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/matcher/CachedSchemaPatternMatcher.java
index 4be44a0fecc4f..2c68331705fb9 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/pattern/CachedSchemaPatternMatcher.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/matcher/CachedSchemaPatternMatcher.java
@@ -17,27 +17,28 @@
  * under the License.
  */
-package org.apache.iotdb.db.pipe.pattern;
+package org.apache.iotdb.db.pipe.source.dataregion.realtime.matcher;

 import org.apache.iotdb.commons.pipe.config.PipeConfig;
-import org.apache.iotdb.commons.pipe.pattern.PipePattern;
+import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
 import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
 import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent;
 import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent;
-import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionExtractor;
+import org.apache.iotdb.db.pipe.source.dataregion.realtime.PipeRealtimeDataRegionSource;

 import com.github.benmanes.caffeine.cache.Cache;
 import com.github.benmanes.caffeine.cache.Caffeine;
+import org.apache.tsfile.utils.Pair;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+import java.util.Collections;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.CopyOnWriteArraySet;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.stream.Collectors;

 public class CachedSchemaPatternMatcher implements PipeDataRegionMatcher {
@@ -45,8 +46,8 @@ public class CachedSchemaPatternMatcher implements PipeDataRegionMatcher {

   protected final ReentrantReadWriteLock lock;

-  protected final Set<PipeRealtimeDataRegionExtractor> extractors;
-  protected final Cache<String, Set<PipeRealtimeDataRegionExtractor>> deviceToExtractorsCache;
+  protected final Set<PipeRealtimeDataRegionSource> extractors;
+  protected final Cache<String, Set<PipeRealtimeDataRegionSource>> deviceToExtractorsCache;

   public CachedSchemaPatternMatcher() {
     this.lock = new ReentrantReadWriteLock();
@@ -61,7 +62,7 @@ public CachedSchemaPatternMatcher() {
   }

   @Override
-  public void register(final PipeRealtimeDataRegionExtractor extractor) {
+  public void register(final PipeRealtimeDataRegionSource extractor) {
     lock.writeLock().lock();
     try {
       extractors.add(extractor);
@@ -72,7 +73,7 @@ public void register(final PipeRealtimeDataRegionExtractor extractor) {
   }

   @Override
-  public void deregister(final PipeRealtimeDataRegionExtractor extractor) {
+  public void deregister(final PipeRealtimeDataRegionSource extractor) {
     lock.writeLock().lock();
     try {
       extractors.remove(extractor);
@@ -93,25 +94,27 @@ public int getRegisterCount() {
   }

   @Override
-  public Set<PipeRealtimeDataRegionExtractor> match(final PipeRealtimeEvent event) {
-    final Set<PipeRealtimeDataRegionExtractor> matchedExtractors = new HashSet<>();
+  public Pair<Set<PipeRealtimeDataRegionSource>, Set<PipeRealtimeDataRegionSource>> match(
+      final PipeRealtimeEvent event) {
+    final Set<PipeRealtimeDataRegionSource> matchedExtractors = new HashSet<>();

     lock.readLock().lock();
     try {
       if (extractors.isEmpty()) {
-        return matchedExtractors;
+        return new Pair<>(matchedExtractors, extractors);
       }

       // HeartbeatEvent will be assigned to all extractors
       if (event.getEvent() instanceof PipeHeartbeatEvent) {
-        return extractors;
+        return new Pair<>(extractors, Collections.EMPTY_SET);
       }

       // Deletion event will be assigned to extractors listened to it
       if (event.getEvent() instanceof PipeSchemaRegionWritePlanEvent) {
-        return extractors.stream()
-            .filter(PipeRealtimeDataRegionExtractor::shouldExtractDeletion)
-            .collect(Collectors.toSet());
+        extractors.stream()
+            .filter(PipeRealtimeDataRegionSource::shouldExtractDeletion)
+            .forEach(matchedExtractors::add);
+        return new Pair<>(matchedExtractors, findUnmatchedExtractors(matchedExtractors));
       }

       for (final Map.Entry<String, String[]> entry : event.getSchemaInfo().entrySet()) {
@@ -119,7 +122,7 @@ public Set<PipeRealtimeDataRegionExtractor> match(final PipeRealtimeEvent event)
         final String[] measurements = entry.getValue();

         // 1. try to get matched extractors from cache; if that fails, match them by device
-        final Set<PipeRealtimeDataRegionExtractor> extractorsFilteredByDevice =
+        final Set<PipeRealtimeDataRegionSource> extractorsFilteredByDevice =
             deviceToExtractorsCache.get(device, this::filterExtractorsByDevice);
         // this would not happen
         if (extractorsFilteredByDevice == null) {
@@ -169,17 +172,28 @@ public Set<PipeRealtimeDataRegionExtractor> match(final PipeRealtimeEvent event)
           break;
         }
       }
+
+      return new Pair<>(matchedExtractors, findUnmatchedExtractors(matchedExtractors));
     } finally {
       lock.readLock().unlock();
     }
+  }

-    return matchedExtractors;
+  private Set<PipeRealtimeDataRegionSource> findUnmatchedExtractors(
+      final Set<PipeRealtimeDataRegionSource> matchedExtractors) {
+    final Set<PipeRealtimeDataRegionSource> unmatchedExtractors = new HashSet<>();
+    for (final PipeRealtimeDataRegionSource extractor : extractors) {
+      if (!matchedExtractors.contains(extractor)) {
+        unmatchedExtractors.add(extractor);
+      }
+    }
+    return unmatchedExtractors;
   }

-  protected Set<PipeRealtimeDataRegionExtractor> filterExtractorsByDevice(final String device) {
-    final Set<PipeRealtimeDataRegionExtractor> filteredExtractors = new HashSet<>();
+  protected Set<PipeRealtimeDataRegionSource> filterExtractorsByDevice(final String device) {
+    final Set<PipeRealtimeDataRegionSource> filteredExtractors = new HashSet<>();

-    for (final PipeRealtimeDataRegionExtractor extractor : extractors) {
+    for (final PipeRealtimeDataRegionSource extractor : extractors) {
       // Return if the extractor only extracts deletion
       if (!extractor.shouldExtractInsertion()) {
         continue;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/pattern/PipeDataRegionMatcher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/matcher/PipeDataRegionMatcher.java
similarity index 71%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/pattern/PipeDataRegionMatcher.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/matcher/PipeDataRegionMatcher.java
index 5132d6fb49484..810e42a92b228 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/pattern/PipeDataRegionMatcher.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/dataregion/realtime/matcher/PipeDataRegionMatcher.java
@@ -17,10 +17,12 @@
  * under the License.
  */
-package org.apache.iotdb.db.pipe.pattern;
+package org.apache.iotdb.db.pipe.source.dataregion.realtime.matcher;

 import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent;
-import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionExtractor;
+import org.apache.iotdb.db.pipe.source.dataregion.realtime.PipeRealtimeDataRegionSource;
+
+import org.apache.tsfile.utils.Pair;

 import java.util.Set;

@@ -30,22 +32,23 @@ public interface PipeDataRegionMatcher {
   /**
    * Register an extractor. If the extractor's pattern matches the event's schema info, the event
    * will be assigned to the extractor.
    */
-  void register(PipeRealtimeDataRegionExtractor extractor);
+  void register(PipeRealtimeDataRegionSource extractor);

   /** Deregister an extractor. */
-  void deregister(PipeRealtimeDataRegionExtractor extractor);
+  void deregister(PipeRealtimeDataRegionSource extractor);

   /** Get the number of registered extractors in this matcher. */
   int getRegisterCount();

   /**
    * Match the event's schema info with the registered extractors' patterns. If the event's schema
-   * info matches the pattern of a extractor, the extractor will be returned.
+   * info matches the pattern of an extractor, the extractor will be returned.
    *
    * @param event the event to be matched
-   * @return the matched extractors
+   * @return the pair of matched extractors and unmatched extractors
    */
-  Set<PipeRealtimeDataRegionExtractor> match(PipeRealtimeEvent event);
+  Pair<Set<PipeRealtimeDataRegionSource>, Set<PipeRealtimeDataRegionSource>> match(
+      PipeRealtimeEvent event);

   /** Clear all the registered extractors and internal data structures. */
   void clear();
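A short sketch of how a caller might consume the new matched/unmatched split returned by match(). Pair exposes public left/right fields in org.apache.tsfile.utils.Pair; the per-source handoff shown here is an assumption for illustration, not code from this patch:

// Matched sources receive the event; unmatched sources are now reported too,
// so the caller can, for example, release the event's reference for them early.
void assign(final PipeDataRegionMatcher matcher, final PipeRealtimeEvent event) {
  final Pair<Set<PipeRealtimeDataRegionSource>, Set<PipeRealtimeDataRegionSource>> matchResult =
      matcher.match(event);
  for (final PipeRealtimeDataRegionSource source : matchResult.left) {
    source.extract(event); // assumed per-source handoff
  }
  // matchResult.right holds the sources whose patterns did not match.
}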
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/schemaregion/IoTDBSchemaRegionExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/schemaregion/IoTDBSchemaRegionSource.java
similarity index 91%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/schemaregion/IoTDBSchemaRegionExtractor.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/schemaregion/IoTDBSchemaRegionSource.java
index 209593979c410..93e2c6bd3d72c 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/schemaregion/IoTDBSchemaRegionExtractor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/schemaregion/IoTDBSchemaRegionSource.java
@@ -17,14 +17,14 @@
  * under the License.
  */
-package org.apache.iotdb.db.pipe.extractor.schemaregion;
+package org.apache.iotdb.db.pipe.source.schemaregion;

 import org.apache.iotdb.commons.consensus.SchemaRegionId;
 import org.apache.iotdb.commons.pipe.datastructure.queue.listening.AbstractPipeListeningQueue;
 import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
 import org.apache.iotdb.commons.pipe.event.PipeSnapshotEvent;
 import org.apache.iotdb.commons.pipe.event.PipeWritePlanEvent;
-import org.apache.iotdb.commons.pipe.extractor.IoTDBNonDataRegionExtractor;
+import org.apache.iotdb.commons.pipe.source.IoTDBNonDataRegionSource;
 import org.apache.iotdb.consensus.ConsensusFactory;
 import org.apache.iotdb.consensus.exception.ConsensusException;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
@@ -32,8 +32,8 @@
 import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
 import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionSnapshotEvent;
 import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent;
-import org.apache.iotdb.db.pipe.metric.PipeDataNodeRemainingEventAndTimeMetrics;
-import org.apache.iotdb.db.pipe.metric.PipeSchemaRegionExtractorMetrics;
+import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics;
+import org.apache.iotdb.db.pipe.metric.schema.PipeSchemaRegionSourceMetrics;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeType;
@@ -48,7 +48,7 @@
 import java.util.Optional;
 import java.util.Set;

-public class IoTDBSchemaRegionExtractor extends IoTDBNonDataRegionExtractor {
+public class IoTDBSchemaRegionSource extends IoTDBNonDataRegionSource {

   public static final PipePlanPatternParseVisitor PATTERN_PARSE_VISITOR =
       new PipePlanPatternParseVisitor();
@@ -74,8 +74,8 @@ public void customize(
     schemaRegionId = new SchemaRegionId(regionId);
     listenedTypeSet = SchemaRegionListeningFilter.parseListeningPlanTypeSet(parameters);

-    PipeSchemaRegionExtractorMetrics.getInstance().register(this);
-    PipeDataNodeRemainingEventAndTimeMetrics.getInstance().register(this);
+    PipeSchemaRegionSourceMetrics.getInstance().register(this);
+    PipeDataNodeSinglePipeMetrics.getInstance().register(this);
   }

   @Override
@@ -170,7 +170,7 @@ public synchronized void close() throws Exception {
       PipeDataNodeAgent.runtime().decreaseAndGetSchemaListenerReferenceCount(schemaRegionId);
     }
     if (Objects.nonNull(taskID)) {
-
PipeSchemaRegionExtractorMetrics.getInstance().deregister(taskID); + PipeSchemaRegionSourceMetrics.getInstance().deregister(taskID); } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/schemaregion/PipePlanPatternParseVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/schemaregion/PipePlanPatternParseVisitor.java similarity index 99% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/schemaregion/PipePlanPatternParseVisitor.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/schemaregion/PipePlanPatternParseVisitor.java index c3669d1b0b27a..80b0302a4fb5f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/schemaregion/PipePlanPatternParseVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/schemaregion/PipePlanPatternParseVisitor.java @@ -17,10 +17,10 @@ * under the License. */ -package org.apache.iotdb.db.pipe.extractor.schemaregion; +package org.apache.iotdb.db.pipe.source.schemaregion; import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.pipe.pattern.IoTDBPipePattern; +import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBPipePattern; import org.apache.iotdb.commons.schema.view.viewExpression.ViewExpression; import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/schemaregion/SchemaRegionListeningFilter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/schemaregion/SchemaRegionListeningFilter.java similarity index 94% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/schemaregion/SchemaRegionListeningFilter.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/schemaregion/SchemaRegionListeningFilter.java index 4b6233b9aaf80..71d14474f0c13 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/schemaregion/SchemaRegionListeningFilter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/schemaregion/SchemaRegionListeningFilter.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.extractor.schemaregion; +package org.apache.iotdb.db.pipe.source.schemaregion; import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.path.PartialPath; @@ -35,12 +35,12 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_EXCLUSION_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_EXCLUSION_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_INCLUSION_DEFAULT_VALUE; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_INCLUSION_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_EXCLUSION_KEY; -import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_INCLUSION_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_EXCLUSION_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_EXCLUSION_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_INCLUSION_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.EXTRACTOR_INCLUSION_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_EXCLUSION_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant.SOURCE_INCLUSION_KEY; import static org.apache.iotdb.commons.pipe.datastructure.options.PipeInclusionOptions.parseOptions; /** diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/schemaregion/SchemaRegionListeningQueue.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/schemaregion/SchemaRegionListeningQueue.java similarity index 97% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/schemaregion/SchemaRegionListeningQueue.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/schemaregion/SchemaRegionListeningQueue.java index 70ad8b6df246c..36e61cb062af7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/schemaregion/SchemaRegionListeningQueue.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/source/schemaregion/SchemaRegionListeningQueue.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.apache.iotdb.db.pipe.extractor.schemaregion; +package org.apache.iotdb.db.pipe.source.schemaregion; import org.apache.iotdb.commons.pipe.datastructure.queue.listening.AbstractPipeListeningQueue; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; @@ -103,7 +103,7 @@ protected Event deserializeFromByteBuffer(final ByteBuffer byteBuffer) { public synchronized boolean createSnapshot(final File snapshotDir) { try { return super.serializeToFile(new File(snapshotDir, SNAPSHOT_FILE_NAME)); - } catch (final IOException e) { + } catch (final Exception e) { LOGGER.warn("Take snapshot error: {}", e.getMessage()); return false; } @@ -112,7 +112,7 @@ public synchronized boolean createSnapshot(final File snapshotDir) { public synchronized void loadSnapshot(final File snapshotDir) { try { super.deserializeFromFile(new File(snapshotDir, SNAPSHOT_FILE_NAME)); - } catch (final IOException e) { + } catch (final Exception e) { LOGGER.error("Failed to load snapshot {}", e.getMessage()); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/builder/PipeDataNodeTaskBuilder.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/builder/PipeDataNodeTaskBuilder.java deleted file mode 100644 index 31f46e73e3ae9..0000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/builder/PipeDataNodeTaskBuilder.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */
-
-package org.apache.iotdb.db.pipe.task.builder;
-
-import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex;
-import org.apache.iotdb.commons.pipe.config.constant.SystemConstant;
-import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta;
-import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta;
-import org.apache.iotdb.commons.pipe.task.meta.PipeType;
-import org.apache.iotdb.db.pipe.execution.PipeConnectorSubtaskExecutor;
-import org.apache.iotdb.db.pipe.execution.PipeProcessorSubtaskExecutor;
-import org.apache.iotdb.db.pipe.execution.PipeSubtaskExecutorManager;
-import org.apache.iotdb.db.pipe.task.PipeDataNodeTask;
-import org.apache.iotdb.db.pipe.task.stage.PipeTaskConnectorStage;
-import org.apache.iotdb.db.pipe.task.stage.PipeTaskExtractorStage;
-import org.apache.iotdb.db.pipe.task.stage.PipeTaskProcessorStage;
-import org.apache.iotdb.db.subscription.task.stage.SubscriptionTaskConnectorStage;
-import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
-
-import java.util.Arrays;
-import java.util.EnumMap;
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_FORMAT_HYBRID_VALUE;
-import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_FORMAT_KEY;
-import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_FORMAT_TABLET_VALUE;
-import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_FORMAT_KEY;
-
-public class PipeDataNodeTaskBuilder {
-
-  private final PipeStaticMeta pipeStaticMeta;
-  private final int regionId;
-  private final PipeTaskMeta pipeTaskMeta;
-
-  private static final PipeProcessorSubtaskExecutor PROCESSOR_EXECUTOR;
-  private static final Map<PipeType, PipeConnectorSubtaskExecutor> CONNECTOR_EXECUTOR_MAP;
-
-  static {
-    PROCESSOR_EXECUTOR = PipeSubtaskExecutorManager.getInstance().getProcessorExecutor();
-    CONNECTOR_EXECUTOR_MAP = new EnumMap<>(PipeType.class);
-    CONNECTOR_EXECUTOR_MAP.put(
-        PipeType.USER, PipeSubtaskExecutorManager.getInstance().getConnectorExecutor());
-    CONNECTOR_EXECUTOR_MAP.put(
-        PipeType.SUBSCRIPTION, PipeSubtaskExecutorManager.getInstance().getSubscriptionExecutor());
-    CONNECTOR_EXECUTOR_MAP.put(
-        PipeType.CONSENSUS, PipeSubtaskExecutorManager.getInstance().getConsensusExecutor());
-  }
-
-  protected final Map<String, String> systemParameters = new HashMap<>();
-
-  public PipeDataNodeTaskBuilder(
-      final PipeStaticMeta pipeStaticMeta, final int regionId, final PipeTaskMeta pipeTaskMeta) {
-    this.pipeStaticMeta = pipeStaticMeta;
-    this.regionId = regionId;
-    this.pipeTaskMeta = pipeTaskMeta;
-    generateSystemParameters();
-  }
-
-  public PipeDataNodeTask build() {
-    // Event flow: extractor -> processor -> connector
-
-    // We first build the extractor and connector, then build the processor.
-    final PipeTaskExtractorStage extractorStage =
-        new PipeTaskExtractorStage(
-            pipeStaticMeta.getPipeName(),
-            pipeStaticMeta.getCreationTime(),
-            blendUserAndSystemParameters(pipeStaticMeta.getExtractorParameters()),
-            regionId,
-            pipeTaskMeta);
-
-    final PipeTaskConnectorStage connectorStage;
-    final PipeType pipeType = pipeStaticMeta.getPipeType();
-    if (PipeType.SUBSCRIPTION.equals(pipeType)) {
-      connectorStage =
-          new SubscriptionTaskConnectorStage(
-              pipeStaticMeta.getPipeName(),
-              pipeStaticMeta.getCreationTime(),
-              blendUserAndSystemParameters(pipeStaticMeta.getConnectorParameters()),
-              regionId,
-              CONNECTOR_EXECUTOR_MAP.get(pipeType));
-    } else { // user pipe or consensus pipe
-      connectorStage =
-          new PipeTaskConnectorStage(
-              pipeStaticMeta.getPipeName(),
-              pipeStaticMeta.getCreationTime(),
-              blendUserAndSystemParameters(pipeStaticMeta.getConnectorParameters()),
-              regionId,
-              CONNECTOR_EXECUTOR_MAP.get(pipeType));
-    }
-
-    // The processor connects the extractor and connector.
-    final PipeTaskProcessorStage processorStage =
-        new PipeTaskProcessorStage(
-            pipeStaticMeta.getPipeName(),
-            pipeStaticMeta.getCreationTime(),
-            blendUserAndSystemParameters(pipeStaticMeta.getProcessorParameters()),
-            regionId,
-            extractorStage.getEventSupplier(),
-            connectorStage.getPipeConnectorPendingQueue(),
-            PROCESSOR_EXECUTOR,
-            pipeTaskMeta,
-            pipeStaticMeta
-                .getConnectorParameters()
-                .getStringOrDefault(
-                    Arrays.asList(CONNECTOR_FORMAT_KEY, SINK_FORMAT_KEY),
-                    CONNECTOR_FORMAT_HYBRID_VALUE)
-                .equals(CONNECTOR_FORMAT_TABLET_VALUE));
-
-    return new PipeDataNodeTask(
-        pipeStaticMeta.getPipeName(), regionId, extractorStage, processorStage, connectorStage);
-  }
-
-  private void generateSystemParameters() {
-    if (!(pipeTaskMeta.getProgressIndex() instanceof MinimumProgressIndex)) {
-      systemParameters.put(SystemConstant.RESTART_KEY, Boolean.TRUE.toString());
-    }
-  }
-
-  private PipeParameters blendUserAndSystemParameters(final PipeParameters userParameters) {
-    // Deep copy the user parameters to avoid modification of the original parameters.
-    // If the original parameters are modified, progress index report will be affected.
-    final Map<String, String> blendedParameters = new HashMap<>(userParameters.getAttribute());
-    blendedParameters.putAll(systemParameters);
-    return new PipeParameters(blendedParameters);
-  }
-}
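The build order in the deleted method above follows the event flow extractor -> processor -> connector: both endpoints are created first, then the processor is wired between the extractor's event supplier and the connector's pending queue. A minimal, self-contained restatement of that wiring idea using plain JDK types (all names here are stand-ins, not pipe classes):

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.function.Supplier;

public class StageWiringSketch {
  public static void main(String[] args) {
    Queue<String> connectorQueue = new ArrayDeque<>(); // stands in for the connector's pending queue
    Supplier<String> extractorOutput = () -> "event";  // stands in for the extractor's event supplier
    // The processor bridges the two endpoints and is therefore created last.
    Runnable processor = () -> connectorQueue.add(extractorOutput.get());
    processor.run();
    System.out.println(connectorQueue); // prints [event]
  }
}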
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/connector/PipeRealtimePriorityBlockingQueue.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/connector/PipeRealtimePriorityBlockingQueue.java
deleted file mode 100644
index a169cb6a33849..0000000000000
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/connector/PipeRealtimePriorityBlockingQueue.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.pipe.task.subtask.connector;
-
-import org.apache.iotdb.commons.pipe.config.PipeConfig;
-import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
-import org.apache.iotdb.commons.pipe.task.connection.UnboundedBlockingPendingQueue;
-import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
-import org.apache.iotdb.db.pipe.metric.PipeDataRegionEventCounter;
-import org.apache.iotdb.db.pipe.task.connection.PipeEventCollector;
-import org.apache.iotdb.pipe.api.event.Event;
-import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent;
-
-import java.util.Objects;
-import java.util.concurrent.BlockingDeque;
-import java.util.concurrent.LinkedBlockingDeque;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.function.Consumer;
-import java.util.function.Predicate;
-
-public class PipeRealtimePriorityBlockingQueue extends UnboundedBlockingPendingQueue<Event> {
-
-  private final BlockingDeque<TsFileInsertionEvent> tsfileInsertEventDeque =
-      new LinkedBlockingDeque<>();
-
-  private final AtomicInteger eventCount = new AtomicInteger(0);
-
-  private static final int pollHistoryThreshold =
-      PipeConfig.getInstance().getPipeRealTimeQueuePollHistoryThreshold();
-
-  public PipeRealtimePriorityBlockingQueue() {
-    super(new PipeDataRegionEventCounter());
-  }
-
-  @Override
-  public boolean directOffer(final Event event) {
-    if (event instanceof TsFileInsertionEvent) {
-      tsfileInsertEventDeque.add((TsFileInsertionEvent) event);
-      return true;
-    }
-
-    if (event instanceof PipeHeartbeatEvent && super.peekLast() instanceof PipeHeartbeatEvent) {
-      // We can NOT keep too many PipeHeartbeatEvent in bufferQueue because they may cause OOM.
-      ((EnrichedEvent) event).decreaseReferenceCount(PipeEventCollector.class.getName(), false);
-      return false;
-    } else {
-      return super.directOffer(event);
-    }
-  }
-
-  @Override
-  public boolean waitedOffer(final Event event) {
-    return directOffer(event);
-  }
-
-  @Override
-  public boolean put(final Event event) {
-    directOffer(event);
-    return true;
-  }
-
-  @Override
-  public Event directPoll() {
-    Event event = null;
-    if (eventCount.get() >= pollHistoryThreshold) {
-      event = tsfileInsertEventDeque.pollFirst();
-      eventCount.set(0);
-    }
-    if (Objects.isNull(event)) {
-      event = super.directPoll();
-      if (Objects.isNull(event)) {
-        event = tsfileInsertEventDeque.pollLast();
-      }
-      if (event != null) {
-        eventCount.incrementAndGet();
-      }
-    }
-
-    return event;
-  }
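directPoll() above encodes the deleted queue's priority rule: realtime events are preferred, but after pollHistoryThreshold consecutive polls the earliest buffered TsFile event is served so that historical data cannot starve. A minimal, self-contained restatement with plain deques (the threshold constant and String events are stand-ins for the pipe types):

import java.util.ArrayDeque;
import java.util.Deque;

public class PriorityPollSketch {
  private static final int THRESHOLD = 10; // stands in for pollHistoryThreshold
  private final Deque<String> realtime = new ArrayDeque<>();
  private final Deque<String> tsfiles = new ArrayDeque<>();
  private int polls = 0;

  public String poll() {
    if (polls >= THRESHOLD) {
      polls = 0;
      String oldest = tsfiles.pollFirst(); // serve the earliest-written TsFile event first
      if (oldest != null) {
        return oldest;
      }
    }
    String event = realtime.pollFirst();   // otherwise prefer realtime events
    if (event == null) {
      event = tsfiles.pollLast();          // fall back to the freshest TsFile event
    }
    if (event != null) {
      polls++;
    }
    return event;
  }
}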
-  /**
-   * When the number of polls exceeds pollHistoryThreshold, the {@link TsFileInsertionEvent}
-   * written to the queue earliest is returned. If pollHistoryThreshold is not reached, an attempt
-   * is made to poll the latest insertion {@link Event}: first the first offered
-   * non-TsFileInsertionEvent; if there is no such {@link Event}, the last offered {@link
-   * TsFileInsertionEvent}. If no {@link Event} is available at all, it blocks until an {@link
-   * Event} becomes available.
-   *
-   * @return the freshest insertion {@link Event}; can be {@code null} if no {@link Event} is
-   *     available.
-   */
-  @Override
-  public Event waitedPoll() {
-    Event event = null;
-    if (eventCount.get() >= pollHistoryThreshold) {
-      event = tsfileInsertEventDeque.pollFirst();
-      eventCount.set(0);
-    }
-    if (event == null) {
-      // Sequentially poll the first offered non-TsFileInsertionEvent
-      event = super.directPoll();
-      if (event == null && !tsfileInsertEventDeque.isEmpty()) {
-        // Always poll the last offered event
-        event = tsfileInsertEventDeque.pollLast();
-      }
-      if (event != null) {
-        eventCount.incrementAndGet();
-      }
-    }
-
-    // If no event is available, block until an event is available
-    if (Objects.isNull(event)) {
-      event = super.waitedPoll();
-      if (Objects.isNull(event)) {
-        event = tsfileInsertEventDeque.pollLast();
-      }
-      if (event != null) {
-        eventCount.incrementAndGet();
-      }
-    }
-
-    return event;
-  }
-
-  @Override
-  public void clear() {
-    super.clear();
-    tsfileInsertEventDeque.clear();
-  }
-
-  @Override
-  public void forEach(final Consumer<? super Event> action) {
-    super.forEach(action);
-    tsfileInsertEventDeque.forEach(action);
-  }
-
-  @Override
-  public void removeIf(final Predicate<? super Event> filter) {
-    super.removeIf(filter);
-    pendingQueue.removeIf(filter);
-  }
-
-  @Override
-  public boolean isEmpty() {
-    return super.isEmpty() && tsfileInsertEventDeque.isEmpty();
-  }
-
-  @Override
-  public int size() {
-    return super.size() + tsfileInsertEventDeque.size();
-  }
-
-  @Override
-  public int getTsFileInsertionEventCount() {
-    return tsfileInsertEventDeque.size();
-  }
-}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/client/ConfigNodeClient.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/client/ConfigNodeClient.java
index c897d3000c37f..2825eefaf4c4d 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/client/ConfigNodeClient.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/client/ConfigNodeClient.java
@@ -24,6 +24,7 @@
 import org.apache.iotdb.common.rpc.thrift.TEndPoint;
 import org.apache.iotdb.common.rpc.thrift.TFlushReq;
 import org.apache.iotdb.common.rpc.thrift.TNodeLocations;
+import org.apache.iotdb.common.rpc.thrift.TPipeHeartbeatResp;
 import org.apache.iotdb.common.rpc.thrift.TSStatus;
 import org.apache.iotdb.common.rpc.thrift.TSetConfigurationReq;
 import org.apache.iotdb.common.rpc.thrift.TSetSpaceQuotaReq;
@@ -40,6 +41,12 @@
 import org.apache.iotdb.commons.client.sync.SyncThriftClientWithErrorHandler;
 import org.apache.iotdb.commons.consensus.ConfigRegionId;
 import org.apache.iotdb.confignode.rpc.thrift.IConfigNodeRPCService;
+import org.apache.iotdb.confignode.rpc.thrift.TAINodeConfigurationResp;
+import org.apache.iotdb.confignode.rpc.thrift.TAINodeRegisterReq;
+import org.apache.iotdb.confignode.rpc.thrift.TAINodeRegisterResp;
+import org.apache.iotdb.confignode.rpc.thrift.TAINodeRemoveReq;
+import org.apache.iotdb.confignode.rpc.thrift.TAINodeRestartReq;
+import org.apache.iotdb.confignode.rpc.thrift.TAINodeRestartResp;
 import org.apache.iotdb.confignode.rpc.thrift.TAddConsensusGroupReq;
 import org.apache.iotdb.confignode.rpc.thrift.TAlterLogicalViewReq;
 import org.apache.iotdb.confignode.rpc.thrift.TAlterPipeReq;
@@ -59,6 +66,7 @@
 import org.apache.iotdb.confignode.rpc.thrift.TCreateCQReq;
 import org.apache.iotdb.confignode.rpc.thrift.TCreateConsumerReq;
 import org.apache.iotdb.confignode.rpc.thrift.TCreateFunctionReq;
+import org.apache.iotdb.confignode.rpc.thrift.TCreateModelReq;
 import org.apache.iotdb.confignode.rpc.thrift.TCreatePipePluginReq;
 import
org.apache.iotdb.confignode.rpc.thrift.TCreatePipeReq; import org.apache.iotdb.confignode.rpc.thrift.TCreateSchemaTemplateReq; @@ -82,10 +90,13 @@ import org.apache.iotdb.confignode.rpc.thrift.TDeleteTimeSeriesReq; import org.apache.iotdb.confignode.rpc.thrift.TDropCQReq; import org.apache.iotdb.confignode.rpc.thrift.TDropFunctionReq; +import org.apache.iotdb.confignode.rpc.thrift.TDropModelReq; import org.apache.iotdb.confignode.rpc.thrift.TDropPipePluginReq; import org.apache.iotdb.confignode.rpc.thrift.TDropPipeReq; +import org.apache.iotdb.confignode.rpc.thrift.TDropSubscriptionReq; import org.apache.iotdb.confignode.rpc.thrift.TDropTopicReq; import org.apache.iotdb.confignode.rpc.thrift.TDropTriggerReq; +import org.apache.iotdb.confignode.rpc.thrift.TExtendRegionReq; import org.apache.iotdb.confignode.rpc.thrift.TGetAllPipeInfoResp; import org.apache.iotdb.confignode.rpc.thrift.TGetAllSubscriptionInfoResp; import org.apache.iotdb.confignode.rpc.thrift.TGetAllTemplatesResp; @@ -96,6 +107,8 @@ import org.apache.iotdb.confignode.rpc.thrift.TGetJarInListReq; import org.apache.iotdb.confignode.rpc.thrift.TGetJarInListResp; import org.apache.iotdb.confignode.rpc.thrift.TGetLocationForTriggerResp; +import org.apache.iotdb.confignode.rpc.thrift.TGetModelInfoReq; +import org.apache.iotdb.confignode.rpc.thrift.TGetModelInfoResp; import org.apache.iotdb.confignode.rpc.thrift.TGetPathsSetTemplatesReq; import org.apache.iotdb.confignode.rpc.thrift.TGetPathsSetTemplatesResp; import org.apache.iotdb.confignode.rpc.thrift.TGetPipePluginTableResp; @@ -113,7 +126,9 @@ import org.apache.iotdb.confignode.rpc.thrift.TPermissionInfoResp; import org.apache.iotdb.confignode.rpc.thrift.TPipeConfigTransferReq; import org.apache.iotdb.confignode.rpc.thrift.TPipeConfigTransferResp; +import org.apache.iotdb.confignode.rpc.thrift.TReconstructRegionReq; import org.apache.iotdb.confignode.rpc.thrift.TRegionRouteMapResp; +import org.apache.iotdb.confignode.rpc.thrift.TRemoveRegionReq; import org.apache.iotdb.confignode.rpc.thrift.TSchemaNodeManagementReq; import org.apache.iotdb.confignode.rpc.thrift.TSchemaNodeManagementResp; import org.apache.iotdb.confignode.rpc.thrift.TSchemaPartitionReq; @@ -123,11 +138,14 @@ import org.apache.iotdb.confignode.rpc.thrift.TSetSchemaReplicationFactorReq; import org.apache.iotdb.confignode.rpc.thrift.TSetSchemaTemplateReq; import org.apache.iotdb.confignode.rpc.thrift.TSetTimePartitionIntervalReq; +import org.apache.iotdb.confignode.rpc.thrift.TShowAINodesResp; import org.apache.iotdb.confignode.rpc.thrift.TShowCQResp; import org.apache.iotdb.confignode.rpc.thrift.TShowClusterResp; import org.apache.iotdb.confignode.rpc.thrift.TShowConfigNodesResp; import org.apache.iotdb.confignode.rpc.thrift.TShowDataNodesResp; import org.apache.iotdb.confignode.rpc.thrift.TShowDatabaseResp; +import org.apache.iotdb.confignode.rpc.thrift.TShowModelReq; +import org.apache.iotdb.confignode.rpc.thrift.TShowModelResp; import org.apache.iotdb.confignode.rpc.thrift.TShowPipeReq; import org.apache.iotdb.confignode.rpc.thrift.TShowPipeResp; import org.apache.iotdb.confignode.rpc.thrift.TShowRegionReq; @@ -168,14 +186,18 @@ public class ConfigNodeClient implements IConfigNodeRPCService.Iface, ThriftClie private static final Logger logger = LoggerFactory.getLogger(ConfigNodeClient.class); - private static final int RETRY_NUM = 10; + private static final int RETRY_NUM = 15; public static final String MSG_RECONNECTION_FAIL = "Fail to connect to any config node. 
Please check status of ConfigNodes or logs of connected DataNode";

   private static final String MSG_RECONNECTION_DATANODE_FAIL =
       "Failed to connect to ConfigNode %s from DataNode %s when executing %s, Exception:";
-  private static final int RETRY_INTERVAL_MS = 1000;
+  private static final long RETRY_INTERVAL_MS = 1000L;
+  private static final long WAIT_CN_LEADER_ELECTION_INTERVAL_MS = 2000L;
+
+  private static final String UNSUPPORTED_INVOCATION =
+      "This method is not supported for invocation by DataNode";

   private final ThriftClientProperty property;

@@ -257,7 +279,7 @@ private void tryToConnect(int timeoutMs) throws TException {
         connect(configLeader, timeoutMs);
         return;
       } catch (TException ignore) {
-        logger.warn("The current node may have been down {},try next node", configLeader);
+        logger.warn("The current node leader may have been down {}, try next node", configLeader);
         configLeader = null;
       }
     } else {
@@ -353,12 +375,14 @@ private boolean updateConfigNodeLeader(TSStatus status) {
    */
   private <T> T executeRemoteCallWithRetry(final Operation<T> call, final Predicate<T> check)
       throws TException {
+    int detectedNodeNum = 0;
     for (int i = 0; i < RETRY_NUM; i++) {
       try {
         final T result = call.execute();
         if (check.test(result)) {
           return result;
         }
+        detectedNodeNum++;
       } catch (TException e) {
         final String message =
             String.format(
@@ -369,6 +393,22 @@ private <T> T executeRemoteCallWithRetry(final Operation<T> call, final Predicate<T> check)
         logger.warn(message, e);
         configLeader = null;
       }
+
+      // If we have detected all configNodes and still have not returned
+      if (detectedNodeNum >= configNodes.size()) {
+        // Clear count
+        detectedNodeNum = 0;
+        // Wait to start the next try
+        try {
+          Thread.sleep(WAIT_CN_LEADER_ELECTION_INTERVAL_MS);
+        } catch (InterruptedException ignore) {
+          Thread.currentThread().interrupt();
+          logger.warn(
+              "Unexpected interruption when waiting to try to connect to ConfigNode, possibly because the current node has been shut down.
Will break current execution process to avoid meaningless wait."); + break; + } + } + connectAndSync(); } throw new TException(MSG_RECONNECTION_FAIL); @@ -428,6 +468,32 @@ public TDataNodeRestartResp restartDataNode(TDataNodeRestartReq req) throws TExc () -> client.restartDataNode(req), resp -> !updateConfigNodeLeader(resp.status)); } + @Override + public TAINodeRegisterResp registerAINode(TAINodeRegisterReq req) throws TException { + throw new UnsupportedOperationException(UNSUPPORTED_INVOCATION); + } + + @Override + public TAINodeRestartResp restartAINode(TAINodeRestartReq req) throws TException { + throw new UnsupportedOperationException(UNSUPPORTED_INVOCATION); + } + + @Override + public TSStatus removeAINode(TAINodeRemoveReq req) throws TException { + throw new UnsupportedOperationException(UNSUPPORTED_INVOCATION); + } + + @Override + public TShowAINodesResp showAINodes() throws TException { + return executeRemoteCallWithRetry( + () -> client.showAINodes(), resp -> !updateConfigNodeLeader(resp.status)); + } + + @Override + public TAINodeConfigurationResp getAINodeConfiguration(int aiNodeId) throws TException { + throw new UnsupportedOperationException(UNSUPPORTED_INVOCATION); + } + @Override public TDataNodeRemoveResp removeDataNode(TDataNodeRemoveReq req) throws TException { return executeRemoteCallWithRetry( @@ -628,7 +694,8 @@ public TSStatus notifyRegisterSuccess() throws TException { @Override public TSStatus removeConfigNode(TConfigNodeLocation configNodeLocation) throws TException { - throw new TException("DataNode to ConfigNode client doesn't support removeConfigNode."); + return executeRemoteCallWithRetry( + () -> client.removeConfigNode(configNodeLocation), resp -> !updateConfigNodeLeader(resp)); } @Override @@ -643,8 +710,8 @@ public TSStatus reportConfigNodeShutdown(TConfigNodeLocation configNodeLocation) } @Override - public TSStatus stopConfigNode(TConfigNodeLocation configNodeLocation) throws TException { - throw new TException("DataNode to ConfigNode client doesn't support stopConfigNode."); + public TSStatus stopAndClearConfigNode(TConfigNodeLocation configNodeLocation) throws TException { + throw new TException("DataNode to ConfigNode client doesn't support stopAndClearConfigNode."); } @Override @@ -691,7 +758,8 @@ public TSStatus submitLoadConfigurationTask() throws TException { @Override public TSStatus loadConfiguration() throws TException { - throw new UnsupportedOperationException("Please call submitLoadConfigurationTask instead"); + throw new UnsupportedOperationException( + UNSUPPORTED_INVOCATION + ", please call submitLoadConfigurationTask instead"); } @Override @@ -1047,6 +1115,12 @@ public TSStatus dropSubscription(TUnsubscribeReq req) throws TException { () -> client.dropSubscription(req), status -> !updateConfigNodeLeader(status)); } + @Override + public TSStatus dropSubscriptionById(TDropSubscriptionReq req) throws TException { + return executeRemoteCallWithRetry( + () -> client.dropSubscriptionById(req), status -> !updateConfigNodeLeader(status)); + } + @Override public TShowSubscriptionResp showSubscription(TShowSubscriptionReq req) throws TException { return executeRemoteCallWithRetry( @@ -1102,6 +1176,24 @@ public TSStatus migrateRegion(TMigrateRegionReq req) throws TException { () -> client.migrateRegion(req), status -> !updateConfigNodeLeader(status)); } + @Override + public TSStatus reconstructRegion(TReconstructRegionReq req) throws TException { + return executeRemoteCallWithRetry( + () -> client.reconstructRegion(req), status -> 
!updateConfigNodeLeader(status));
+  }
+
+  @Override
+  public TSStatus extendRegion(TExtendRegionReq req) throws TException {
+    return executeRemoteCallWithRetry(
+        () -> client.extendRegion(req), status -> !updateConfigNodeLeader(status));
+  }
+
+  @Override
+  public TSStatus removeRegion(TRemoveRegionReq req) throws TException {
+    return executeRemoteCallWithRetry(
+        () -> client.removeRegion(req), status -> !updateConfigNodeLeader(status));
+  }
+
   @Override
   public TSStatus createCQ(TCreateCQReq req) throws TException {
     return executeRemoteCallWithRetry(
@@ -1120,6 +1212,30 @@ public TShowCQResp showCQ() throws TException {
         () -> client.showCQ(), resp -> !updateConfigNodeLeader(resp.status));
   }

+  @Override
+  public TSStatus createModel(TCreateModelReq req) throws TException {
+    return executeRemoteCallWithRetry(
+        () -> client.createModel(req), status -> !updateConfigNodeLeader(status));
+  }
+
+  @Override
+  public TSStatus dropModel(TDropModelReq req) throws TException {
+    return executeRemoteCallWithRetry(
+        () -> client.dropModel(req), status -> !updateConfigNodeLeader(status));
+  }
+
+  @Override
+  public TShowModelResp showModel(TShowModelReq req) throws TException {
+    return executeRemoteCallWithRetry(
+        () -> client.showModel(req), resp -> !updateConfigNodeLeader(resp.status));
+  }
+
+  @Override
+  public TGetModelInfoResp getModelInfo(TGetModelInfoReq req) throws TException {
+    return executeRemoteCallWithRetry(
+        () -> client.getModelInfo(req), resp -> !updateConfigNodeLeader(resp.getStatus()));
+  }
+
   @Override
   public TSStatus setSpaceQuota(TSetSpaceQuotaReq req) throws TException {
     return executeRemoteCallWithRetry(
@@ -1156,6 +1272,13 @@ public TThrottleQuotaResp getThrottleQuota() throws TException {
         () -> client.getThrottleQuota(), resp -> !updateConfigNodeLeader(resp.status));
   }

+  @Override
+  public TSStatus pushHeartbeat(final int dataNodeId, final TPipeHeartbeatResp resp)
+      throws TException {
+    return executeRemoteCallWithRetry(
+        () -> client.pushHeartbeat(dataNodeId, resp), status -> !updateConfigNodeLeader(status));
+  }
+
   public static class Factory extends ThriftClientFactory<ConfigRegionId, ConfigNodeClient> {

     public Factory(
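Every DataNode-to-ConfigNode RPC above goes through executeRemoteCallWithRetry with a predicate that reports leadership changes. A minimal, self-contained sketch of that retry shape, reduced to plain java.util.function types (the class name and toy responses are illustrative only):

import java.util.function.Predicate;
import java.util.function.Supplier;

public class RetryShapeSketch {
  static <T> T callWithRetry(Supplier<T> call, Predicate<T> accept, int maxRetries) {
    T last = null;
    for (int i = 0; i < maxRetries; i++) {
      last = call.get();
      if (accept.test(last)) {
        return last;
      }
      // In ConfigNodeClient a rejected result means the leader moved: the client
      // reconnects, and after probing every known ConfigNode it sleeps briefly
      // (WAIT_CN_LEADER_ELECTION_INTERVAL_MS) to let a leader election finish.
    }
    return last;
  }

  public static void main(String[] args) {
    int[] attempts = {0};
    String result =
        callWithRetry(() -> ++attempts[0] < 3 ? "NOT_LEADER" : "SUCCESS", "SUCCESS"::equals, 15);
    System.out.println(result + " after " + attempts[0] + " attempts");
  }
}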
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/client/ConfigNodeInfo.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/client/ConfigNodeInfo.java
index c803dd5f09eba..ededae611ed07 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/client/ConfigNodeInfo.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/client/ConfigNodeInfo.java
@@ -22,6 +22,7 @@
 import org.apache.iotdb.common.rpc.thrift.TEndPoint;
 import org.apache.iotdb.commons.consensus.ConfigRegionId;
 import org.apache.iotdb.commons.exception.BadNodeUrlException;
+import org.apache.iotdb.commons.exception.StartupException;
 import org.apache.iotdb.commons.file.SystemPropertiesHandler;
 import org.apache.iotdb.commons.utils.NodeUrlUtils;
 import org.apache.iotdb.db.conf.DataNodeSystemPropertiesHandler;
@@ -109,7 +110,7 @@ public void storeConfigNodeList() throws IOException {
         CONFIG_NODE_LIST, NodeUrlUtils.convertTEndPointUrls(new ArrayList<>(onlineConfigNodes)));
   }

-  public void loadConfigNodeList() {
+  public void loadConfigNodeList() throws StartupException {
     long startTime = System.currentTimeMillis();
     // properties contain CONFIG_NODE_LIST only when start as Data node
     configNodeInfoReadWriteLock.writeLock().lock();
@@ -121,6 +122,11 @@ public void loadConfigNodeList() {
         onlineConfigNodes.addAll(
             NodeUrlUtils.parseTEndPointUrls(properties.getProperty(CONFIG_NODE_LIST)));
       }
+      if (onlineConfigNodes.isEmpty()) {
+        throw new StartupException(
+            "Removing is only allowed in an environment where the datanode has been successfully started. "
+                + "Please check whether it is removed on the confignode, or if you have deleted the system.properties file by mistake.");
+      }
       long endTime = System.currentTimeMillis();
       logger.info(
           "Load ConfigNode successfully: {}, which takes {} ms.",
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/client/DataNodeClientPoolFactory.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/client/DataNodeClientPoolFactory.java
index 03300e82e3a02..b5f5df430129f 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/client/DataNodeClientPoolFactory.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/client/DataNodeClientPoolFactory.java
@@ -28,7 +28,6 @@
 import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;

-import org.apache.commons.pool2.KeyedObjectPool;
 import org.apache.commons.pool2.impl.GenericKeyedObjectPool;

 public class DataNodeClientPoolFactory {
@@ -43,7 +42,7 @@ public static class ConfigNodeClientPoolFactory
       implements IClientPoolFactory<ConfigRegionId, ConfigNodeClient> {

     @Override
-    public KeyedObjectPool<ConfigRegionId, ConfigNodeClient> createClientPool(
+    public GenericKeyedObjectPool<ConfigRegionId, ConfigNodeClient> createClientPool(
         ClientManager<ConfigRegionId, ConfigNodeClient> manager) {
       GenericKeyedObjectPool<ConfigRegionId, ConfigNodeClient> clientPool =
           new GenericKeyedObjectPool<>(
@@ -67,7 +66,7 @@ public static class ClusterDeletionConfigNodeClientPoolFactory
       implements IClientPoolFactory<ConfigRegionId, ConfigNodeClient> {

     @Override
-    public KeyedObjectPool<ConfigRegionId, ConfigNodeClient> createClientPool(
+    public GenericKeyedObjectPool<ConfigRegionId, ConfigNodeClient> createClientPool(
         ClientManager<ConfigRegionId, ConfigNodeClient> manager) {
       GenericKeyedObjectPool<ConfigRegionId, ConfigNodeClient> clientPool =
           new GenericKeyedObjectPool<>(
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/mqtt/JSONPayloadFormatter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/mqtt/JSONPayloadFormatter.java
index 524b0a4718ded..2dc637434299a 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/mqtt/JSONPayloadFormatter.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/mqtt/JSONPayloadFormatter.java
@@ -27,6 +27,7 @@
 import com.google.gson.JsonParseException;
 import com.google.gson.reflect.TypeToken;
 import io.netty.buffer.ByteBuf;
+import org.apache.commons.lang3.NotImplementedException;

 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
@@ -48,7 +49,7 @@ public class JSONPayloadFormatter implements PayloadFormatter {
   private static final Gson GSON = new GsonBuilder().create();

   @Override
-  public List<Message> format(ByteBuf payload) {
+  public List<Message> format(String topic, ByteBuf payload) {
     if (payload == null) {
       return new ArrayList<>();
     }
@@ -79,6 +80,12 @@ public List<Message> format(ByteBuf payload) {
     throw new JsonParseException("payload is invalid");
   }

+  @Override
+  @Deprecated
+  public List<Message> format(ByteBuf payload) {
+    throw new NotImplementedException();
+  }
+
   private List<Message> formatJson(JsonObject jsonObject) {
     Message message = new Message();
     message.setDevice(jsonObject.get(JSON_KEY_DEVICE).getAsString());
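JSONPayloadFormatter above now implements the topic-aware entry point; the interface change enabling it appears in PayloadFormatter below. A hedged sketch of a custom formatter built on the new hook; the class, its topic-to-device rule, and the getName() member are illustrative assumptions, not code from this patch:

import io.netty.buffer.ByteBuf;
import java.util.Collections;
import java.util.List;

public class TopicAsDeviceFormatter implements PayloadFormatter {
  @Override
  public List<Message> format(String topic, ByteBuf payload) {
    Message message = new Message();
    message.setDevice("root." + topic.replace('/', '.')); // derive the device from the topic
    // Parsing of measurements/values from the payload is omitted in this sketch.
    return Collections.singletonList(message);
  }

  @Deprecated
  @Override
  public List<Message> format(ByteBuf payload) {
    throw new UnsupportedOperationException("use format(topic, payload) instead");
  }

  @Override
  public String getName() { // assumed name of the "formatter name" accessor
    return "topic-as-device";
  }
}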
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/mqtt/MPPPublishHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/mqtt/MPPPublishHandler.java
index 3fc50278ff760..c24b816420fb6 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/mqtt/MPPPublishHandler.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/mqtt/MPPPublishHandler.java
@@ -79,6 +79,10 @@ public String getID() {

   @Override
   public void onConnect(InterceptConnectMessage msg) {
+    if (msg.getClientID() == null || msg.getClientID().trim().isEmpty()) {
+      LOG.error(
+          "Connection refused: client_id is missing or empty. A valid client_id is required to establish a connection.");
+    }
     if (!clientIdToSessionMap.containsKey(msg.getClientID())) {
       MqttClientSession session = new MqttClientSession(msg.getClientID());
       sessionManager.login(
@@ -88,6 +92,7 @@ public void onConnect(InterceptConnectMessage msg) {
           ZoneId.systemDefault().toString(),
           TSProtocolVersion.IOTDB_SERVICE_PROTOCOL_V3,
           ClientVersion.V_1_0);
+      sessionManager.registerSessionForMqtt(session);
       clientIdToSessionMap.put(msg.getClientID(), session);
     }
   }
@@ -96,6 +101,7 @@ public void onConnect(InterceptConnectMessage msg) {
   public void onDisconnect(InterceptDisconnectMessage msg) {
     MqttClientSession session = clientIdToSessionMap.remove(msg.getClientID());
     if (null != session) {
+      sessionManager.removeCurrSessionForMqtt(session);
       sessionManager.closeSession(session, Coordinator.getInstance()::cleanupQueryExecution);
     }
   }
@@ -121,7 +127,7 @@ public void onPublish(InterceptPublishMessage msg) {
             topic,
             payload);

-    List<Message> events = payloadFormat.format(payload);
+    List<Message> events = payloadFormat.format(topic, payload);
     if (events == null) {
       return;
     }
@@ -169,7 +175,8 @@ public void onPublish(InterceptPublishMessage msg) {
               "",
               partitionFetcher,
               schemaFetcher,
-              config.getQueryTimeoutThreshold());
+              config.getQueryTimeoutThreshold(),
+              false);
       tsStatus = result.status;
     }
   } catch (Exception e) {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/mqtt/PayloadFormatter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/mqtt/PayloadFormatter.java
index 5f6527be997af..3027c1a6e9450 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/mqtt/PayloadFormatter.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/mqtt/PayloadFormatter.java
@@ -36,8 +36,20 @@ public interface PayloadFormatter {
    * @param payload
    * @return
    */
+  @Deprecated
   List<Message> format(ByteBuf payload);

+  /**
+   * format a payload of a topic to a list of messages
+   *
+   * @param topic
+   * @param payload
+   * @return
+   */
+  default List<Message> format(String topic, ByteBuf payload) {
+    return format(payload);
+  }
+
   /**
    * get the formatter name
    *
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/impl/PingApiServiceImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/impl/PingApiServiceImpl.java
index d16988e43129c..f85de024b4780 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/impl/PingApiServiceImpl.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/impl/PingApiServiceImpl.java
@@ -20,7 +20,7 @@
 import org.apache.iotdb.commons.service.ThriftService;
 import org.apache.iotdb.db.protocol.rest.PingApiService;
 import org.apache.iotdb.db.protocol.rest.model.ExecutionStatus;
-import org.apache.iotdb.db.service.RPCService;
+import org.apache.iotdb.db.service.ExternalRPCService;
 import org.apache.iotdb.rpc.TSStatusCode;

 import javax.ws.rs.core.Response;
@@ -32,7 +32,7 @@ public class PingApiServiceImpl extends PingApiService {

   @Override
   public Response tryPing(SecurityContext securityContext) {
-    if (RPCService.getInstance().getRPCServiceStatus().equals(ThriftService.STATUS_DOWN)) {
+    if
(ExternalRPCService.getInstance().getRPCServiceStatus().equals(ThriftService.STATUS_DOWN)) { return Response.status(Response.Status.SERVICE_UNAVAILABLE) .entity( new ExecutionStatus() diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v1/handler/ExecuteStatementHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v1/handler/ExecuteStatementHandler.java index 618c1a4b3813e..7eec34964bcfc 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v1/handler/ExecuteStatementHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v1/handler/ExecuteStatementHandler.java @@ -22,6 +22,7 @@ import org.apache.iotdb.db.queryengine.plan.statement.crud.QueryStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.GetRegionIdStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.ShowStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.model.ShowModelsStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.template.DropSchemaTemplateStatement; import org.apache.iotdb.db.queryengine.plan.statement.sys.AuthorStatement; @@ -30,6 +31,7 @@ private ExecuteStatementHandler() {} public static boolean validateStatement(Statement statement) { return !(statement instanceof QueryStatement) + && !(statement instanceof ShowModelsStatement) && !(statement instanceof ShowStatement && !(statement instanceof DropSchemaTemplateStatement)) && !(statement instanceof AuthorStatement diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v1/handler/QueryDataSetHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v1/handler/QueryDataSetHandler.java index b21947591fbe2..c82b07c060328 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v1/handler/QueryDataSetHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v1/handler/QueryDataSetHandler.java @@ -26,6 +26,7 @@ import org.apache.iotdb.db.queryengine.plan.statement.metadata.GetRegionIdStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.ShowChildPathsStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.ShowStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.model.ShowModelsStatement; import org.apache.iotdb.db.queryengine.plan.statement.sys.AuthorStatement; import org.apache.iotdb.rpc.TSStatusCode; @@ -52,6 +53,7 @@ public static Response fillQueryDataSet( IQueryExecution queryExecution, Statement statement, int actualRowSizeLimit) throws IoTDBException { if (statement instanceof ShowStatement + || statement instanceof ShowModelsStatement || statement instanceof AuthorStatement || statement instanceof GetRegionIdStatement) { return fillShowPlanDataSet(queryExecution, actualRowSizeLimit); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v1/impl/GrafanaApiServiceImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v1/impl/GrafanaApiServiceImpl.java index 07c735faa7f24..bd955fd00da9b 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v1/impl/GrafanaApiServiceImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v1/impl/GrafanaApiServiceImpl.java @@ -117,7 +117,8 @@ public Response variables(SQL sql, SecurityContext securityContext) { sql.getSql(), partitionFetcher, schemaFetcher, - 
config.getQueryTimeoutThreshold());
+            config.getQueryTimeoutThreshold(),
+            true);
     if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode()
         && result.status.code != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) {
       return Response.ok()
@@ -184,7 +185,8 @@ public Response expression(ExpressionRequest expressionRequest, SecurityContext
             sql,
             partitionFetcher,
             schemaFetcher,
-            config.getQueryTimeoutThreshold());
+            config.getQueryTimeoutThreshold(),
+            true);
     if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode()
         && result.status.code != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) {
       return Response.ok()
@@ -246,7 +248,8 @@ public Response node(List<String> requestBody, SecurityContext securityContext)
             sql,
             partitionFetcher,
             schemaFetcher,
-            config.getQueryTimeoutThreshold());
+            config.getQueryTimeoutThreshold(),
+            true);
     if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode()
         && result.status.code != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) {
       return Response.ok()
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v1/impl/RestApiServiceImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v1/impl/RestApiServiceImpl.java
index 428f2c3baaa56..084b7cc53e9cb 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v1/impl/RestApiServiceImpl.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v1/impl/RestApiServiceImpl.java
@@ -32,6 +32,7 @@
 import org.apache.iotdb.db.protocol.rest.v1.model.InsertTabletRequest;
 import org.apache.iotdb.db.protocol.rest.v1.model.SQL;
 import org.apache.iotdb.db.protocol.session.SessionManager;
+import org.apache.iotdb.db.protocol.thrift.OperationType;
 import org.apache.iotdb.db.queryengine.plan.Coordinator;
 import org.apache.iotdb.db.queryengine.plan.analyze.ClusterPartitionFetcher;
 import org.apache.iotdb.db.queryengine.plan.analyze.IPartitionFetcher;
@@ -42,6 +43,7 @@
 import org.apache.iotdb.db.queryengine.plan.parser.StatementGenerator;
 import org.apache.iotdb.db.queryengine.plan.statement.Statement;
 import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertTabletStatement;
+import org.apache.iotdb.db.utils.CommonUtils;
 import org.apache.iotdb.db.utils.SetThreadName;
 import org.apache.iotdb.rpc.TSStatusCode;

@@ -49,6 +51,7 @@
 import javax.ws.rs.core.SecurityContext;

 import java.time.ZoneId;
+import java.util.Optional;

 public class RestApiServiceImpl extends RestApiService {

@@ -76,12 +79,20 @@ public RestApiServiceImpl() {
   @Override
   public Response executeNonQueryStatement(SQL sql, SecurityContext securityContext) {
     Long queryId = null;
+    long startTime = System.nanoTime();
+    boolean finish = false;
+    Statement statement = null;
     try {
       RequestValidationHandler.validateSQL(sql);
-
-      Statement statement =
-          StatementGenerator.createStatement(sql.getSql(), ZoneId.systemDefault());
-
+      statement = StatementGenerator.createStatement(sql.getSql(), ZoneId.systemDefault());
+      if (statement == null) {
+        return Response.ok()
+            .entity(
+                new org.apache.iotdb.db.protocol.rest.model.ExecutionStatus()
+                    .code(TSStatusCode.SQL_PARSE_ERROR.getStatusCode())
+                    .message("This operation type is not supported"))
+            .build();
+      }
       if (!ExecuteStatementHandler.validateStatement(statement)) {
         return Response.ok()
             .entity(
@@ -104,8 +115,9 @@ public Response executeNonQueryStatement(SQL sql, SecurityContext securityContext
             sql.getSql(),
             partitionFetcher,
             schemaFetcher,
-
config.getQueryTimeoutThreshold(), + false); + finish = true; return Response.ok() .entity( (result.status.code == TSStatusCode.SUCCESS_STATUS.getStatusCode() @@ -118,9 +130,22 @@ public Response executeNonQueryStatement(SQL sql, SecurityContext securityContex .message(result.status.getMessage())) .build(); } catch (Exception e) { + finish = true; return Response.ok().entity(ExceptionHandler.tryCatchException(e)).build(); } finally { + long costTime = System.nanoTime() - startTime; + Optional.ofNullable(statement) + .ifPresent( + s -> { + CommonUtils.addStatementExecutionLatency( + OperationType.EXECUTE_NON_QUERY_PLAN, s.getType().name(), costTime); + }); if (queryId != null) { + if (finish) { + long executeTime = COORDINATOR.getTotalExecutionTime(queryId); + CommonUtils.addQueryLatency( + statement.getType(), executeTime > 0 ? executeTime : costTime); + } COORDINATOR.cleanupQueryExecution(queryId); } } @@ -129,12 +154,20 @@ public Response executeNonQueryStatement(SQL sql, SecurityContext securityContex @Override public Response executeQueryStatement(SQL sql, SecurityContext securityContext) { Long queryId = null; + long startTime = System.nanoTime(); + boolean finish = false; + Statement statement = null; try { RequestValidationHandler.validateSQL(sql); - - Statement statement = - StatementGenerator.createStatement(sql.getSql(), ZoneId.systemDefault()); - + statement = StatementGenerator.createStatement(sql.getSql(), ZoneId.systemDefault()); + if (statement == null) { + return Response.ok() + .entity( + new org.apache.iotdb.db.protocol.rest.model.ExecutionStatus() + .code(TSStatusCode.SQL_PARSE_ERROR.getStatusCode()) + .message("This operation type is not supported")) + .build(); + } if (ExecuteStatementHandler.validateStatement(statement)) { return Response.ok() .entity( @@ -158,7 +191,9 @@ public Response executeQueryStatement(SQL sql, SecurityContext securityContext) sql.getSql(), partitionFetcher, schemaFetcher, - config.getQueryTimeoutThreshold()); + config.getQueryTimeoutThreshold(), + true); + finish = true; if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode() && result.status.code != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) { return Response.ok() @@ -176,9 +211,22 @@ public Response executeQueryStatement(SQL sql, SecurityContext securityContext) sql.getRowLimit() == null ? defaultQueryRowLimit : sql.getRowLimit()); } } catch (Exception e) { + finish = true; return Response.ok().entity(ExceptionHandler.tryCatchException(e)).build(); } finally { + long costTime = System.nanoTime() - startTime; + Optional.ofNullable(statement) + .ifPresent( + s -> { + CommonUtils.addStatementExecutionLatency( + OperationType.EXECUTE_QUERY_STATEMENT, s.getType().name(), costTime); + }); if (queryId != null) { + if (finish) { + long executeTime = COORDINATOR.getTotalExecutionTime(queryId); + CommonUtils.addQueryLatency( + statement.getType(), executeTime > 0 ? 
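The recurring shape in these handler rewrites deserves a note: stamp System.nanoTime() on entry, flip a finish flag once the coordinator returns or an exception is caught, and do all metric accounting in finally. Optional.ofNullable(statement) turns the per-operation timer into a no-op when parsing failed, and the finished branch prefers the coordinator's measured execution time, falling back to wall-clock cost when that is unavailable. A compact sketch of the shape, with stand-in metric sinks in place of CommonUtils.addStatementExecutionLatency and addQueryLatency:

    import java.util.Optional;

    // Stand-ins: recordOperation/recordQuery play the role of the CommonUtils
    // helpers, coordinatorExecutionTime that of COORDINATOR.getTotalExecutionTime.
    final class TimedHandler {
      static void recordOperation(String op, String type, long nanos) {}
      static void recordQuery(String type, long nanos) {}
      static long coordinatorExecutionTime() { return -1; } // <= 0 means "unknown"

      static void execute(String sql) {
        long startTime = System.nanoTime();
        boolean finish = false;
        String statementType = null;
        try {
          statementType = sql.trim().split("\\s+")[0].toUpperCase(); // stand-in for parsing
          // ... dispatch to the coordinator here ...
          finish = true;
        } finally {
          long costTime = System.nanoTime() - startTime;
          final String type = statementType;
          // No statement (parse failure) -> no per-operation metric.
          Optional.ofNullable(type)
              .ifPresent(t -> recordOperation("EXECUTE_QUERY_STATEMENT", t, costTime));
          if (finish && type != null) {
            long executionTime = coordinatorExecutionTime();
            // Prefer the engine's measured time; fall back to wall clock.
            recordQuery(type, executionTime > 0 ? executionTime : costTime);
          }
        }
      }
    }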
executeTime : costTime); + } COORDINATOR.cleanupQueryExecution(queryId); } } @@ -188,6 +236,8 @@ public Response executeQueryStatement(SQL sql, SecurityContext securityContext) public Response insertTablet( InsertTabletRequest insertTabletRequest, SecurityContext securityContext) { Long queryId = null; + long startTime = System.nanoTime(); + InsertTabletStatement insertTabletStatement = null; try { RequestValidationHandler.validateInsertTabletRequest(insertTabletRequest); @@ -200,8 +250,8 @@ public Response insertTablet( InsertTabletSortDataUtils.sortList( insertTabletRequest.getValues(), index, insertTabletRequest.getDataTypes().size())); } - - InsertTabletStatement insertTabletStatement = + startTime = System.nanoTime(); + insertTabletStatement = StatementConstructionHandler.constructInsertTabletStatement(insertTabletRequest); Response response = @@ -218,7 +268,8 @@ public Response insertTablet( "", partitionFetcher, schemaFetcher, - config.getQueryTimeoutThreshold()); + config.getQueryTimeoutThreshold(), + false); return Response.ok() .entity( @@ -234,6 +285,13 @@ public Response insertTablet( } catch (Exception e) { return Response.ok().entity(ExceptionHandler.tryCatchException(e)).build(); } finally { + long costTime = System.nanoTime() - startTime; + Optional.ofNullable(insertTabletStatement) + .ifPresent( + s -> { + CommonUtils.addStatementExecutionLatency( + OperationType.INSERT_TABLET, s.getType().name(), costTime); + }); if (queryId != null) { COORDINATOR.cleanupQueryExecution(queryId); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v2/handler/ExecuteStatementHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v2/handler/ExecuteStatementHandler.java index f8293be440406..53bf7f896a9c5 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v2/handler/ExecuteStatementHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v2/handler/ExecuteStatementHandler.java @@ -22,6 +22,7 @@ import org.apache.iotdb.db.queryengine.plan.statement.crud.QueryStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.GetRegionIdStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.ShowStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.model.ShowModelsStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.template.DropSchemaTemplateStatement; import org.apache.iotdb.db.queryengine.plan.statement.sys.AuthorStatement; @@ -30,6 +31,7 @@ private ExecuteStatementHandler() {} public static boolean validateStatement(Statement statement) { return !(statement instanceof QueryStatement) + && !(statement instanceof ShowModelsStatement) && !(statement instanceof ShowStatement && !(statement instanceof DropSchemaTemplateStatement)) && !(statement instanceof AuthorStatement diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v2/handler/QueryDataSetHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v2/handler/QueryDataSetHandler.java index 57179b1dadcdc..f3f7d68dd0bdf 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v2/handler/QueryDataSetHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v2/handler/QueryDataSetHandler.java @@ -26,6 +26,7 @@ import org.apache.iotdb.db.queryengine.plan.statement.metadata.GetRegionIdStatement; import 
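One detail in the v1 insertTablet hunk above: startTime is stamped at method entry and then re-stamped after request validation and column sorting, so the recorded INSERT_TABLET latency covers statement construction and execution only, not payload pre-processing. The insert path reuses the same Optional-guarded finally accounting as the statement endpoints but omits the query-latency branch, since there is no per-query execution time to prefer.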
org.apache.iotdb.db.queryengine.plan.statement.metadata.ShowChildPathsStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.ShowStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.model.ShowModelsStatement; import org.apache.iotdb.db.queryengine.plan.statement.sys.AuthorStatement; import org.apache.iotdb.rpc.TSStatusCode; @@ -52,6 +53,7 @@ public static Response fillQueryDataSet( IQueryExecution queryExecution, Statement statement, int actualRowSizeLimit) throws IoTDBException { if (statement instanceof ShowStatement + || statement instanceof ShowModelsStatement || statement instanceof AuthorStatement || statement instanceof GetRegionIdStatement) { return fillShowPlanDataSet(queryExecution, actualRowSizeLimit); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v2/impl/GrafanaApiServiceImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v2/impl/GrafanaApiServiceImpl.java index e3385885a0130..6a5ae13a0cbba 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v2/impl/GrafanaApiServiceImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v2/impl/GrafanaApiServiceImpl.java @@ -117,7 +117,8 @@ public Response variables(SQL sql, SecurityContext securityContext) { sql.getSql(), partitionFetcher, schemaFetcher, - config.getQueryTimeoutThreshold()); + config.getQueryTimeoutThreshold(), + true); if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode() && result.status.code != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) { return Response.ok() @@ -184,7 +185,8 @@ public Response expression(ExpressionRequest expressionRequest, SecurityContext sql, partitionFetcher, schemaFetcher, - config.getQueryTimeoutThreshold()); + config.getQueryTimeoutThreshold(), + true); if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode() && result.status.code != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) { return Response.ok() @@ -246,7 +248,8 @@ public Response node(List requestBody, SecurityContext securityContext) sql, partitionFetcher, schemaFetcher, - config.getQueryTimeoutThreshold()); + config.getQueryTimeoutThreshold(), + true); if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode() && result.status.code != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) { return Response.ok() diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v2/impl/RestApiServiceImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v2/impl/RestApiServiceImpl.java index 854fd7feb7032..6028d9d257eaf 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v2/impl/RestApiServiceImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/rest/v2/impl/RestApiServiceImpl.java @@ -34,6 +34,7 @@ import org.apache.iotdb.db.protocol.rest.v2.model.InsertTabletRequest; import org.apache.iotdb.db.protocol.rest.v2.model.SQL; import org.apache.iotdb.db.protocol.session.SessionManager; +import org.apache.iotdb.db.protocol.thrift.OperationType; import org.apache.iotdb.db.queryengine.plan.Coordinator; import org.apache.iotdb.db.queryengine.plan.analyze.ClusterPartitionFetcher; import org.apache.iotdb.db.queryengine.plan.analyze.IPartitionFetcher; @@ -45,6 +46,7 @@ import org.apache.iotdb.db.queryengine.plan.statement.Statement; import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertRowsStatement; import 
org.apache.iotdb.db.queryengine.plan.statement.crud.InsertTabletStatement; +import org.apache.iotdb.db.utils.CommonUtils; import org.apache.iotdb.db.utils.SetThreadName; import org.apache.iotdb.rpc.TSStatusCode; @@ -53,6 +55,7 @@ import java.time.ZoneId; import java.util.List; +import java.util.Optional; public class RestApiServiceImpl extends RestApiService { @@ -80,12 +83,20 @@ public RestApiServiceImpl() { @Override public Response executeNonQueryStatement(SQL sql, SecurityContext securityContext) { Long queryId = null; + Statement statement = null; + long startTime = System.nanoTime(); + boolean finish = false; try { RequestValidationHandler.validateSQL(sql); - - Statement statement = - StatementGenerator.createStatement(sql.getSql(), ZoneId.systemDefault()); - + statement = StatementGenerator.createStatement(sql.getSql(), ZoneId.systemDefault()); + if (statement == null) { + return Response.ok() + .entity( + new org.apache.iotdb.db.protocol.rest.model.ExecutionStatus() + .code(TSStatusCode.SQL_PARSE_ERROR.getStatusCode()) + .message("This operation type is not supported")) + .build(); + } if (!ExecuteStatementHandler.validateStatement(statement)) { return Response.ok() .entity( @@ -108,13 +119,27 @@ public Response executeNonQueryStatement(SQL sql, SecurityContext securityContex sql.getSql(), partitionFetcher, schemaFetcher, - config.getQueryTimeoutThreshold()); - + config.getQueryTimeoutThreshold(), + false); + finish = true; return responseGenerateHelper(result); } catch (Exception e) { + finish = true; return Response.ok().entity(ExceptionHandler.tryCatchException(e)).build(); } finally { + long costTime = System.nanoTime() - startTime; + Optional.ofNullable(statement) + .ifPresent( + s -> { + CommonUtils.addStatementExecutionLatency( + OperationType.EXECUTE_NON_QUERY_PLAN, s.getType().name(), costTime); + }); if (queryId != null) { + if (finish) { + long executionTime = COORDINATOR.getTotalExecutionTime(queryId); + CommonUtils.addQueryLatency( + statement.getType(), executionTime > 0 ? executionTime : costTime); + } COORDINATOR.cleanupQueryExecution(queryId); } } @@ -123,11 +148,21 @@ public Response executeNonQueryStatement(SQL sql, SecurityContext securityContex @Override public Response executeQueryStatement(SQL sql, SecurityContext securityContext) { Long queryId = null; + Statement statement = null; + long startTime = System.nanoTime(); + boolean finish = false; try { RequestValidationHandler.validateSQL(sql); + statement = StatementGenerator.createStatement(sql.getSql(), ZoneId.systemDefault()); - Statement statement = - StatementGenerator.createStatement(sql.getSql(), ZoneId.systemDefault()); + if (statement == null) { + return Response.ok() + .entity( + new org.apache.iotdb.db.protocol.rest.model.ExecutionStatus() + .code(TSStatusCode.SQL_PARSE_ERROR.getStatusCode()) + .message("This operation type is not supported")) + .build(); + } if (ExecuteStatementHandler.validateStatement(statement)) { return Response.ok() @@ -153,7 +188,9 @@ public Response executeQueryStatement(SQL sql, SecurityContext securityContext) sql.getSql(), partitionFetcher, schemaFetcher, - config.getQueryTimeoutThreshold()); + config.getQueryTimeoutThreshold(), + true); + finish = true; if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode() && result.status.code != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) { return Response.ok() @@ -171,9 +208,22 @@ public Response executeQueryStatement(SQL sql, SecurityContext securityContext) sql.getRowLimit() == null ? 
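The v2 RestApiServiceImpl changes here track the v1 ones hunk for hunk: the same null-parse guard returning SQL_PARSE_ERROR, the same finally-block metric accounting, and the same trailing boolean appended after the timeout argument. Judging from the call sites throughout this patch (true on user-facing query paths, false on writes, batches, and internal executions such as CQs), that flag appears to mark user queries, plausibly to gate the query-sampling logger introduced later in the patch; the patch itself does not name the parameter in these hunks, so that reading is an inference.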
defaultQueryRowLimit : sql.getRowLimit()); } } catch (Exception e) { + finish = true; return Response.ok().entity(ExceptionHandler.tryCatchException(e)).build(); } finally { + long costTime = System.nanoTime() - startTime; + Optional.ofNullable(statement) + .ifPresent( + s -> { + CommonUtils.addStatementExecutionLatency( + OperationType.EXECUTE_QUERY_STATEMENT, s.getType().name(), costTime); + }); if (queryId != null) { + if (finish) { + long executionTime = COORDINATOR.getTotalExecutionTime(queryId); + CommonUtils.addQueryLatency( + statement.getType(), executionTime > 0 ? executionTime : costTime); + } COORDINATOR.cleanupQueryExecution(queryId); } } @@ -183,10 +233,12 @@ public Response executeQueryStatement(SQL sql, SecurityContext securityContext) public Response insertRecords( InsertRecordsRequest insertRecordsRequest, SecurityContext securityContext) { Long queryId = null; + long startTime = System.nanoTime(); + InsertRowsStatement insertRowsStatement = null; try { RequestValidationHandler.validateInsertRecordsRequest(insertRecordsRequest); - InsertRowsStatement insertRowsStatement = + insertRowsStatement = StatementConstructionHandler.createInsertRowsStatement(insertRecordsRequest); Response response = authorizationHandler.checkAuthority(securityContext, insertRowsStatement); @@ -202,12 +254,20 @@ public Response insertRecords( "", partitionFetcher, schemaFetcher, - config.getQueryTimeoutThreshold()); + config.getQueryTimeoutThreshold(), + false); return responseGenerateHelper(result); } catch (Exception e) { return Response.ok().entity(ExceptionHandler.tryCatchException(e)).build(); } finally { + long costTime = System.nanoTime() - startTime; + Optional.ofNullable(insertRowsStatement) + .ifPresent( + s -> { + CommonUtils.addStatementExecutionLatency( + OperationType.INSERT_RECORDS, s.getType().name(), costTime); + }); if (queryId != null) { COORDINATOR.cleanupQueryExecution(queryId); } @@ -218,6 +278,8 @@ public Response insertRecords( public Response insertTablet( InsertTabletRequest insertTabletRequest, SecurityContext securityContext) { Long queryId = null; + long startTime = System.nanoTime(); + InsertTabletStatement insertTabletStatement = null; try { RequestValidationHandler.validateInsertTabletRequest(insertTabletRequest); @@ -231,7 +293,7 @@ public Response insertTablet( insertTabletRequest.getValues(), index, insertTabletRequest.getDataTypes().size())); } - InsertTabletStatement insertTabletStatement = + insertTabletStatement = StatementConstructionHandler.constructInsertTabletStatement(insertTabletRequest); Response response = @@ -248,12 +310,19 @@ public Response insertTablet( "", partitionFetcher, schemaFetcher, - config.getQueryTimeoutThreshold()); - + config.getQueryTimeoutThreshold(), + false); return responseGenerateHelper(result); } catch (Exception e) { return Response.ok().entity(ExceptionHandler.tryCatchException(e)).build(); } finally { + long costTime = System.nanoTime() - startTime; + Optional.ofNullable(insertTabletStatement) + .ifPresent( + s -> { + CommonUtils.addStatementExecutionLatency( + OperationType.INSERT_TABLET, s.getType().name(), costTime); + }); if (queryId != null) { COORDINATOR.cleanupQueryExecution(queryId); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/session/IClientSession.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/session/IClientSession.java index c79d1573a0a6f..c8e0b96a7c66f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/session/IClientSession.java 
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/session/IClientSession.java @@ -45,7 +45,7 @@ public abstract class IClientSession { public abstract String getClientAddress(); - abstract int getClientPort(); + public abstract int getClientPort(); abstract TSConnectionType getConnectionType(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/session/RestClientSession.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/session/RestClientSession.java index 30ca7509d6006..fa830ace3fbcf 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/session/RestClientSession.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/session/RestClientSession.java @@ -38,7 +38,7 @@ public String getClientAddress() { } @Override - int getClientPort() { + public int getClientPort() { return 0; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/session/SessionManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/session/SessionManager.java index 647a4729bde01..e32034c9ab07f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/session/SessionManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/session/SessionManager.java @@ -309,11 +309,19 @@ public TimeZone getSessionTimeZone() { */ public void removeCurrSession() { IClientSession session = currSession.get(); - sessions.remove(session); + if (session != null) { + sessions.remove(session); + } currSession.remove(); currSessionIdleTime.remove(); } + public void removeCurrSessionForMqtt(MqttClientSession mqttClientSession) { + if (mqttClientSession != null) { + sessions.remove(mqttClientSession); + } + } + /** * this method can be only used in client-thread model. Do not use this method in message-thread * model based service. @@ -331,6 +339,14 @@ public boolean registerSession(IClientSession session) { return true; } + /** + * this method can be only used in mqtt model. Do not use this method in client-thread model based + * service. + */ + public void registerSessionForMqtt(IClientSession session) { + sessions.put(session, placeHolder); + } + /** must be called after registerSession()) will mark the session login. 
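The SessionManager hunks above do two related things: removeCurrSession now tolerates a null thread-local (a connection can drop before a session was ever registered), and MQTT gets dedicated registerSessionForMqtt/removeCurrSessionForMqtt entry points, because MQTT handling is message-driven and has no stable worker thread whose ThreadLocal could own the session. A minimal sketch of a registry supporting both models (the generic type stands in for IClientSession):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Minimal sketch of a registry serving both the thread-bound model
    // (client-thread services) and explicit registration (message-driven MQTT).
    final class SessionRegistry<S> {
      private final Map<S, Object> sessions = new ConcurrentHashMap<>();
      private final Object placeHolder = new Object();
      private final ThreadLocal<S> currSession = new ThreadLocal<>();

      // Client-thread model: the current thread owns exactly one session.
      void registerCurrent(S session) {
        currSession.set(session);
        sessions.put(session, placeHolder);
      }

      void removeCurrent() {
        S session = currSession.get();
        if (session != null) { // guard: this thread may never have registered
          sessions.remove(session);
        }
        currSession.remove();
      }

      // Message-thread model (e.g. MQTT): no ThreadLocal, caller passes the session.
      void registerExplicit(S session) {
        sessions.put(session, placeHolder);
      }

      void removeExplicit(S session) {
        if (session != null) {
          sessions.remove(session);
        }
      }
    }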
*/ public void supplySession( IClientSession session, diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/ClientRPCServiceImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/ClientRPCServiceImpl.java index e74319c9a7adc..3c710d0a82f82 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/ClientRPCServiceImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/ClientRPCServiceImpl.java @@ -39,9 +39,6 @@ import org.apache.iotdb.commons.path.AlignedPath; import org.apache.iotdb.commons.path.MeasurementPath; import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.service.metric.MetricService; -import org.apache.iotdb.commons.service.metric.enums.Metric; -import org.apache.iotdb.commons.service.metric.enums.Tag; import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.commons.utils.TimePartitionUtils; import org.apache.iotdb.db.audit.AuditLogger; @@ -56,9 +53,6 @@ import org.apache.iotdb.db.protocol.session.IClientSession; import org.apache.iotdb.db.protocol.session.SessionManager; import org.apache.iotdb.db.protocol.thrift.OperationType; -import org.apache.iotdb.db.queryengine.common.FragmentInstanceId; -import org.apache.iotdb.db.queryengine.common.PlanFragmentId; -import org.apache.iotdb.db.queryengine.common.QueryId; import org.apache.iotdb.db.queryengine.common.SessionInfo; import org.apache.iotdb.db.queryengine.common.header.ColumnHeader; import org.apache.iotdb.db.queryengine.common.header.DatasetHeader; @@ -66,14 +60,12 @@ import org.apache.iotdb.db.queryengine.execution.aggregation.AccumulatorFactory; import org.apache.iotdb.db.queryengine.execution.aggregation.Aggregator; import org.apache.iotdb.db.queryengine.execution.driver.DriverContext; -import org.apache.iotdb.db.queryengine.execution.fragment.FragmentInstanceContext; -import org.apache.iotdb.db.queryengine.execution.fragment.FragmentInstanceManager; -import org.apache.iotdb.db.queryengine.execution.fragment.FragmentInstanceStateMachine; +import org.apache.iotdb.db.queryengine.execution.fragment.FakedFragmentInstanceContext; +import org.apache.iotdb.db.queryengine.execution.operator.OperatorContext; import org.apache.iotdb.db.queryengine.execution.operator.process.last.LastQueryUtil; import org.apache.iotdb.db.queryengine.execution.operator.source.AbstractSeriesAggregationScanOperator; import org.apache.iotdb.db.queryengine.execution.operator.source.AlignedSeriesAggregationScanOperator; import org.apache.iotdb.db.queryengine.execution.operator.source.SeriesAggregationScanOperator; -import org.apache.iotdb.db.queryengine.execution.operator.source.SeriesScanOperator; import org.apache.iotdb.db.queryengine.plan.Coordinator; import org.apache.iotdb.db.queryengine.plan.analyze.ClusterPartitionFetcher; import org.apache.iotdb.db.queryengine.plan.analyze.IPartitionFetcher; @@ -112,13 +104,14 @@ import org.apache.iotdb.db.schemaengine.template.TemplateQueryType; import org.apache.iotdb.db.storageengine.StorageEngine; import org.apache.iotdb.db.storageengine.dataregion.DataRegion; +import org.apache.iotdb.db.storageengine.dataregion.read.QueryDataSource; import org.apache.iotdb.db.storageengine.rescon.quotas.DataNodeThrottleQuotaManager; import org.apache.iotdb.db.storageengine.rescon.quotas.OperationQuota; import org.apache.iotdb.db.subscription.agent.SubscriptionAgent; +import org.apache.iotdb.db.utils.CommonUtils; import 
org.apache.iotdb.db.utils.QueryDataSetUtils; import org.apache.iotdb.db.utils.SchemaUtils; import org.apache.iotdb.db.utils.SetThreadName; -import org.apache.iotdb.metrics.utils.MetricLevel; import org.apache.iotdb.rpc.RpcUtils; import org.apache.iotdb.rpc.TSStatusCode; import org.apache.iotdb.service.rpc.thrift.ServerProperties; @@ -208,9 +201,9 @@ import static org.apache.iotdb.commons.partition.DataPartition.NOT_ASSIGNED; import static org.apache.iotdb.db.queryengine.common.DataNodeEndPoints.isSameNode; -import static org.apache.iotdb.db.queryengine.execution.fragment.FragmentInstanceContext.createFragmentInstanceContext; import static org.apache.iotdb.db.queryengine.execution.operator.AggregationUtil.initTimeRangeIterator; import static org.apache.iotdb.db.utils.CommonUtils.getContentOfRequest; +import static org.apache.iotdb.db.utils.CommonUtils.getContentOfTSFastLastDataQueryForOneDeviceReq; import static org.apache.iotdb.db.utils.ErrorHandlingUtils.onIoTDBException; import static org.apache.iotdb.db.utils.ErrorHandlingUtils.onNpeOrUnexpectedException; import static org.apache.iotdb.db.utils.ErrorHandlingUtils.onQueryException; @@ -222,6 +215,11 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler { private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig(); + private static final CommonConfig COMMON_CONFIG = CommonDescriptor.getInstance().getConfig(); + + private static final Logger SAMPLED_QUERIES_LOGGER = + LoggerFactory.getLogger(IoTDBConstant.SAMPLED_QUERIES_LOGGER_NAME); + private static final Coordinator COORDINATOR = Coordinator.getInstance(); private static final SessionManager SESSION_MANAGER = SessionManager.getInstance(); @@ -241,7 +239,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler { private final DataNodeSchemaCache DATA_NODE_SCHEMA_CACHE = DataNodeSchemaCache.getInstance(); - public static Duration DEFAULT_TIME_SLICE = new Duration(60_000, TimeUnit.MILLISECONDS); + public static final Duration DEFAULT_TIME_SLICE = new Duration(60_000, TimeUnit.MILLISECONDS); private static final int DEFAULT_MAX_TSBLOCK_SIZE_IN_BYTES = TSFileDescriptor.getInstance().getConfig().getMaxTsBlockSizeInBytes(); @@ -319,7 +317,8 @@ private TSExecuteStatementResp executeStatementInternal( statement, partitionFetcher, schemaFetcher, - req.getTimeout()); + req.getTimeout(), + true); if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode() && result.status.code != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) { @@ -358,14 +357,15 @@ private TSExecuteStatementResp executeStatementInternal( // record each operation time cost if (statementType != null) { - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.EXECUTE_QUERY_STATEMENT, statementType.name(), currentOperationCost); } if (finished) { // record total time cost for one query long executionTime = COORDINATOR.getTotalExecutionTime(queryId); - addQueryLatency(statementType, executionTime > 0 ? executionTime : currentOperationCost); + CommonUtils.addQueryLatency( + statementType, executionTime > 0 ? 
executionTime : currentOperationCost); COORDINATOR.cleanupQueryExecution(queryId, req, t); } SESSION_MANAGER.updateIdleTime(); @@ -412,7 +412,8 @@ private TSExecuteStatementResp executeRawDataQueryInternal( "", partitionFetcher, schemaFetcher, - req.getTimeout()); + req.getTimeout(), + true); if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { finished = true; @@ -448,13 +449,13 @@ private TSExecuteStatementResp executeRawDataQueryInternal( COORDINATOR.recordExecutionTime(queryId, currentOperationCost); // record each operation time cost - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.EXECUTE_RAW_DATA_QUERY, StatementType.QUERY.name(), currentOperationCost); if (finished) { // record total time cost for one query long executionTime = COORDINATOR.getTotalExecutionTime(queryId); - addQueryLatency( + CommonUtils.addQueryLatency( StatementType.QUERY, executionTime > 0 ? executionTime : currentOperationCost); COORDINATOR.cleanupQueryExecution(queryId, req, t); } @@ -502,7 +503,8 @@ private TSExecuteStatementResp executeLastDataQueryInternal( "", partitionFetcher, schemaFetcher, - req.getTimeout()); + req.getTimeout(), + true); if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { finished = true; @@ -540,13 +542,13 @@ private TSExecuteStatementResp executeLastDataQueryInternal( COORDINATOR.recordExecutionTime(queryId, currentOperationCost); // record each operation time cost - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.EXECUTE_LAST_DATA_QUERY, StatementType.QUERY.name(), currentOperationCost); if (finished) { // record total time cost for one query long executionTime = COORDINATOR.getTotalExecutionTime(queryId); - addQueryLatency( + CommonUtils.addQueryLatency( StatementType.QUERY, executionTime > 0 ? executionTime : currentOperationCost); COORDINATOR.cleanupQueryExecution(queryId, req, t); } @@ -591,7 +593,8 @@ private TSExecuteStatementResp executeAggregationQueryInternal( "", partitionFetcher, schemaFetcher, - req.getTimeout()); + req.getTimeout(), + true); if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { finished = true; @@ -629,13 +632,13 @@ private TSExecuteStatementResp executeAggregationQueryInternal( COORDINATOR.recordExecutionTime(queryId, currentOperationCost); // record each operation time cost - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.EXECUTE_AGG_QUERY, StatementType.QUERY.name(), currentOperationCost); if (finished) { // record total time cost for one query long executionTime = COORDINATOR.getTotalExecutionTime(queryId); - addQueryLatency( + CommonUtils.addQueryLatency( StatementType.QUERY, executionTime > 0 ? 
executionTime : currentOperationCost); COORDINATOR.cleanupQueryExecution(queryId, req, t); } @@ -647,6 +650,9 @@ private TSExecuteStatementResp executeAggregationQueryInternal( } } + private final List inputLocationList = + Collections.singletonList(new InputLocation[] {new InputLocation(0, 0)}); + @SuppressWarnings("java:S2095") // close() do nothing private List executeGroupByQueryInternal( SessionInfo sessionInfo, @@ -669,21 +675,14 @@ private List executeGroupByQueryInternal( Filter timeFilter = TimeFilterApi.between(startTime, endTime - 1); - QueryId queryId = new QueryId("stub_query"); - FragmentInstanceId instanceId = - new FragmentInstanceId(new PlanFragmentId(queryId, 0), "stub-instance"); - FragmentInstanceStateMachine stateMachine = - new FragmentInstanceStateMachine( - instanceId, FragmentInstanceManager.getInstance().instanceNotificationExecutor); - FragmentInstanceContext fragmentInstanceContext = - createFragmentInstanceContext( - instanceId, stateMachine, sessionInfo, dataRegionList.get(0), timeFilter); + FakedFragmentInstanceContext fragmentInstanceContext = + new FakedFragmentInstanceContext(timeFilter, dataRegionList.get(0)); + DriverContext driverContext = new DriverContext(fragmentInstanceContext, 0); PlanNodeId planNodeId = new PlanNodeId("1"); - driverContext.addOperatorContext(1, planNodeId, SeriesScanOperator.class.getSimpleName()); - driverContext - .getOperatorContexts() - .forEach(operatorContext -> operatorContext.setMaxRunTime(DEFAULT_TIME_SLICE)); + OperatorContext operatorContext = + new OperatorContext(1, planNodeId, "SeriesAggregationScanOperator", driverContext); + operatorContext.setMaxRunTime(DEFAULT_TIME_SLICE); SeriesScanOptions.Builder scanOptionsBuilder = new SeriesScanOptions.Builder(); scanOptionsBuilder.withAllSensors(Collections.singleton(measurement)); @@ -701,7 +700,7 @@ private List executeGroupByQueryInternal( true, true), AggregationStep.SINGLE, - Collections.singletonList(new InputLocation[] {new InputLocation(0, 0)})); + inputLocationList); GroupByTimeParameter groupByTimeParameter = new GroupByTimeParameter( @@ -709,11 +708,15 @@ private List executeGroupByQueryInternal( IMeasurementSchema measurementSchema = new MeasurementSchema(measurement, dataType); AbstractSeriesAggregationScanOperator operator; + boolean canUseStatistics = + !TSDataType.BLOB.equals(dataType) + || (!TAggregationType.LAST_VALUE.equals(aggregationType) + && !TAggregationType.FIRST_VALUE.equals(aggregationType)); PartialPath path; if (isAligned) { path = new AlignedPath( - device, + device.split("\\."), Collections.singletonList(measurement), Collections.singletonList(measurementSchema)); operator = @@ -722,36 +725,36 @@ private List executeGroupByQueryInternal( (AlignedPath) path, Ordering.ASC, scanOptionsBuilder.build(), - driverContext.getOperatorContexts().get(0), + operatorContext, Collections.singletonList(aggregator), - initTimeRangeIterator(groupByTimeParameter, true, true), + initTimeRangeIterator(groupByTimeParameter, true, true, sessionInfo.getZoneId()), groupByTimeParameter, DEFAULT_MAX_TSBLOCK_SIZE_IN_BYTES, - !TSDataType.BLOB.equals(dataType) - || (!TAggregationType.LAST_VALUE.equals(aggregationType) - && !TAggregationType.FIRST_VALUE.equals(aggregationType))); + canUseStatistics); } else { - path = new MeasurementPath(device, measurement, measurementSchema); + String[] splits = device.split("\\."); + String[] fullPaths = new String[splits.length + 1]; + System.arraycopy(splits, 0, fullPaths, 0, splits.length); + fullPaths[splits.length] = measurement; + 
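Two small refactors in the group-by hunk above are easy to miss: the statistics-eligibility predicate is hoisted into a canUseStatistics local so the aligned and non-aligned branches share it (BLOB is excluded for FIRST_VALUE/LAST_VALUE, presumably because file-level statistics do not retain full binary values), and the device string is now split into path nodes before constructing AlignedPath/MeasurementPath. A sketch of both, using plain strings where the real code has TSDataType and TAggregationType enums:

    import java.util.Arrays;

    // Sketch of the two refactors in the group-by hunk above; enum values are
    // modelled as strings for self-containment.
    final class PathNodes {
      // BLOB columns cannot answer FIRST/LAST_VALUE from statistics alone.
      static boolean canUseStatistics(String dataType, String aggregationType) {
        return !"BLOB".equals(dataType)
            || (!"LAST_VALUE".equals(aggregationType) && !"FIRST_VALUE".equals(aggregationType));
      }

      // "root.sg.d1" + "s1" -> ["root", "sg", "d1", "s1"], mirroring the
      // split/arraycopy sequence the hunk uses to build the full path array.
      static String[] toFullPath(String device, String measurement) {
        String[] splits = device.split("\\.");
        String[] fullPaths = new String[splits.length + 1];
        System.arraycopy(splits, 0, fullPaths, 0, splits.length);
        fullPaths[splits.length] = measurement;
        return fullPaths;
      }

      public static void main(String[] args) {
        System.out.println(Arrays.toString(toFullPath("root.sg.d1", "s1")));
        System.out.println(canUseStatistics("BLOB", "LAST_VALUE")); // false
      }
    }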
path = new MeasurementPath(fullPaths, measurementSchema); operator = new SeriesAggregationScanOperator( planNodeId, path, Ordering.ASC, scanOptionsBuilder.build(), - driverContext.getOperatorContexts().get(0), + operatorContext, Collections.singletonList(aggregator), - initTimeRangeIterator(groupByTimeParameter, true, true), + initTimeRangeIterator(groupByTimeParameter, true, true, sessionInfo.getZoneId()), groupByTimeParameter, DEFAULT_MAX_TSBLOCK_SIZE_IN_BYTES, - !TSDataType.BLOB.equals(dataType) - || (!TAggregationType.LAST_VALUE.equals(aggregationType) - && !TAggregationType.FIRST_VALUE.equals(aggregationType))); + canUseStatistics); } try { List result = new ArrayList<>(); - fragmentInstanceContext.setSourcePaths(Collections.singletonList(path)); - operator.initQueryDataSource(fragmentInstanceContext.getSharedQueryDataSource()); + QueryDataSource dataSource = fragmentInstanceContext.getSharedQueryDataSource(path); + operator.initQueryDataSource(dataSource); while (operator.hasNext()) { result.add(operator.next()); @@ -761,7 +764,7 @@ private List executeGroupByQueryInternal( } catch (Exception e) { throw new RuntimeException(e); } finally { - fragmentInstanceContext.releaseResource(); + fragmentInstanceContext.releaseSharedQueryDataSource(); } } @@ -836,6 +839,7 @@ public TSExecuteStatementResp executeFastLastDataQueryForOneDeviceV2( resp.setQueryResult(Collections.emptyList()); finished = true; resp.setMoreData(false); + sampleForCacheHitFastLastDataQueryForOneDevice(req); return resp; } @@ -899,6 +903,7 @@ public TSExecuteStatementResp executeFastLastDataQueryForOneDeviceV2( } finished = true; resp.setMoreData(false); + sampleForCacheHitFastLastDataQueryForOneDevice(req); return resp; } } @@ -927,7 +932,8 @@ public TSExecuteStatementResp executeFastLastDataQueryForOneDeviceV2( "", partitionFetcher, schemaFetcher, - req.getTimeout()); + req.getTimeout(), + true); if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { finished = true; @@ -972,13 +978,13 @@ public TSExecuteStatementResp executeFastLastDataQueryForOneDeviceV2( COORDINATOR.recordExecutionTime(queryId, currentOperationCost); // record each operation time cost - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.EXECUTE_LAST_DATA_QUERY, StatementType.QUERY.name(), currentOperationCost); if (finished) { // record total time cost for one query long executionTime = COORDINATOR.getTotalExecutionTime(queryId); - addQueryLatency( + CommonUtils.addQueryLatency( StatementType.QUERY, executionTime > 0 ? 
executionTime : currentOperationCost); COORDINATOR.cleanupQueryExecution(queryId, req, t); } @@ -1003,6 +1009,22 @@ private TSLastDataQueryReq convert(TSFastLastDataQueryForOneDeviceReq req) { return tsLastDataQueryReq; } + private static void sampleForCacheHitFastLastDataQueryForOneDevice( + TSFastLastDataQueryForOneDeviceReq req) { + // only sample successful query + if (COMMON_CONFIG.isEnableQuerySampling()) { // sampling is enabled + String queryRequest = getContentOfTSFastLastDataQueryForOneDeviceReq(req); + if (COMMON_CONFIG.isQuerySamplingHasRateLimit()) { + if (COMMON_CONFIG.getQuerySamplingRateLimiter().tryAcquire(queryRequest.length())) { + SAMPLED_QUERIES_LOGGER.info(queryRequest); + } + } else { + // no limit, always sampled + SAMPLED_QUERIES_LOGGER.info(queryRequest); + } + } + } + @Override public TSExecuteStatementResp executeAggregationQueryV2(TSAggregationQueryReq req) { return executeAggregationQueryInternal(req, SELECT_RESULT); @@ -1048,7 +1070,7 @@ public TSExecuteStatementResp executeGroupByQueryIntervalQuery(TSGroupByQueryInt deviceId, measurementId, dataType, - true, + req.isAligned, req.getStartTime(), req.getEndTime(), req.getInterval(), @@ -1125,13 +1147,13 @@ public TSFetchResultsResp fetchResultsV2(TSFetchResultsReq req) { COORDINATOR.recordExecutionTime(req.queryId, currentOperationCost); // record each operation time cost - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.FETCH_RESULTS, statementType, currentOperationCost); if (finished) { // record total time cost for one query long executionTime = COORDINATOR.getTotalExecutionTime(req.queryId); - addQueryLatency( + CommonUtils.addQueryLatency( StatementType.QUERY, executionTime > 0 ? executionTime : currentOperationCost); COORDINATOR.cleanupQueryExecution(req.queryId, req, t); } @@ -1558,19 +1580,18 @@ public TSStatus executeBatchStatement(TSExecuteBatchStatementReq req) { statement, partitionFetcher, schemaFetcher, - config.getQueryTimeoutThreshold()); + config.getQueryTimeoutThreshold(), + false); results.add(result.status); } catch (Exception e) { LOGGER.warn("Error occurred when executing executeBatchStatement: ", e); TSStatus status = onQueryException( e, "\"" + statement + "\". " + OperationType.EXECUTE_BATCH_STATEMENT); - if (status.getCode() != TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()) { - isAllSuccessful = false; - } + isAllSuccessful = false; results.add(status); } finally { - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.EXECUTE_STATEMENT, type, System.nanoTime() - t2); if (quota != null) { quota.close(); @@ -1578,7 +1599,7 @@ public TSStatus executeBatchStatement(TSExecuteBatchStatementReq req) { } } } finally { - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.EXECUTE_BATCH_STATEMENT, StatementType.NULL.name(), System.nanoTime() - t1); SESSION_MANAGER.updateIdleTime(); } @@ -1648,13 +1669,13 @@ public TSFetchResultsResp fetchResults(TSFetchResultsReq req) { COORDINATOR.recordExecutionTime(req.queryId, currentOperationCost); // record each operation time cost - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.FETCH_RESULTS, statementType, currentOperationCost); if (finished) { // record total time cost for one query long executionTime = COORDINATOR.getTotalExecutionTime(req.queryId); - addQueryLatency( + CommonUtils.addQueryLatency( StatementType.QUERY, executionTime > 0 ? 
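sampleForCacheHitFastLastDataQueryForOneDevice, added above, closes a sampling gap: fast-path last-value queries answered straight from the cache return early and would otherwise never reach the sampled-queries log. The guard honours the same two switches as the main path: sampling must be enabled, and when a rate limit is configured, permits are acquired per character of the rendered request. A sketch of the guard, assuming a Guava-style RateLimiter on the classpath (the permits-per-second figure is illustrative):

    import com.google.common.util.concurrent.RateLimiter;

    // Sketch of the sampling guard above. Acquiring one permit per character
    // effectively caps sampled-query throughput in characters per second.
    final class QuerySampler {
      private final boolean enabled;
      private final RateLimiter limiter; // null means "no limit, always sample"

      QuerySampler(boolean enabled, RateLimiter limiter) {
        this.enabled = enabled;
        this.limiter = limiter;
      }

      void sample(String queryRequest) {
        if (!enabled) {
          return; // only sample when the feature is switched on
        }
        if (limiter == null || limiter.tryAcquire(queryRequest.length())) {
          System.out.println("SAMPLED: " + queryRequest); // stand-in for the sampled-queries logger
        }
      }

      public static void main(String[] args) {
        QuerySampler sampler = new QuerySampler(true, RateLimiter.create(1024.0)); // ~1 K chars/s
        sampler.sample("select last(*) from root.sg.d1");
      }
    }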
executionTime : currentOperationCost); COORDINATOR.cleanupQueryExecution(req.queryId, req, t); } @@ -1721,7 +1742,7 @@ public TSStatus insertRecords(TSInsertRecordsReq req) { return onNpeOrUnexpectedException( e, OperationType.INSERT_RECORDS, TSStatusCode.EXECUTE_STATEMENT_ERROR); } finally { - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.INSERT_RECORDS, StatementType.BATCH_INSERT_ROWS.name(), System.nanoTime() - t1); @@ -1790,7 +1811,7 @@ public TSStatus insertRecordsOfOneDevice(TSInsertRecordsOfOneDeviceReq req) { return onNpeOrUnexpectedException( e, OperationType.INSERT_RECORDS_OF_ONE_DEVICE, TSStatusCode.EXECUTE_STATEMENT_ERROR); } finally { - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.INSERT_RECORDS_OF_ONE_DEVICE, StatementType.BATCH_INSERT_ONE_DEVICE.name(), System.nanoTime() - t1); @@ -1861,7 +1882,7 @@ public TSStatus insertStringRecordsOfOneDevice(TSInsertStringRecordsOfOneDeviceR OperationType.INSERT_STRING_RECORDS_OF_ONE_DEVICE, TSStatusCode.EXECUTE_STATEMENT_ERROR); } finally { - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.INSERT_STRING_RECORDS_OF_ONE_DEVICE, StatementType.BATCH_INSERT_ONE_DEVICE.name(), System.nanoTime() - t1); @@ -1927,7 +1948,7 @@ public TSStatus insertRecord(TSInsertRecordReq req) { return onNpeOrUnexpectedException( e, OperationType.INSERT_RECORD, TSStatusCode.EXECUTE_STATEMENT_ERROR); } finally { - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.INSERT_RECORD, StatementType.INSERT.name(), System.nanoTime() - t1); SESSION_MANAGER.updateIdleTime(); if (quota != null) { @@ -1984,7 +2005,7 @@ public TSStatus insertTablets(TSInsertTabletsReq req) { return onNpeOrUnexpectedException( e, OperationType.INSERT_TABLETS, TSStatusCode.EXECUTE_STATEMENT_ERROR); } finally { - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.INSERT_TABLETS, StatementType.MULTI_BATCH_INSERT.name(), System.nanoTime() - t1); @@ -2042,7 +2063,7 @@ public TSStatus insertTablet(TSInsertTabletReq req) { return onNpeOrUnexpectedException( e, OperationType.INSERT_TABLET, TSStatusCode.EXECUTE_STATEMENT_ERROR); } finally { - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.INSERT_TABLET, StatementType.BATCH_INSERT.name(), System.nanoTime() - t1); SESSION_MANAGER.updateIdleTime(); if (quota != null) { @@ -2107,7 +2128,7 @@ public TSStatus insertStringRecords(TSInsertStringRecordsReq req) { return onNpeOrUnexpectedException( e, OperationType.INSERT_STRING_RECORDS, TSStatusCode.EXECUTE_STATEMENT_ERROR); } finally { - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.INSERT_STRING_RECORDS, StatementType.BATCH_INSERT_ROWS.name(), System.nanoTime() - t1); @@ -2414,7 +2435,8 @@ private TSQueryTemplateResp executeTemplateQueryStatement( null, partitionFetcher, schemaFetcher, - config.getQueryTimeoutThreshold()); + config.getQueryTimeoutThreshold(), + true); if (executionResult.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode() && executionResult.status.code != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) { @@ -2450,7 +2472,7 @@ private TSQueryTemplateResp executeTemplateQueryStatement( onQueryException(e, "\"" + statement + "\". 
" + OperationType.EXECUTE_STATEMENT)); return null; } finally { - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.EXECUTE_STATEMENT, statement.getType().name(), System.nanoTime() - startTime); @@ -2740,7 +2762,7 @@ public TSStatus insertStringRecord(final TSInsertStringRecordReq req) { return onNpeOrUnexpectedException( e, OperationType.INSERT_STRING_RECORD, TSStatusCode.EXECUTE_STATEMENT_ERROR); } finally { - addStatementExecutionLatency( + CommonUtils.addStatementExecutionLatency( OperationType.INSERT_STRING_RECORD, StatementType.INSERT.name(), System.nanoTime() - t1); SESSION_MANAGER.updateIdleTime(); if (quota != null) { @@ -2768,43 +2790,6 @@ protected TSStatus getNotLoggedInStatus() { "Log in failed. Either you are not authorized or the session has timed out."); } - /** Add stat of whole stage query into metrics */ - private void addQueryLatency(StatementType statementType, long costTimeInNanos) { - if (statementType == null) { - return; - } - - MetricService.getInstance() - .timer( - costTimeInNanos, - TimeUnit.NANOSECONDS, - Metric.PERFORMANCE_OVERVIEW.toString(), - MetricLevel.CORE, - Tag.INTERFACE.toString(), - OperationType.QUERY_LATENCY.toString(), - Tag.TYPE.toString(), - statementType.name()); - } - - /** Add stat of operation into metrics */ - private void addStatementExecutionLatency( - OperationType operation, String statementType, long costTime) { - if (statementType == null) { - return; - } - - MetricService.getInstance() - .timer( - costTime, - TimeUnit.NANOSECONDS, - Metric.PERFORMANCE_OVERVIEW.toString(), - MetricLevel.CORE, - Tag.INTERFACE.toString(), - operation.toString(), - Tag.TYPE.toString(), - statementType); - } - private String checkIdentifierAndRemoveBackQuotesIfNecessary(String identifier) { return identifier == null ? 
null : ASTVisitor.parseIdentifier(identifier); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java index ac2bf85ad755d..d4c35b700cbc7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java @@ -24,7 +24,9 @@ import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TFlushReq; +import org.apache.iotdb.common.rpc.thrift.TLoadSample; import org.apache.iotdb.common.rpc.thrift.TNodeLocations; +import org.apache.iotdb.common.rpc.thrift.TPipeHeartbeatResp; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.common.rpc.thrift.TSender; import org.apache.iotdb.common.rpc.thrift.TServiceType; @@ -36,8 +38,12 @@ import org.apache.iotdb.common.rpc.thrift.TShowConfigurationResp; import org.apache.iotdb.common.rpc.thrift.TTestConnectionResp; import org.apache.iotdb.common.rpc.thrift.TTestConnectionResult; +import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; import org.apache.iotdb.commons.client.request.AsyncRequestContext; import org.apache.iotdb.commons.cluster.NodeStatus; +import org.apache.iotdb.commons.concurrent.IoTThreadFactory; +import org.apache.iotdb.commons.concurrent.ThreadName; +import org.apache.iotdb.commons.concurrent.threadpool.WrappedThreadPoolExecutor; import org.apache.iotdb.commons.conf.CommonConfig; import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.conf.ConfigurationFileUtils; @@ -52,12 +58,14 @@ import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathDeserializeUtil; import org.apache.iotdb.commons.path.PathPatternTree; -import org.apache.iotdb.commons.pipe.plugin.meta.PipePluginMeta; -import org.apache.iotdb.commons.pipe.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.plugin.meta.PipePluginMeta; +import org.apache.iotdb.commons.pipe.agent.task.PipeTaskAgent; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; import org.apache.iotdb.commons.schema.SchemaConstant; import org.apache.iotdb.commons.schema.view.viewExpression.ViewExpression; import org.apache.iotdb.commons.service.metric.MetricService; import org.apache.iotdb.commons.service.metric.enums.Tag; +import org.apache.iotdb.commons.subscription.config.SubscriptionConfig; import org.apache.iotdb.commons.subscription.meta.consumer.ConsumerGroupMeta; import org.apache.iotdb.commons.subscription.meta.topic.TopicMeta; import org.apache.iotdb.commons.trigger.TriggerInformation; @@ -69,7 +77,6 @@ import org.apache.iotdb.consensus.exception.ConsensusGroupAlreadyExistException; import org.apache.iotdb.consensus.exception.ConsensusGroupNotExistException; import org.apache.iotdb.db.auth.AuthorityChecker; -import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.consensus.DataRegionConsensusImpl; import org.apache.iotdb.db.consensus.SchemaRegionConsensusImpl; @@ -143,9 +150,11 @@ import org.apache.iotdb.db.schemaengine.template.TemplateInternalRPCUpdateType; import org.apache.iotdb.db.service.DataNode; import org.apache.iotdb.db.service.RegionMigrateService; +import 
org.apache.iotdb.db.service.metrics.FileMetrics; import org.apache.iotdb.db.storageengine.StorageEngine; import org.apache.iotdb.db.storageengine.dataregion.compaction.repair.RepairTaskStatus; import org.apache.iotdb.db.storageengine.dataregion.compaction.schedule.CompactionScheduleTaskManager; +import org.apache.iotdb.db.storageengine.dataregion.compaction.schedule.CompactionTaskManager; import org.apache.iotdb.db.storageengine.dataregion.compaction.settle.SettleRequestHandler; import org.apache.iotdb.db.storageengine.rescon.quotas.DataNodeSpaceQuotaManager; import org.apache.iotdb.db.storageengine.rescon.quotas.DataNodeThrottleQuotaManager; @@ -168,6 +177,7 @@ import org.apache.iotdb.mpp.rpc.thrift.TCheckSchemaRegionUsingTemplateResp; import org.apache.iotdb.mpp.rpc.thrift.TCheckTimeSeriesExistenceReq; import org.apache.iotdb.mpp.rpc.thrift.TCheckTimeSeriesExistenceResp; +import org.apache.iotdb.mpp.rpc.thrift.TCleanDataNodeCacheReq; import org.apache.iotdb.mpp.rpc.thrift.TConstructSchemaBlackListReq; import org.apache.iotdb.mpp.rpc.thrift.TConstructSchemaBlackListWithTemplateReq; import org.apache.iotdb.mpp.rpc.thrift.TConstructViewSchemaBlackListReq; @@ -185,7 +195,6 @@ import org.apache.iotdb.mpp.rpc.thrift.TDeleteDataForDeleteSchemaReq; import org.apache.iotdb.mpp.rpc.thrift.TDeleteTimeSeriesReq; import org.apache.iotdb.mpp.rpc.thrift.TDeleteViewSchemaReq; -import org.apache.iotdb.mpp.rpc.thrift.TDisableDataNodeReq; import org.apache.iotdb.mpp.rpc.thrift.TDropFunctionInstanceReq; import org.apache.iotdb.mpp.rpc.thrift.TDropPipePluginInstanceReq; import org.apache.iotdb.mpp.rpc.thrift.TDropTriggerInstanceReq; @@ -204,10 +213,9 @@ import org.apache.iotdb.mpp.rpc.thrift.TInvalidatePermissionCacheReq; import org.apache.iotdb.mpp.rpc.thrift.TLoadCommandReq; import org.apache.iotdb.mpp.rpc.thrift.TLoadResp; -import org.apache.iotdb.mpp.rpc.thrift.TLoadSample; import org.apache.iotdb.mpp.rpc.thrift.TMaintainPeerReq; +import org.apache.iotdb.mpp.rpc.thrift.TNotifyRegionMigrationReq; import org.apache.iotdb.mpp.rpc.thrift.TPipeHeartbeatReq; -import org.apache.iotdb.mpp.rpc.thrift.TPipeHeartbeatResp; import org.apache.iotdb.mpp.rpc.thrift.TPushConsumerGroupMetaReq; import org.apache.iotdb.mpp.rpc.thrift.TPushConsumerGroupMetaResp; import org.apache.iotdb.mpp.rpc.thrift.TPushConsumerGroupMetaRespExceptionMessage; @@ -268,9 +276,17 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -312,6 +328,18 @@ public class DataNodeInternalRPCServiceImpl implements IDataNodeRPCService.Iface private final CommonConfig commonConfig = CommonDescriptor.getInstance().getConfig(); + private final ExecutorService schemaExecutor = + new WrappedThreadPoolExecutor( + 0, + IoTDBDescriptor.getInstance().getConfig().getSchemaThreadCount(), + 0L, + TimeUnit.SECONDS, + new ArrayBlockingQueue<>( + IoTDBDescriptor.getInstance().getConfig().getSchemaThreadCount()), + new IoTThreadFactory(ThreadName.SCHEMA_PARALLEL_POOL.getName()), + ThreadName.SCHEMA_PARALLEL_POOL.getName(), + 
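The new schemaExecutor above is the machinery behind the parallelized schema tasks at the end of this file. A plain-JDK sketch of the same configuration (the real code wraps it in WrappedThreadPoolExecutor with an IoTThreadFactory and a named pool): a queue bounded to the thread count plus CallerRunsPolicy means that when the pool is saturated, the submitting RPC thread executes the region task itself, degrading gracefully toward the old sequential behaviour instead of queueing without bound or rejecting:

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    // Plain-JDK analogue of the schemaExecutor configured above.
    final class SchemaExecutorFactory {
      static ExecutorService create(int schemaThreadCount) {
        return new ThreadPoolExecutor(
            0,                                // no core threads kept when idle
            schemaThreadCount,                // hard cap on parallel region tasks
            0L, TimeUnit.SECONDS,             // surplus threads terminate immediately
            new ArrayBlockingQueue<>(schemaThreadCount),
            new ThreadPoolExecutor.CallerRunsPolicy()); // backpressure: caller runs the task
      }
    }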
new ThreadPoolExecutor.CallerRunsPolicy()); + private static final String SYSTEM = "system"; public DataNodeInternalRPCServiceImpl() { @@ -357,7 +385,7 @@ public TSendFragmentInstanceResp sendFragmentInstance(TSendFragmentInstanceReq r TSendFragmentInstanceResp resp = new TSendFragmentInstanceResp(); resp.setAccepted(executionResult.isAccepted()); resp.setMessage(executionResult.getMessage()); - resp.setNeedRetry(executionResult.isNeedRetry()); + resp.setNeedRetry(executionResult.isReadNeedRetry()); resp.setStatus(executionResult.getStatus()); return resp; } @@ -398,6 +426,7 @@ public TFragmentInstanceInfoResp fetchFragmentInstanceInfo(TFetchFragmentInstanc failureInfoList.add(failureInfo.serialize()); } resp.setFailureInfoList(failureInfoList); + info.getErrorCode().ifPresent(resp::setErrorCode); return resp; } catch (IOException e) { return resp; @@ -456,14 +485,18 @@ public TLoadResp sendTsFilePieceNode(TTsFilePieceReq req) { @Override public TLoadResp sendLoadCommand(TLoadCommandReq req) { - final ProgressIndex progressIndex; - if (req.isSetProgressIndex()) { - progressIndex = ProgressIndexType.deserializeFrom(ByteBuffer.wrap(req.getProgressIndex())); + final Map timePartitionProgressIndexMap = new HashMap<>(); + if (req.isSetTimePartition2ProgressIndex()) { + for (Map.Entry entry : + req.getTimePartition2ProgressIndex().entrySet()) { + timePartitionProgressIndexMap.put( + entry.getKey(), ProgressIndexType.deserializeFrom(entry.getValue())); + } } else { - // fallback to use local generated progress index for compatibility - progressIndex = PipeDataNodeAgent.runtime().getNextProgressIndexForTsFileLoad(); - LOGGER.info( - "Use local generated load progress index {} for uuid {}.", progressIndex, req.uuid); + final TSStatus status = new TSStatus(); + status.setCode(TSStatusCode.LOAD_FILE_ERROR.getStatusCode()); + status.setMessage("Load command requires time partition to progress index map"); + return createTLoadResp(status); } return createTLoadResp( @@ -472,7 +505,7 @@ public TLoadResp sendLoadCommand(TLoadCommandReq req) { LoadTsFileScheduler.LoadCommand.values()[req.commandType], req.uuid, req.isSetIsGeneratedByPipe() && req.isGeneratedByPipe, - progressIndex)); + timePartitionProgressIndexMap)); } private TLoadResp createTLoadResp(TSStatus resultStatus) { @@ -496,18 +529,25 @@ public TSStatus createDataRegion(TCreateDataRegionReq req) { } @Override - public TSStatus invalidatePartitionCache(TInvalidateCacheReq req) { + public TSStatus invalidatePartitionCache(final TInvalidateCacheReq req) { ClusterPartitionFetcher.getInstance().invalidAllCache(); return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } @Override - public TSStatus invalidateSchemaCache(TInvalidateCacheReq req) { + public TSStatus invalidateLastCache(final String database) { + DataNodeSchemaCache.getInstance().invalidateLastCacheInDataRegion(database); + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + + @Override + public TSStatus invalidateSchemaCache(final TInvalidateCacheReq req) { DataNodeSchemaCache.getInstance().takeWriteLock(); try { // req.getFullPath() is a database path DataNodeSchemaCache.getInstance().invalidate(req.getFullPath()); ClusterTemplateManager.getInstance().invalid(req.getFullPath()); + LOGGER.info("Schema cache of {} has been invalidated", req.getFullPath()); return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } finally { DataNodeSchemaCache.getInstance().releaseWriteLock(); @@ -1047,14 +1087,18 @@ public TSStatus alterView(TAlterViewReq req) { 
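sendLoadCommand above is a deliberate compatibility break: the old fallback that fabricated a local progress index when the request carried none is gone, and such requests are now answered with LOAD_FILE_ERROR, while well-formed requests carry one serialized progress index per time partition. A sketch of the new decoding contract, with a string placeholder where the real code calls ProgressIndexType.deserializeFrom:

    import java.nio.ByteBuffer;
    import java.util.HashMap;
    import java.util.Map;

    // Sketch of the new contract: every time partition must carry its own
    // serialized progress index; a request without the map is rejected rather
    // than silently falling back to a locally generated index as before.
    final class LoadCommandDecoder {
      static Map<Long, String> decode(Map<Long, ByteBuffer> timePartition2ProgressIndex) {
        if (timePartition2ProgressIndex == null) {
          throw new IllegalArgumentException(
              "Load command requires time partition to progress index map");
        }
        Map<Long, String> decoded = new HashMap<>();
        for (Map.Entry<Long, ByteBuffer> entry : timePartition2ProgressIndex.entrySet()) {
          // stand-in for ProgressIndexType.deserializeFrom(entry.getValue())
          decoded.put(entry.getKey(), "progressIndex[" + entry.getValue().remaining() + " bytes]");
        }
        return decoded;
      }
    }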
@Override public TPushPipeMetaResp pushPipeMeta(TPushPipeMetaReq req) { - final List pipeMetas = new ArrayList<>(); - for (ByteBuffer byteBuffer : req.getPipeMetas()) { - pipeMetas.add(PipeMeta.deserialize(byteBuffer)); - } try { - List exceptionMessages = - PipeDataNodeAgent.task().handlePipeMetaChanges(pipeMetas); - + final List exceptionMessages = + PipeDataNodeAgent.task() + .handlePipeMetaChanges( + req.getPipeMetas().stream() + .map(PipeMeta::deserialize4TaskAgent) + .collect(Collectors.toList())); + + if (Objects.isNull(exceptionMessages)) { + return new TPushPipeMetaResp() + .setStatus(new TSStatus(TSStatusCode.PIPE_PUSH_META_TIMEOUT.getStatusCode())); + } return exceptionMessages.isEmpty() ? new TPushPipeMetaResp() .setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())) @@ -1075,17 +1119,28 @@ public TPushPipeMetaResp pushSinglePipeMeta(TPushSinglePipeMetaReq req) { if (req.isSetPipeNameToDrop()) { exceptionMessage = PipeDataNodeAgent.task().handleDropPipe(req.getPipeNameToDrop()); } else if (req.isSetPipeMeta()) { - final PipeMeta pipeMeta = PipeMeta.deserialize(ByteBuffer.wrap(req.getPipeMeta())); + final PipeMeta pipeMeta = + PipeMeta.deserialize4TaskAgent(ByteBuffer.wrap(req.getPipeMeta())); exceptionMessage = PipeDataNodeAgent.task().handleSinglePipeMetaChanges(pipeMeta); } else { throw new Exception("Invalid TPushSinglePipeMetaReq"); } - return exceptionMessage == null - ? new TPushPipeMetaResp() - .setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())) - : new TPushPipeMetaResp() - .setStatus(new TSStatus(TSStatusCode.PIPE_PUSH_META_ERROR.getStatusCode())) + if (exceptionMessage != null) { + if (exceptionMessage.message != null + && exceptionMessage.message.contains(PipeTaskAgent.MESSAGE_PIPE_NOT_ENOUGH_MEMORY)) { + return new TPushPipeMetaResp() + .setStatus( + new TSStatus(TSStatusCode.PIPE_PUSH_META_NOT_ENOUGH_MEMORY.getStatusCode())) .setExceptionMessages(Collections.singletonList(exceptionMessage)); + } + + return new TPushPipeMetaResp() + .setStatus(new TSStatus(TSStatusCode.PIPE_PUSH_META_ERROR.getStatusCode())) + .setExceptionMessages(Collections.singletonList(exceptionMessage)); + } + + return new TPushPipeMetaResp() + .setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); } catch (Exception e) { LOGGER.error("Error occurred when pushing single pipe meta", e); return new TPushPipeMetaResp() @@ -1112,7 +1167,7 @@ public TPushPipeMetaResp pushMultiPipeMeta(TPushMultiPipeMetaReq req) { } } else if (req.isSetPipeMetas()) { for (ByteBuffer byteBuffer : req.getPipeMetas()) { - final PipeMeta pipeMeta = PipeMeta.deserialize(byteBuffer); + final PipeMeta pipeMeta = PipeMeta.deserialize4TaskAgent(byteBuffer); TPushPipeMetaRespExceptionMessage message = PipeDataNodeAgent.task().handleSinglePipeMetaChanges(pipeMeta); exceptionMessages.add(message); @@ -1142,6 +1197,11 @@ public TPushPipeMetaResp pushMultiPipeMeta(TPushMultiPipeMetaReq req) { @Override public TPushTopicMetaResp pushTopicMeta(TPushTopicMetaReq req) { + if (!SubscriptionConfig.getInstance().getSubscriptionEnabled()) { + return new TPushTopicMetaResp() + .setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); + } + final List topicMetas = new ArrayList<>(); for (ByteBuffer byteBuffer : req.getTopicMetas()) { topicMetas.add(TopicMeta.deserialize(byteBuffer)); @@ -1165,6 +1225,11 @@ public TPushTopicMetaResp pushTopicMeta(TPushTopicMetaReq req) { @Override public TPushTopicMetaResp pushSingleTopicMeta(TPushSingleTopicMetaReq req) { + if 
(!SubscriptionConfig.getInstance().getSubscriptionEnabled()) { + return new TPushTopicMetaResp() + .setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); + } + try { final TPushTopicMetaRespExceptionMessage exceptionMessage; if (req.isSetTopicNameToDrop()) { @@ -1193,6 +1258,11 @@ public TPushTopicMetaResp pushSingleTopicMeta(TPushSingleTopicMetaReq req) { @Override public TPushTopicMetaResp pushMultiTopicMeta(TPushMultiTopicMetaReq req) { + if (!SubscriptionConfig.getInstance().getSubscriptionEnabled()) { + return new TPushTopicMetaResp() + .setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); + } + boolean hasException = false; // If there is any exception, we use the size of exceptionMessages to record the fail index List exceptionMessages = new ArrayList<>(); @@ -1240,6 +1310,11 @@ public TPushTopicMetaResp pushMultiTopicMeta(TPushMultiTopicMetaReq req) { @Override public TPushConsumerGroupMetaResp pushConsumerGroupMeta(TPushConsumerGroupMetaReq req) { + if (!SubscriptionConfig.getInstance().getSubscriptionEnabled()) { + return new TPushConsumerGroupMetaResp() + .setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); + } + final List consumerGroupMetas = new ArrayList<>(); for (ByteBuffer byteBuffer : req.getConsumerGroupMetas()) { consumerGroupMetas.add(ConsumerGroupMeta.deserialize(byteBuffer)); @@ -1264,6 +1339,11 @@ public TPushConsumerGroupMetaResp pushConsumerGroupMeta(TPushConsumerGroupMetaRe @Override public TPushConsumerGroupMetaResp pushSingleConsumerGroupMeta( TPushSingleConsumerGroupMetaReq req) { + if (!SubscriptionConfig.getInstance().getSubscriptionEnabled()) { + return new TPushConsumerGroupMetaResp() + .setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); + } + try { final TPushConsumerGroupMetaRespExceptionMessage exceptionMessage; if (req.isSetConsumerGroupNameToDrop()) { @@ -1301,18 +1381,37 @@ public TPipeHeartbeatResp pipeHeartbeat(TPipeHeartbeatReq req) throws TException private TSStatus executeSchemaBlackListTask( final List consensusGroupIdList, final Function executeOnOneRegion) { - final List statusList = new ArrayList<>(); - TSStatus status; - boolean hasFailure = false; - for (final TConsensusGroupId consensusGroupId : consensusGroupIdList) { - status = executeOnOneRegion.apply(consensusGroupId); - if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() - && status.getCode() != TSStatusCode.ONLY_LOGICAL_VIEW.getStatusCode()) { - hasFailure = true; + // Not guarantee sequence + final List statusList = Collections.synchronizedList(new ArrayList<>()); + final AtomicBoolean hasFailure = new AtomicBoolean(false); + + final Set> schemaFuture = new HashSet<>(); + + consensusGroupIdList.forEach( + consensusGroupId -> + schemaFuture.add( + schemaExecutor.submit( + () -> { + final TSStatus status = executeOnOneRegion.apply(consensusGroupId); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() + && status.getCode() != TSStatusCode.ONLY_LOGICAL_VIEW.getStatusCode()) { + hasFailure.set(true); + } + statusList.add(status); + }))); + + for (final Future future : schemaFuture) { + try { + future.get(); + } catch (final ExecutionException | InterruptedException e) { + LOGGER.warn("Exception occurs when executing internal schema task: ", e); + statusList.add( + new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()) + .setMessage(e.toString())); } - statusList.add(status); } - if (hasFailure) { + + if (hasFailure.get()) { return RpcUtils.getStatus(statusList); } else { 
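Both executeSchemaBlackListTask above and executeInternalSchemaTask below move from a sequential loop over consensus groups to the same fan-out pattern: submit one task per group to an executor, collect statuses in a synchronized list (ordering is explicitly not guaranteed), and flag failure through an AtomicBoolean set from worker threads. A self-contained sketch of that pattern under stated assumptions: Integer and String stand in for TConsensusGroupId and TSStatus, and the local fixed pool stands in for the class's schemaExecutor.

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;

public final class ParallelRegionTaskSketch {

  static List<String> executeOnAllRegions(
      List<Integer> groupIds, Function<Integer, String> executeOnOneRegion) {
    ExecutorService executor = Executors.newFixedThreadPool(4);
    // Synchronized list: workers append concurrently, sequence not guaranteed.
    List<String> statusList = Collections.synchronizedList(new ArrayList<>());
    AtomicBoolean hasFailure = new AtomicBoolean(false);
    Set<Future<?>> futures = new HashSet<>();
    try {
      for (Integer groupId : groupIds) {
        futures.add(
            executor.submit(
                () -> {
                  String status = executeOnOneRegion.apply(groupId);
                  if (!"SUCCESS".equals(status)) {
                    hasFailure.set(true);
                  }
                  statusList.add(status);
                }));
      }
      for (Future<?> future : futures) {
        try {
          future.get();
        } catch (ExecutionException | InterruptedException e) {
          // The hunk above likewise records the exception as a status
          // instead of propagating it.
          statusList.add("INTERNAL_SERVER_ERROR: " + e);
        }
      }
    } finally {
      executor.shutdown();
    }
    return hasFailure.get() ? statusList : Collections.singletonList("SUCCESS");
  }

  public static void main(String[] args) {
    System.out.println(
        executeOnAllRegions(List.of(1, 2, 3), id -> id == 2 ? "FAILED" : "SUCCESS"));
  }
}
```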
return statusList.stream() @@ -1323,19 +1422,38 @@ private TSStatus executeSchemaBlackListTask( } private TSStatus executeInternalSchemaTask( - List consensusGroupIdList, - Function executeOnOneRegion) { - List statusList = new ArrayList<>(); - TSStatus status; - boolean hasFailure = false; - for (TConsensusGroupId consensusGroupId : consensusGroupIdList) { - status = executeOnOneRegion.apply(consensusGroupId); - if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - hasFailure = true; + final List consensusGroupIdList, + final Function executeOnOneRegion) { + // Not guarantee sequence + final List statusList = Collections.synchronizedList(new ArrayList<>()); + final AtomicBoolean hasFailure = new AtomicBoolean(false); + + final Set> schemaFuture = new HashSet<>(); + + consensusGroupIdList.forEach( + consensusGroupId -> + schemaFuture.add( + schemaExecutor.submit( + () -> { + final TSStatus status = executeOnOneRegion.apply(consensusGroupId); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + hasFailure.set(true); + } + statusList.add(status); + }))); + + for (final Future future : schemaFuture) { + try { + future.get(); + } catch (final ExecutionException | InterruptedException e) { + LOGGER.warn("Exception occurs when executing internal schema task: ", e); + statusList.add( + new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()) + .setMessage(e.toString())); } - statusList.add(status); } - if (hasFailure) { + + if (hasFailure.get()) { return RpcUtils.getStatus(statusList); } else { return RpcUtils.SUCCESS_STATUS; @@ -1397,7 +1515,8 @@ public TSStatus executeCQ(TExecuteCQ req) { executedSQL, partitionFetcher, schemaFetcher, - req.getTimeout()); + req.getTimeout(), + false); if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode() && result.status.code != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) { @@ -1592,6 +1711,8 @@ public TDataNodeHeartbeatResp getDataNodeHeartBeat(TDataNodeHeartbeatReq req) th sampleDiskLoad(loadSample); resp.setLoadSample(loadSample); + + resp.setRegionDisk(FileMetrics.getInstance().getRegionSizeMap()); } AuthorityChecker.getAuthorityFetcher().refreshToken(); resp.setHeartbeatTimestamp(req.getHeartbeatTimestamp()); @@ -1625,6 +1746,15 @@ public TDataNodeHeartbeatResp getDataNodeHeartBeat(TDataNodeHeartbeatReq req) th } } + if (req.isSetCurrentRegionOperations()) { + RegionMigrateService.getInstance() + .notifyRegionMigration( + new TNotifyRegionMigrationReq( + req.getLogicalClock(), + req.getHeartbeatTimestamp(), + req.getCurrentRegionOperations())); + } + return resp; } @@ -1747,7 +1877,7 @@ private void sampleDiskLoad(TLoadSample loadSample) { "The available disk space is : {}, " + "the total disk space is : {}, " + "and the remaining disk usage ratio: {} is " - + "less than disk_spec_warning_threshold: {}, set system to readonly!", + + "less than disk_space_warning_threshold: {}, set system to readonly!", RamUsageEstimator.humanReadableUnits((long) availableDisk), RamUsageEstimator.humanReadableUnits((long) totalDisk), freeDiskRatio, @@ -1782,14 +1912,13 @@ public TSStatus merge() throws TException { @Override public TSStatus startRepairData() throws TException { - if (!storageEngine.isAllSgReady()) { + if (!storageEngine.isReadyForNonReadWriteFunctions()) { return RpcUtils.getStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR, "not all sg is ready"); } - IoTDBConfig iotdbConfig = IoTDBDescriptor.getInstance().getConfig(); - if (!iotdbConfig.isEnableSeqSpaceCompaction() || 
!iotdbConfig.isEnableUnseqSpaceCompaction()) { + if (!CompactionTaskManager.getInstance().isInit()) { return RpcUtils.getStatus( TSStatusCode.EXECUTE_STATEMENT_ERROR, - "cannot start repair task because inner space compaction is not enabled"); + "cannot start repair task because compaction is not enabled"); } try { if (storageEngine.repairData()) { @@ -1876,9 +2005,6 @@ public TShowConfigurationResp showConfiguration() { public TSStatus setSystemStatus(String status) throws TException { try { commonConfig.setNodeStatus(NodeStatus.parse(status)); - if (commonConfig.getNodeStatus().equals(NodeStatus.Removing)) { - PipeDataNodeAgent.runtime().stop(); - } } catch (Exception e) { return RpcUtils.getStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR, e.getMessage()); } @@ -1914,24 +2040,24 @@ public TSStatus setTTL(TSetTTLReq req) throws TException { } @Override - public TSStatus updateTemplate(TUpdateTemplateReq req) { + public TSStatus updateTemplate(final TUpdateTemplateReq req) { switch (TemplateInternalRPCUpdateType.getType(req.type)) { - case ADD_TEMPLATE_SET_INFO: + case ROLLBACK_INVALIDATE_TEMPLATE_SET_INFO: + ClusterTemplateManager.getInstance().addTemplateSetInfo(req.getTemplateInfo()); + break; + case INVALIDATE_TEMPLATE_SET_INFO: + ClusterTemplateManager.getInstance().invalidateTemplateSetInfo(req.getTemplateInfo()); + break; + case ADD_TEMPLATE_PRE_SET_INFO: DataNodeSchemaLockManager.getInstance() .takeWriteLock(SchemaLockType.TIMESERIES_VS_TEMPLATE); try { - ClusterTemplateManager.getInstance().addTemplateSetInfo(req.getTemplateInfo()); + ClusterTemplateManager.getInstance().addTemplatePreSetInfo(req.getTemplateInfo()); } finally { DataNodeSchemaLockManager.getInstance() .releaseWriteLock(SchemaLockType.TIMESERIES_VS_TEMPLATE); } break; - case INVALIDATE_TEMPLATE_SET_INFO: - ClusterTemplateManager.getInstance().invalidateTemplateSetInfo(req.getTemplateInfo()); - break; - case ADD_TEMPLATE_PRE_SET_INFO: - ClusterTemplateManager.getInstance().addTemplatePreSetInfo(req.getTemplateInfo()); - break; case COMMIT_TEMPLATE_SET_INFO: ClusterTemplateManager.getInstance().commitTemplatePreSetInfo(req.getTemplateInfo()); break; @@ -2127,6 +2253,12 @@ public TRegionMigrateResult getRegionMaintainResult(long taskId) throws TExcepti return RegionMigrateService.getInstance().getRegionMaintainResult(taskId); } + @Override + public TSStatus notifyRegionMigration(TNotifyRegionMigrationReq req) throws TException { + RegionMigrateService.getInstance().notifyRegionMigration(req); + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + private TSStatus createNewRegion(ConsensusGroupId regionId, String storageGroup) { return regionManager.createNewRegion(regionId, storageGroup); } @@ -2347,7 +2479,7 @@ private TSStatus createNewRegionPeer(ConsensusGroupId regionId, List peers } @Override - public TSStatus disableDataNode(TDisableDataNodeReq req) { + public TSStatus cleanDataNodeCache(TCleanDataNodeCacheReq req) { LOGGER.info("start disable data node in the request: {}", req); TSStatus status = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); status.setMessage("disable datanode succeed"); @@ -2365,20 +2497,21 @@ public TSStatus disableDataNode(TDisableDataNodeReq req) { @SuppressWarnings("squid:S2142") // ignore Either re-interrupt this method or rethrow @Override - public TSStatus stopDataNode() { + public TSStatus stopAndClearDataNode() { TSStatus status = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - LOGGER.info("Execute stopDataNode RPC method"); + 
LOGGER.info("Execute stopAndClearDataNode RPC method"); - // kill the datanode process 20 seconds later + // kill the datanode process 30 seconds later // because datanode process cannot exit normally for the reason of InterruptedException new Thread( () -> { try { - TimeUnit.SECONDS.sleep(20); + TimeUnit.SECONDS.sleep(30); } catch (InterruptedException e) { - LOGGER.warn("Meets InterruptedException in stopDataNode RPC method"); + LOGGER.warn("Meets InterruptedException in stopAndClearDataNode RPC method"); } finally { - LOGGER.info("Executing system.exit(0) in stopDataNode RPC method after 20 seconds"); + LOGGER.info( + "Executing system.exit(0) in stopAndClearDataNode RPC method after 30 seconds"); System.exit(0); } }) @@ -2386,9 +2519,10 @@ public TSStatus stopDataNode() { try { DataNode.getInstance().stop(); - status.setMessage("stop datanode succeed"); + status.setMessage("Stop And Clear Data Node succeed"); + DataNode.getInstance().deleteDataNodeSystemProperties(); } catch (Exception e) { - LOGGER.warn("Stop Data Node error", e); + LOGGER.warn("Stop And Clear Data Node error", e); status.setCode(TSStatusCode.DATANODE_STOP_ERROR.getStatusCode()); status.setMessage(e.getMessage()); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeRegionManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeRegionManager.java index 18ddd6db478a4..1b474ac725790 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeRegionManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeRegionManager.java @@ -100,8 +100,8 @@ private DataNodeRegionManager() {} public ReentrantReadWriteLock getRegionLock(ConsensusGroupId consensusGroupId) { return consensusGroupId instanceof DataRegionId - ? dataRegionLockMap.get((DataRegionId) consensusGroupId) - : schemaRegionLockMap.get((SchemaRegionId) consensusGroupId); + ? dataRegionLockMap.get(consensusGroupId) + : schemaRegionLockMap.get(consensusGroupId); } public TSStatus createSchemaRegion(TRegionReplicaSet regionReplicaSet, String storageGroup) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/MPPQueryContext.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/MPPQueryContext.java index 7eebccdd67513..fc457ab9ce487 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/MPPQueryContext.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/MPPQueryContext.java @@ -33,10 +33,10 @@ import org.apache.tsfile.read.filter.basic.Filter; import java.time.ZoneId; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.LongConsumer; /** * This class is used to record the context of a query including QueryId, query statement, session @@ -65,13 +65,14 @@ public class MPPQueryContext { // When some DataNode cannot be connected, its endPoint will be put // in this list. And the following retry will avoid planning fragment // onto this node. 
- private final List endPointBlackList; + // When dispatch FI fails, this structure may be modified concurrently + private final Set endPointBlackList; private final TypeProvider typeProvider = new TypeProvider(); private Filter globalTimeFilter; - private Map acquiredLockNumMap = new HashMap<>(); + private final Set acquiredLocks = new HashSet<>(); private boolean isExplainAnalyze = false; @@ -81,9 +82,22 @@ public class MPPQueryContext { // constructing some Expression and PlanNode. private final MemoryReservationManager memoryReservationManager; + private static final int minSizeToUseSampledTimeseriesOperandMemCost = 100; + private double avgTimeseriesOperandMemCost = 0; + private int numsOfSampledTimeseriesOperand = 0; + // When there is no view in a last query and no device exists in multiple regions, + // the updateScanNum process in distributed planning can be skipped. + private boolean needUpdateScanNumForLastQuery = false; + + private long reservedMemoryCostForSchemaTree = 0; + private boolean releaseSchemaTreeAfterAnalyzing = true; + private LongConsumer reserveMemoryForSchemaTreeFunc = null; + + private boolean userQuery = false; + public MPPQueryContext(QueryId queryId) { this.queryId = queryId; - this.endPointBlackList = new LinkedList<>(); + this.endPointBlackList = ConcurrentHashMap.newKeySet(); this.memoryReservationManager = new NotThreadSafeMemoryReservationManager(queryId, this.getClass().getName()); } @@ -119,6 +133,34 @@ public MPPQueryContext( this.initResultNodeContext(); } + public void setReserveMemoryForSchemaTreeFunc(LongConsumer reserveMemoryForSchemaTreeFunc) { + this.reserveMemoryForSchemaTreeFunc = reserveMemoryForSchemaTreeFunc; + } + + public void reserveMemoryForSchemaTree(long memoryCost) { + if (reserveMemoryForSchemaTreeFunc == null) { + return; + } + reserveMemoryForSchemaTreeFunc.accept(memoryCost); + this.reservedMemoryCostForSchemaTree += memoryCost; + } + + public void setReleaseSchemaTreeAfterAnalyzing(boolean releaseSchemaTreeAfterAnalyzing) { + this.releaseSchemaTreeAfterAnalyzing = releaseSchemaTreeAfterAnalyzing; + } + + public boolean releaseSchemaTreeAfterAnalyzing() { + return releaseSchemaTreeAfterAnalyzing; + } + + public void releaseMemoryForSchemaTree() { + if (reservedMemoryCostForSchemaTree <= 0) { + return; + } + this.memoryReservationManager.releaseMemoryCumulatively(reservedMemoryCostForSchemaTree); + reservedMemoryCostForSchemaTree = 0; + } + public void prepareForRetry() { this.initResultNodeContext(); this.releaseAllMemoryReservedForFrontEnd(); @@ -180,7 +222,7 @@ public void addFailedEndPoint(TEndPoint endPoint) { this.endPointBlackList.add(endPoint); } - public List getEndPointBlackList() { + public Set getEndPointBlackList() { return endPointBlackList; } @@ -200,16 +242,12 @@ public String getSql() { return sql; } - public Map getAcquiredLockNumMap() { - return acquiredLockNumMap; + public Set getAcquiredLocks() { + return acquiredLocks; } - public void addAcquiredLockNum(SchemaLockType lockType) { - if (acquiredLockNumMap.containsKey(lockType)) { - acquiredLockNumMap.put(lockType, acquiredLockNumMap.get(lockType) + 1); - } else { - acquiredLockNumMap.put(lockType, 1); - } + public boolean addAcquiredLock(final SchemaLockType lockType) { + return acquiredLocks.add(lockType); } public void generateGlobalTimeFilter(Analysis analysis) { @@ -333,5 +371,35 @@ public void releaseMemoryReservedForFrontEnd(final long bytes) { this.memoryReservationManager.releaseMemoryCumulatively(bytes); } + public boolean 
useSampledAvgTimeseriesOperandMemCost() { + return numsOfSampledTimeseriesOperand >= minSizeToUseSampledTimeseriesOperandMemCost; + } + + public long getAvgTimeseriesOperandMemCost() { + return (long) avgTimeseriesOperandMemCost; + } + + public void calculateAvgTimeseriesOperandMemCost(long current) { + numsOfSampledTimeseriesOperand++; + avgTimeseriesOperandMemCost += + (current - avgTimeseriesOperandMemCost) / numsOfSampledTimeseriesOperand; + } + // endregion + + public boolean needUpdateScanNumForLastQuery() { + return needUpdateScanNumForLastQuery; + } + + public void setNeedUpdateScanNumForLastQuery(boolean needUpdateScanNumForLastQuery) { + this.needUpdateScanNumForLastQuery = needUpdateScanNumForLastQuery; + } + + public boolean isUserQuery() { + return userQuery; + } + + public void setUserQuery(boolean userQuery) { + this.userQuery = userQuery; + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/header/ColumnHeaderConstant.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/header/ColumnHeaderConstant.java index f4fd39bf504d3..0f26c42e39d7e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/header/ColumnHeaderConstant.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/header/ColumnHeaderConstant.java @@ -46,6 +46,7 @@ private ColumnHeaderConstant() { public static final String COMPRESSION = "Compression"; public static final String TAGS = "Tags"; public static final String ATTRIBUTES = "Attributes"; + public static final String NOTES = "Notes"; public static final String DEADBAND = "Deadband"; public static final String DEADBAND_PARAMETERS = "DeadbandParameters"; public static final String IS_ALIGNED = "IsAligned"; @@ -109,6 +110,8 @@ private ColumnHeaderConstant() { public static final String TRIGGER_NAME = "TriggerName"; public static final String EVENT = "Event"; public static final String STATE = "State"; + public static final String MODEL_TYPE = "ModelType"; + public static final String CONFIGS = "Configs"; public static final String PATH_PATTERN = "PathPattern"; public static final String CLASS_NAME = "ClassName"; @@ -124,10 +127,12 @@ private ColumnHeaderConstant() { // column names for show subscriptions statement public static final String CONSUMER_GROUP_NAME = "ConsumerGroupName"; public static final String SUBSCRIBED_CONSUMERS = "SubscribedConsumers"; + public static final String SUBSCRIPTION_ID = "SubscriptionID"; // show cluster status public static final String NODE_TYPE_CONFIG_NODE = "ConfigNode"; public static final String NODE_TYPE_DATA_NODE = "DataNode"; + public static final String NODE_TYPE_AI_NODE = "AINode"; public static final String COLUMN_CLUSTER_NAME = "ClusterName"; public static final String CONFIG_NODE_CONSENSUS_PROTOCOL_CLASS = "ConfigNodeConsensusProtocolClass"; @@ -155,6 +160,7 @@ private ColumnHeaderConstant() { public static final String START_TIME = "StartTime"; public static final String ROLE = "Role"; public static final String CREATE_TIME = "CreateTime"; + public static final String TSFILE_SIZE = "TsFileSize"; // column names for show datanodes public static final String SCHEMA_REGION_NUM = "SchemaRegionNum"; @@ -200,6 +206,9 @@ private ColumnHeaderConstant() { public static final String USER = "User"; public static final String READ_WRITE = "Read/Write"; + // column names for show models/trials + public static final String MODEL_ID = "ModelId"; + // column names for views (e.g. 
logical view) public static final String VIEW_TYPE = "ViewType"; public static final String SOURCE = "Source"; @@ -327,7 +336,15 @@ private ColumnHeaderConstant() { new ColumnHeader(RPC_PORT, TSDataType.INT32), new ColumnHeader(INTERNAL_ADDRESS, TSDataType.TEXT), new ColumnHeader(ROLE, TSDataType.TEXT), - new ColumnHeader(CREATE_TIME, TSDataType.TEXT)); + new ColumnHeader(CREATE_TIME, TSDataType.TEXT), + new ColumnHeader(TSFILE_SIZE, TSDataType.TEXT)); + + public static final List showAINodesColumnHeaders = + ImmutableList.of( + new ColumnHeader(NODE_ID, TSDataType.INT32), + new ColumnHeader(STATUS, TSDataType.TEXT), + new ColumnHeader(RPC_ADDRESS, TSDataType.TEXT), + new ColumnHeader(RPC_PORT, TSDataType.INT32)); public static final List showDataNodesColumnHeaders = ImmutableList.of( @@ -439,6 +456,7 @@ private ColumnHeaderConstant() { public static final List showSubscriptionColumnHeaders = ImmutableList.of( + new ColumnHeader(SUBSCRIPTION_ID, TSDataType.TEXT), new ColumnHeader(TOPIC_NAME, TSDataType.TEXT), new ColumnHeader(CONSUMER_GROUP_NAME, TSDataType.TEXT), new ColumnHeader(SUBSCRIBED_CONSUMERS, TSDataType.TEXT)); @@ -497,6 +515,14 @@ private ColumnHeaderConstant() { new ColumnHeader(LIMIT, TSDataType.TEXT), new ColumnHeader(READ_WRITE, TSDataType.TEXT)); + public static final List showModelsColumnHeaders = + ImmutableList.of( + new ColumnHeader(MODEL_ID, TSDataType.TEXT), + new ColumnHeader(MODEL_TYPE, TSDataType.TEXT), + new ColumnHeader(STATE, TSDataType.TEXT), + new ColumnHeader(CONFIGS, TSDataType.TEXT), + new ColumnHeader(NOTES, TSDataType.TEXT)); + public static final List showLogicalViewColumnHeaders = ImmutableList.of( new ColumnHeader(TIMESERIES, TSDataType.TEXT), diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/header/DatasetHeaderFactory.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/header/DatasetHeaderFactory.java index 9c0f89b19a570..132dafd246d93 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/header/DatasetHeaderFactory.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/header/DatasetHeaderFactory.java @@ -119,6 +119,10 @@ public static DatasetHeader getShowRegionHeader() { return new DatasetHeader(ColumnHeaderConstant.showRegionColumnHeaders, true); } + public static DatasetHeader getShowAINodesHeader() { + return new DatasetHeader(ColumnHeaderConstant.showAINodesColumnHeaders, true); + } + public static DatasetHeader getShowDataNodesHeader() { return new DatasetHeader(ColumnHeaderConstant.showDataNodesColumnHeaders, true); } @@ -201,6 +205,10 @@ public static DatasetHeader getShowThrottleQuotaHeader() { return new DatasetHeader(ColumnHeaderConstant.showThrottleQuotaColumnHeaders, true); } + public static DatasetHeader getShowModelsHeader() { + return new DatasetHeader(ColumnHeaderConstant.showModelsColumnHeaders, true); + } + public static DatasetHeader getShowLogicalViewHeader() { return new DatasetHeader(ColumnHeaderConstant.showLogicalViewColumnHeaders, true); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/ClusterSchemaTree.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/ClusterSchemaTree.java index dc9776ab54ab6..40a49c368d922 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/ClusterSchemaTree.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/ClusterSchemaTree.java @@ -39,6 +39,7 @@ import org.apache.iotdb.db.schemaengine.template.Template; import org.apache.tsfile.utils.Pair; +import org.apache.tsfile.utils.RamUsageEstimator; import org.apache.tsfile.utils.ReadWriteIOUtils; import org.apache.tsfile.write.schema.IMeasurementSchema; @@ -49,8 +50,10 @@ import java.util.ArrayList; import java.util.Deque; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.NoSuchElementException; import java.util.Set; import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_ROOT; @@ -60,6 +63,8 @@ import static org.apache.iotdb.db.queryengine.common.schematree.node.SchemaNode.SCHEMA_MEASUREMENT_NODE; public class ClusterSchemaTree implements ISchemaTree { + private static final long SHALLOW_SIZE = + RamUsageEstimator.shallowSizeOfInstance(ClusterSchemaTree.class); private static final ClusterTemplateManager templateManager = ClusterTemplateManager.getInstance(); @@ -75,6 +80,8 @@ public class ClusterSchemaTree implements ISchemaTree { private Map templateMap = new HashMap<>(); + private long ramBytesUsed; + public ClusterSchemaTree() { root = new SchemaInternalNode(PATH_ROOT); } @@ -484,59 +491,158 @@ public void serialize(OutputStream outputStream) throws IOException { root.serialize(outputStream); } - public static ClusterSchemaTree deserialize(InputStream inputStream) throws IOException { + public Iterator getIteratorForSerialize() { + return new SchemaNodePostOrderIterator(root); + } - byte nodeType; - int childNum; - Deque stack = new ArrayDeque<>(); - SchemaNode child; - boolean hasLogicalView = false; - boolean hasNormalTimeSeries = false; - Map templateMap = new HashMap<>(); - - while (inputStream.available() > 0) { - nodeType = ReadWriteIOUtils.readByte(inputStream); - if (nodeType == SCHEMA_MEASUREMENT_NODE) { - SchemaMeasurementNode measurementNode = SchemaMeasurementNode.deserialize(inputStream); - stack.push(measurementNode); - if (measurementNode.isLogicalView()) { - hasLogicalView = true; + @Override + public long ramBytesUsed() { + if (ramBytesUsed > 0) { + return ramBytesUsed; + } + ramBytesUsed = + root.ramBytesUsed() + + SHALLOW_SIZE + + RamUsageEstimator.sizeOfMapWithKnownShallowSize( + templateMap, + RamUsageEstimator.SHALLOW_SIZE_OF_HASHMAP, + RamUsageEstimator.SHALLOW_SIZE_OF_HASHMAP_ENTRY); + return ramBytesUsed; + } + + public void setRamBytesUsed(long ramBytesUsed) { + this.ramBytesUsed = ramBytesUsed; + } + + private static class SchemaNodePostOrderIterator implements Iterator { + // This class is likely to be faster than Stack when used as a stack + private final Deque>> stack = new ArrayDeque<>(); + private SchemaNode nextNode; + + public SchemaNodePostOrderIterator(SchemaNode root) { + stack.push(new Pair<>(root, root.getChildrenIterator())); + prepareNext(); + } + + @Override + public boolean hasNext() { + return nextNode != null; + } + + @Override + public SchemaNode next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + SchemaNode result = nextNode; + prepareNext(); + return result; + } + + private void prepareNext() { + nextNode = null; + while (!stack.isEmpty()) { + Pair> pair = stack.peek(); + SchemaNode currentNode = pair.getLeft(); + Iterator childrenIterator = pair.getRight(); + if (childrenIterator.hasNext()) { + SchemaNode child = childrenIterator.next(); + stack.push(new Pair<>(child, child.getChildrenIterator())); + } else { + 
stack.pop(); + nextNode = currentNode; + return; } - hasNormalTimeSeries = true; - } else { - SchemaInternalNode internalNode; - if (nodeType == SCHEMA_ENTITY_NODE) { - internalNode = SchemaEntityNode.deserialize(inputStream); - int templateId = internalNode.getAsEntityNode().getTemplateId(); - if (templateId != NON_TEMPLATE) { - templateMap.putIfAbsent(templateId, templateManager.getTemplate(templateId)); + } + } + } + + public static class SchemaNodeBatchDeserializer { + private byte nodeType; + private int childNum; + // This class is likely to be faster than Stack when used as a stack + private final Deque stack = new ArrayDeque<>(); + private SchemaNode child; + private boolean hasLogicalView = false; + private boolean hasNormalTimeSeries = false; + private Map templateMap = new HashMap<>(); + private boolean isFirstBatch = true; + + public boolean isFirstBatch() { + return isFirstBatch; + } + + public void deserializeFromBatch(InputStream inputStream) throws IOException { + isFirstBatch = false; + while (inputStream.available() > 0) { + nodeType = ReadWriteIOUtils.readByte(inputStream); + if (nodeType == SCHEMA_MEASUREMENT_NODE) { + SchemaMeasurementNode measurementNode = SchemaMeasurementNode.deserialize(inputStream); + stack.push(measurementNode); + if (measurementNode.isLogicalView()) { + hasLogicalView = true; } + hasNormalTimeSeries = true; } else { - internalNode = SchemaInternalNode.deserialize(inputStream); - } + SchemaInternalNode internalNode; + if (nodeType == SCHEMA_ENTITY_NODE) { + internalNode = SchemaEntityNode.deserialize(inputStream); + int templateId = internalNode.getAsEntityNode().getTemplateId(); + if (templateId != NON_TEMPLATE) { + templateMap.putIfAbsent(templateId, templateManager.getTemplate(templateId)); + } + } else { + internalNode = SchemaInternalNode.deserialize(inputStream); + } - childNum = ReadWriteIOUtils.readInt(inputStream); - while (childNum > 0) { - child = stack.pop(); - internalNode.addChild(child.getName(), child); - if (child.isMeasurement()) { - SchemaMeasurementNode measurementNode = child.getAsMeasurementNode(); - if (measurementNode.getAlias() != null) { - internalNode - .getAsEntityNode() - .addAliasChild(measurementNode.getAlias(), measurementNode); + childNum = ReadWriteIOUtils.readInt(inputStream); + while (childNum > 0) { + child = stack.pop(); + internalNode.addChild(child.getName(), child); + if (child.isMeasurement()) { + SchemaMeasurementNode measurementNode = child.getAsMeasurementNode(); + if (measurementNode.getAlias() != null) { + internalNode + .getAsEntityNode() + .addAliasChild(measurementNode.getAlias(), measurementNode); + } } + childNum--; } - childNum--; + stack.push(internalNode); } - stack.push(internalNode); } } - ClusterSchemaTree result = new ClusterSchemaTree(stack.poll()); - result.templateMap = templateMap; - result.hasLogicalMeasurementPath = hasLogicalView; - result.hasNormalTimeSeries = hasNormalTimeSeries; - return result; + + public ClusterSchemaTree finish() { + try { + ClusterSchemaTree result = new ClusterSchemaTree(stack.poll()); + result.templateMap = templateMap; + result.hasLogicalMeasurementPath = hasLogicalView; + result.hasNormalTimeSeries = hasNormalTimeSeries; + return result; + } finally { + reset(); + } + } + + private void reset() { + nodeType = 0; + childNum = 0; + stack.clear(); + child = null; + hasLogicalView = false; + hasNormalTimeSeries = false; + // templateMap is set to the returned schema tree, so we should create a new one + templateMap = new HashMap<>(); + isFirstBatch = true; + 
} + } + + public static ClusterSchemaTree deserialize(InputStream inputStream) throws IOException { + SchemaNodeBatchDeserializer schemaNodeBatchDeserializer = new SchemaNodeBatchDeserializer(); + schemaNodeBatchDeserializer.deserializeFromBatch(inputStream); + return schemaNodeBatchDeserializer.finish(); } /** diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/ISchemaTree.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/ISchemaTree.java index 0288033b6955c..34cb2a09786a8 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/ISchemaTree.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/ISchemaTree.java @@ -24,12 +24,13 @@ import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.db.schemaengine.template.Template; +import org.apache.tsfile.utils.Accountable; import org.apache.tsfile.utils.Pair; import java.util.List; import java.util.Set; -public interface ISchemaTree { +public interface ISchemaTree extends Accountable { /** * Return all measurement paths for given path pattern and filter the result by slimit and offset. * diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/node/SchemaEntityNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/node/SchemaEntityNode.java index b877dc9d7a258..cf111e61b29e3 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/node/SchemaEntityNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/node/SchemaEntityNode.java @@ -19,6 +19,7 @@ package org.apache.iotdb.db.queryengine.common.schematree.node; +import org.apache.tsfile.utils.RamUsageEstimator; import org.apache.tsfile.utils.ReadWriteIOUtils; import java.io.IOException; @@ -31,6 +32,9 @@ public class SchemaEntityNode extends SchemaInternalNode { + private static final long SHALLOW_SIZE = + RamUsageEstimator.shallowSizeOfInstance(SchemaEntityNode.class); + private boolean isAligned; private Map aliasChildren; @@ -117,6 +121,11 @@ public byte getType() { @Override public void serialize(OutputStream outputStream) throws IOException { serializeChildren(outputStream); + this.serializeNodeOwnContent(outputStream); + } + + @Override + public void serializeNodeOwnContent(OutputStream outputStream) throws IOException { ReadWriteIOUtils.write(getType(), outputStream); ReadWriteIOUtils.write(name, outputStream); ReadWriteIOUtils.write(isAligned, outputStream); @@ -133,4 +142,18 @@ public static SchemaEntityNode deserialize(InputStream inputStream) throws IOExc entityNode.setTemplateId(templateId); return entityNode; } + + @Override + public long ramBytesUsed() { + return SHALLOW_SIZE + + RamUsageEstimator.sizeOf(name) + + RamUsageEstimator.sizeOfMapWithKnownShallowSize( + children, + RamUsageEstimator.SHALLOW_SIZE_OF_HASHMAP, + RamUsageEstimator.SHALLOW_SIZE_OF_HASHMAP_ENTRY) + + RamUsageEstimator.sizeOfMapWithKnownShallowSize( + aliasChildren, + RamUsageEstimator.SHALLOW_SIZE_OF_HASHMAP, + RamUsageEstimator.SHALLOW_SIZE_OF_HASHMAP_ENTRY); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/node/SchemaInternalNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/node/SchemaInternalNode.java index 5c6de241baf2b..84ccf5b35fa2a 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/node/SchemaInternalNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/node/SchemaInternalNode.java @@ -19,6 +19,7 @@ package org.apache.iotdb.db.queryengine.common.schematree.node; +import org.apache.tsfile.utils.RamUsageEstimator; import org.apache.tsfile.utils.ReadWriteIOUtils; import java.io.IOException; @@ -30,6 +31,9 @@ public class SchemaInternalNode extends SchemaNode { + private static final long SHALLOW_SIZE = + RamUsageEstimator.shallowSizeOfInstance(SchemaInternalNode.class); + protected Map children = new HashMap<>(); public SchemaInternalNode(String name) { @@ -85,7 +89,10 @@ public byte getType() { public void serialize(OutputStream outputStream) throws IOException { serializeChildren(outputStream); + serializeNodeOwnContent(outputStream); + } + public void serializeNodeOwnContent(OutputStream outputStream) throws IOException { ReadWriteIOUtils.write(getType(), outputStream); ReadWriteIOUtils.write(name, outputStream); ReadWriteIOUtils.write(children.size(), outputStream); @@ -102,4 +109,14 @@ public static SchemaInternalNode deserialize(InputStream inputStream) throws IOE return new SchemaInternalNode(name); } + + @Override + public long ramBytesUsed() { + return SHALLOW_SIZE + + RamUsageEstimator.sizeOf(name) + + RamUsageEstimator.sizeOfMapWithKnownShallowSize( + children, + RamUsageEstimator.SHALLOW_SIZE_OF_HASHMAP, + RamUsageEstimator.SHALLOW_SIZE_OF_HASHMAP_ENTRY); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/node/SchemaMeasurementNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/node/SchemaMeasurementNode.java index b1eaa7ff5c100..6f5bd07f08f05 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/node/SchemaMeasurementNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/node/SchemaMeasurementNode.java @@ -22,6 +22,7 @@ import org.apache.iotdb.commons.schema.view.LogicalViewSchema; import org.apache.iotdb.db.queryengine.common.schematree.IMeasurementSchemaInfo; +import org.apache.tsfile.utils.RamUsageEstimator; import org.apache.tsfile.utils.ReadWriteIOUtils; import org.apache.tsfile.write.schema.IMeasurementSchema; import org.apache.tsfile.write.schema.MeasurementSchema; @@ -34,6 +35,9 @@ public class SchemaMeasurementNode extends SchemaNode implements IMeasurementSchemaInfo { + private static final long SHALLOW_SIZE = + RamUsageEstimator.shallowSizeOfInstance(SchemaMeasurementNode.class); + private String alias; private IMeasurementSchema schema; private Map tagMap; @@ -44,6 +48,22 @@ public SchemaMeasurementNode(String name, IMeasurementSchema schema) { this.schema = schema; } + @Override + public long ramBytesUsed() { + return SHALLOW_SIZE + + RamUsageEstimator.sizeOf(name) + + RamUsageEstimator.sizeOf(alias) + + schema.ramBytesUsed() + + RamUsageEstimator.sizeOfMapWithKnownShallowSize( + tagMap, + RamUsageEstimator.SHALLOW_SIZE_OF_HASHMAP, + RamUsageEstimator.SHALLOW_SIZE_OF_HASHMAP_ENTRY) + + RamUsageEstimator.sizeOfMapWithKnownShallowSize( + attributeMap, + RamUsageEstimator.SHALLOW_SIZE_OF_HASHMAP, + RamUsageEstimator.SHALLOW_SIZE_OF_HASHMAP_ENTRY); + } + public String getAlias() { return alias; } @@ -143,6 +163,10 @@ public byte getType() { @Override public void serialize(OutputStream outputStream) throws IOException { + 
serializeNodeOwnContent(outputStream); + } + + public void serializeNodeOwnContent(OutputStream outputStream) throws IOException { ReadWriteIOUtils.write(getType(), outputStream); ReadWriteIOUtils.write(name, outputStream); ReadWriteIOUtils.write(alias, outputStream); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/node/SchemaNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/node/SchemaNode.java index e2625cd97acf4..dd4e5e57a2eea 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/node/SchemaNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/common/schematree/node/SchemaNode.java @@ -21,13 +21,15 @@ import org.apache.iotdb.commons.schema.tree.ITreeNode; +import org.apache.tsfile.utils.Accountable; + import java.io.IOException; import java.io.OutputStream; import java.util.Collections; import java.util.Iterator; import java.util.Map; -public abstract class SchemaNode implements ITreeNode { +public abstract class SchemaNode implements ITreeNode, Accountable { public static final byte SCHEMA_INTERNAL_NODE = 0; public static final byte SCHEMA_ENTITY_NODE = 1; @@ -80,4 +82,6 @@ public SchemaMeasurementNode getAsMeasurementNode() { public abstract byte getType(); public abstract void serialize(OutputStream outputStream) throws IOException; + + public abstract void serializeNodeOwnContent(OutputStream outputStream) throws IOException; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/MemoryEstimationHelper.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/MemoryEstimationHelper.java index a18e2dbc58bd0..ba6660f3d6af1 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/MemoryEstimationHelper.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/MemoryEstimationHelper.java @@ -23,12 +23,15 @@ import org.apache.iotdb.commons.path.MeasurementPath; import org.apache.iotdb.commons.path.PartialPath; +import org.apache.tsfile.read.common.TimeRange; import org.apache.tsfile.utils.Accountable; import org.apache.tsfile.utils.RamUsageEstimator; import javax.annotation.Nullable; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; public class MemoryEstimationHelper { @@ -41,6 +44,13 @@ public class MemoryEstimationHelper { private static final long MEASUREMENT_PATH_INSTANCE_SIZE = RamUsageEstimator.shallowSizeOfInstance(AlignedPath.class); + private static final long ARRAY_LIST_INSTANCE_SIZE = + RamUsageEstimator.shallowSizeOfInstance(ArrayList.class); + private static final long INTEGER_INSTANCE_SIZE = + RamUsageEstimator.shallowSizeOfInstance(Integer.class); + public static final long TIME_RANGE_INSTANCE_SIZE = + RamUsageEstimator.shallowSizeOfInstance(TimeRange.class); + private MemoryEstimationHelper() { // hide the constructor } @@ -86,4 +96,38 @@ public static long getEstimatedSizeOfPartialPath(@Nullable final PartialPath par } return totalSize; } + + public static long getEstimatedSizeOfMeasurementPathNodes( + @Nullable final PartialPath partialPath) { + if (partialPath == null) { + return 0; + } + long totalSize = MEASUREMENT_PATH_INSTANCE_SIZE; + String[] nodes = partialPath.getNodes(); + if (nodes != null && nodes.length > 0) { + totalSize += Arrays.stream(nodes).mapToLong(RamUsageEstimator::sizeOf).sum(); + } + return totalSize; + } + + // This method should only 
be called if the content in the current PartialPath comes from other + // structures whose memory cost have already been calculated. + public static long getEstimatedSizeOfCopiedPartialPath(@Nullable final PartialPath partialPath) { + if (partialPath == null) { + return 0; + } + return PARTIAL_PATH_INSTANCE_SIZE + RamUsageEstimator.shallowSizeOf(partialPath.getNodes()); + } + + public static long getEstimatedSizeOfIntegerArrayList(List integerArrayList) { + if (integerArrayList == null) { + return 0L; + } + long size = ARRAY_LIST_INSTANCE_SIZE; + size += + (long) RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + + (long) integerArrayList.size() * (long) RamUsageEstimator.NUM_BYTES_OBJECT_REF; + size += INTEGER_INSTANCE_SIZE * integerArrayList.size(); + return RamUsageEstimator.alignObjectSize(size); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/QueryStateMachine.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/QueryStateMachine.java index ab9201787a6a7..cc0f787b014a8 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/QueryStateMachine.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/QueryStateMachine.java @@ -21,13 +21,15 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.exception.IoTDBException; +import org.apache.iotdb.commons.exception.IoTDBRuntimeException; import org.apache.iotdb.db.queryengine.common.QueryId; import org.apache.iotdb.db.queryengine.plan.execution.QueryExecution; +import org.apache.iotdb.rpc.RpcUtils; import com.google.common.util.concurrent.ListenableFuture; -import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicReference; import static com.google.common.base.Preconditions.checkArgument; import static java.util.Objects.requireNonNull; @@ -40,6 +42,7 @@ import static org.apache.iotdb.db.queryengine.execution.QueryState.PLANNED; import static org.apache.iotdb.db.queryengine.execution.QueryState.QUEUED; import static org.apache.iotdb.db.queryengine.execution.QueryState.RUNNING; +import static org.apache.iotdb.db.utils.ErrorHandlingUtils.getRootCause; /** * State machine for a {@link QueryExecution}. It stores the states for the {@link QueryExecution}. @@ -48,19 +51,13 @@ public class QueryStateMachine { private final StateMachine queryState; - // The executor will be used in all the state machines belonged to this query. 
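The QueryStateMachine hunk continuing below drops the dedicated executor field and moves failureException/failureStatus into AtomicReference fields that are only written via compareAndSet(null, ...), so the first recorded failure wins, later failures cannot overwrite it, and transitionToQueued can reset both references for a retry. A compact sketch of that semantics:

```java
import java.util.concurrent.atomic.AtomicReference;

public final class FirstFailureWinsSketch {
  private final AtomicReference<Throwable> failureException = new AtomicReference<>();

  public void recordFailure(Throwable t) {
    // compareAndSet(null, t): only the first failure is kept.
    failureException.compareAndSet(null, t);
  }

  public void resetForRetry() {
    // Mirrors transitionToQueued clearing failure state before a retry.
    failureException.set(null);
  }

  public String getFailureMessage() {
    Throwable t = failureException.get();
    return t != null ? t.getMessage() : "no detailed failure reason";
  }

  public static void main(String[] args) {
    FirstFailureWinsSketch holder = new FirstFailureWinsSketch();
    holder.recordFailure(new IllegalStateException("first"));
    holder.recordFailure(new IllegalStateException("second"));
    System.out.println(holder.getFailureMessage()); // first
  }
}
```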
- private Executor stateMachineExecutor; - private Throwable failureException; - private TSStatus failureStatus; + private final AtomicReference failureException = new AtomicReference<>(); + private final AtomicReference failureStatus = new AtomicReference<>(); public QueryStateMachine(QueryId queryId, ExecutorService executor) { - this.stateMachineExecutor = executor; this.queryState = new StateMachine<>( - queryId.toString(), - this.stateMachineExecutor, - QUEUED, - QueryState.TERMINAL_INSTANCE_STATES); + queryId.toString(), executor, QUEUED, QueryState.TERMINAL_INSTANCE_STATES); } public void addStateChangeListener( @@ -78,6 +75,8 @@ public QueryState getState() { public void transitionToQueued() { queryState.set(QUEUED); + failureException.set(null); + failureStatus.set(null); } public void transitionToPlanned() { @@ -89,7 +88,7 @@ public void transitionToDispatching() { } public void transitionToPendingRetry(TSStatus failureStatus) { - this.failureStatus = failureStatus; + this.failureStatus.compareAndSet(null, failureStatus); queryState.setIf(PENDING_RETRY, currentState -> currentState == DISPATCHING); } @@ -109,8 +108,8 @@ public void transitionToCanceled() { } public void transitionToCanceled(Throwable throwable, TSStatus failureStatus) { - this.failureException = throwable; - this.failureStatus = failureStatus; + this.failureStatus.compareAndSet(null, failureStatus); + this.failureException.compareAndSet(null, throwable); transitionToDoneState(CANCELED); } @@ -123,38 +122,54 @@ public void transitionToFailed() { } public void transitionToFailed(Throwable throwable) { - this.failureException = throwable; + this.failureException.compareAndSet(null, throwable); transitionToDoneState(FAILED); } public void transitionToFailed(TSStatus failureStatus) { - this.failureStatus = failureStatus; + this.failureStatus.compareAndSet(null, failureStatus); transitionToDoneState(FAILED); } - private void transitionToDoneState(QueryState doneState) { + private boolean transitionToDoneState(QueryState doneState) { requireNonNull(doneState, "doneState is null"); checkArgument(doneState.isDone(), "doneState %s is not a done state", doneState); - queryState.setIf(doneState, currentState -> !currentState.isDone()); + return queryState.setIf(doneState, currentState -> !currentState.isDone()); } public String getFailureMessage() { - if (failureException != null) { - return failureException.getMessage(); + Throwable throwable = failureException.get(); + if (throwable != null) { + return throwable.getMessage(); } return "no detailed failure reason in QueryStateMachine"; } public Throwable getFailureException() { - if (failureException == null) { + Throwable throwable = failureException.get(); + if (throwable == null) { return new IoTDBException(getFailureStatus().getMessage(), getFailureStatus().code); } else { - return failureException; + return throwable; } } public TSStatus getFailureStatus() { - return failureStatus; + TSStatus status = failureStatus.get(); + if (status != null) { + return status; + } else { + Throwable throwable = failureException.get(); + if (throwable != null) { + Throwable t = getRootCause(throwable); + if (t instanceof IoTDBRuntimeException) { + return RpcUtils.getStatus(((IoTDBRuntimeException) t).getErrorCode(), t.getMessage()); + } else if (t instanceof IoTDBException) { + return RpcUtils.getStatus(((IoTDBException) t).getErrorCode(), t.getMessage()); + } + } + return failureStatus.get(); + } } } diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/AccumulatorFactory.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/AccumulatorFactory.java index a31dc6e582d2a..24a998f54a917 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/AccumulatorFactory.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/AccumulatorFactory.java @@ -159,6 +159,10 @@ private static Accumulator createModeAccumulator(TSDataType tsDataType) { return new FloatModeAccumulator(); case DOUBLE: return new DoubleModeAccumulator(); + case BLOB: + case STRING: + case TIMESTAMP: + case DATE: default: throw new IllegalArgumentException("Unknown data type: " + tsDataType); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/AvgAccumulator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/AvgAccumulator.java index 96d963ad2b491..c6d1baa33830a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/AvgAccumulator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/AvgAccumulator.java @@ -56,7 +56,11 @@ public void addInput(Column[] columns, BitMap bitMap) { addDoubleInput(columns, bitMap); return; case TEXT: + case BLOB: + case STRING: case BOOLEAN: + case DATE: + case TIMESTAMP: default: throw new UnSupportedDataTypeException( String.format("Unsupported data type in aggregation AVG : %s", seriesDataType)); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/ExtremeAccumulator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/ExtremeAccumulator.java index 776e391120301..76a42b41c7180 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/ExtremeAccumulator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/ExtremeAccumulator.java @@ -56,7 +56,11 @@ public void addInput(Column[] columns, BitMap bitMap) { addDoubleInput(columns, bitMap); return; case TEXT: + case STRING: + case BLOB: case BOOLEAN: + case DATE: + case TIMESTAMP: default: throw new UnSupportedDataTypeException( String.format("Unsupported data type in Extreme: %s", seriesDataType)); @@ -84,7 +88,11 @@ public void addIntermediate(Column[] partialResult) { updateDoubleResult(partialResult[0].getDouble(0)); break; case TEXT: + case STRING: + case BLOB: case BOOLEAN: + case DATE: + case TIMESTAMP: default: throw new UnSupportedDataTypeException( String.format("Unsupported data type in Extreme: %s", seriesDataType)); @@ -114,7 +122,11 @@ public void addStatistics(Statistics statistics) { updateDoubleResult((double) statistics.getMinValue()); break; case TEXT: + case STRING: + case BLOB: case BOOLEAN: + case DATE: + case TIMESTAMP: default: throw new UnSupportedDataTypeException( String.format("Unsupported data type in Extreme: %s", seriesDataType)); @@ -141,7 +153,11 @@ public void setFinal(Column finalResult) { extremeResult.setDouble(finalResult.getDouble(0)); break; case TEXT: + case STRING: + case BLOB: case BOOLEAN: + case DATE: + case TIMESTAMP: default: throw new UnSupportedDataTypeException( String.format("Unsupported data type in Extreme: %s", seriesDataType)); @@ -170,7 +186,11 @@ public void 
outputIntermediate(ColumnBuilder[] columnBuilders) { columnBuilders[0].writeDouble(extremeResult.getDouble()); break; case TEXT: + case STRING: + case BLOB: case BOOLEAN: + case DATE: + case TIMESTAMP: default: throw new UnSupportedDataTypeException( String.format("Unsupported data type in Extreme: %s", seriesDataType)); @@ -197,7 +217,11 @@ public void outputFinal(ColumnBuilder columnBuilder) { columnBuilder.writeDouble(extremeResult.getDouble()); break; case TEXT: + case STRING: + case BLOB: case BOOLEAN: + case DATE: + case TIMESTAMP: default: throw new UnSupportedDataTypeException( String.format("Unsupported data type in Extreme: %s", seriesDataType)); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/MaxMinByBaseAccumulator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/MaxMinByBaseAccumulator.java index c1e9b006c053f..a0e4f80120cb0 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/MaxMinByBaseAccumulator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/MaxMinByBaseAccumulator.java @@ -86,6 +86,7 @@ public void addInput(Column[] column, BitMap bitMap) { addBinaryInput(column, bitMap); return; case TEXT: + case BLOB: case BOOLEAN: default: throw new UnSupportedDataTypeException(String.format(UNSUPPORTED_TYPE_MESSAGE, yDataType)); @@ -438,6 +439,7 @@ private void updateFromBytesIntermediateInput(byte[] bytes) { updateBinaryResult(time, binaryMaxVal, columnBuilder.build(), 0); break; case TEXT: + case BLOB: case BOOLEAN: default: throw new UnSupportedDataTypeException(String.format(UNSUPPORTED_TYPE_MESSAGE, yDataType)); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/MaxValueAccumulator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/MaxValueAccumulator.java index 75e7a2f74e716..0d58de8064f26 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/MaxValueAccumulator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/MaxValueAccumulator.java @@ -63,6 +63,7 @@ public void addInput(Column[] columns, BitMap bitMap) { addBinaryInput(columns, bitMap); return; case TEXT: + case BLOB: case BOOLEAN: default: throw new UnSupportedDataTypeException( @@ -96,6 +97,7 @@ public void addIntermediate(Column[] partialResult) { updateBinaryResult(partialResult[0].getBinary(0)); break; case TEXT: + case BLOB: case BOOLEAN: default: throw new UnSupportedDataTypeException( @@ -127,6 +129,7 @@ public void addStatistics(Statistics statistics) { updateBinaryResult((Binary) statistics.getMaxValue()); break; case TEXT: + case BLOB: case BOOLEAN: default: throw new UnSupportedDataTypeException( @@ -160,6 +163,7 @@ public void setFinal(Column finalResult) { maxResult.setBinary(finalResult.getBinary(0)); break; case TEXT: + case BLOB: case BOOLEAN: default: throw new UnSupportedDataTypeException( @@ -194,6 +198,7 @@ public void outputIntermediate(ColumnBuilder[] columnBuilders) { columnBuilders[0].writeBinary(maxResult.getBinary()); break; case TEXT: + case BLOB: case BOOLEAN: default: throw new UnSupportedDataTypeException( @@ -226,6 +231,7 @@ public void outputFinal(ColumnBuilder columnBuilder) { columnBuilder.writeBinary(maxResult.getBinary()); break; case TEXT: + case BLOB: case BOOLEAN: default: throw new 
UnSupportedDataTypeException( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/MinValueAccumulator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/MinValueAccumulator.java index c8e403bf7ec1d..1d9cc59aa17de 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/MinValueAccumulator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/MinValueAccumulator.java @@ -63,6 +63,7 @@ public void addInput(Column[] columns, BitMap bitMap) { addBinaryInput(columns, bitMap); return; case TEXT: + case BLOB: case BOOLEAN: default: throw new UnSupportedDataTypeException( @@ -96,6 +97,7 @@ public void addIntermediate(Column[] partialResult) { updateBinaryResult(partialResult[0].getBinary(0)); break; case TEXT: + case BLOB: case BOOLEAN: default: throw new UnSupportedDataTypeException( @@ -127,6 +129,7 @@ public void addStatistics(Statistics statistics) { updateBinaryResult((Binary) statistics.getMinValue()); break; case TEXT: + case BLOB: case BOOLEAN: default: throw new UnSupportedDataTypeException( @@ -160,6 +163,7 @@ public void setFinal(Column finalResult) { minResult.setBinary(finalResult.getBinary(0)); break; case TEXT: + case BLOB: case BOOLEAN: default: throw new UnSupportedDataTypeException( @@ -194,6 +198,7 @@ public void outputIntermediate(ColumnBuilder[] columnBuilders) { columnBuilders[0].writeBinary(minResult.getBinary()); break; case TEXT: + case BLOB: case BOOLEAN: default: throw new UnSupportedDataTypeException( @@ -226,6 +231,7 @@ public void outputFinal(ColumnBuilder columnBuilder) { columnBuilder.writeBinary(minResult.getBinary()); break; case TEXT: + case BLOB: case BOOLEAN: default: throw new UnSupportedDataTypeException( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/SumAccumulator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/SumAccumulator.java index c948ed18e3b48..37daf1a84b1e0 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/SumAccumulator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/SumAccumulator.java @@ -56,7 +56,11 @@ public void addInput(Column[] columns, BitMap bitMap) { addDoubleInput(columns, bitMap); return; case TEXT: + case BLOB: case BOOLEAN: + case TIMESTAMP: + case DATE: + case STRING: default: throw new UnSupportedDataTypeException( String.format("Unsupported data type in aggregation AVG : %s", seriesDataType)); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/VarianceAccumulator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/VarianceAccumulator.java index 7f7dd1bfd6ded..3242518c3dc2d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/VarianceAccumulator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/VarianceAccumulator.java @@ -69,7 +69,11 @@ public void addInput(Column[] columns, BitMap bitMap) { addDoubleInput(columns, bitMap); return; case TEXT: + case BLOB: case BOOLEAN: + case DATE: + case STRING: + case TIMESTAMP: default: throw new UnSupportedDataTypeException( String.format("Unsupported data type in aggregation variance : %s", seriesDataType)); diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/AggrWindowIterator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/AggrWindowIterator.java index b2dfc4909eb95..e9847f814cff4 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/AggrWindowIterator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/AggrWindowIterator.java @@ -25,6 +25,8 @@ import org.apache.tsfile.read.common.TimeRange; import org.apache.tsfile.utils.TimeDuration; +import java.time.ZoneId; + /** * This class iteratively generates aggregated time windows. * @@ -46,6 +48,8 @@ public class AggrWindowIterator implements ITimeRangeIterator { // The number of current timeRange, it's used to calculate the cpu when there contains month private int timeRangeCount; + private final ZoneId zoneId; + @SuppressWarnings("squid:S107") public AggrWindowIterator( long startTime, @@ -53,7 +57,8 @@ public AggrWindowIterator( TimeDuration interval, TimeDuration slidingStep, boolean isAscending, - boolean leftCRightO) { + boolean leftCRightO, + ZoneId zoneId) { this.startTime = startTime; this.endTime = endTime; this.interval = interval; @@ -61,6 +66,7 @@ public AggrWindowIterator( this.isAscending = isAscending; this.leftCRightO = leftCRightO; this.timeRangeCount = 0; + this.zoneId = zoneId; } @Override @@ -78,7 +84,7 @@ private TimeRange getLeftmostTimeRange() { // calculate interval length by natural month based on startTime // ie. startTIme = 1/31, interval = 1mo, curEndTime will be set to 2/29 retEndTime = - Math.min(DateTimeUtils.calcPositiveIntervalByMonth(startTime, interval), endTime); + Math.min(DateTimeUtils.calcPositiveIntervalByMonth(startTime, interval, zoneId), endTime); } else { retEndTime = Math.min(startTime + interval.nonMonthDuration, endTime); } @@ -99,14 +105,14 @@ private TimeRange getRightmostTimeRange() { / (slidingStep.getMaxTotalDuration(TimestampPrecisionUtils.currPrecision))); long tempRetStartTime = DateTimeUtils.calcPositiveIntervalByMonth( - startTime, slidingStep.multiple(intervalNum - 1)); + startTime, slidingStep.multiple(intervalNum - 1), zoneId); retStartTime = tempRetStartTime; while (tempRetStartTime < endTime) { intervalNum++; retStartTime = tempRetStartTime; tempRetStartTime = DateTimeUtils.calcPositiveIntervalByMonth( - retStartTime, slidingStep.multiple(intervalNum - 1)); + retStartTime, slidingStep.multiple(intervalNum - 1), zoneId); } intervalNum -= 1; } else { @@ -120,7 +126,7 @@ private TimeRange getRightmostTimeRange() { retEndTime = Math.min( DateTimeUtils.calcPositiveIntervalByMonth( - startTime, interval.merge(slidingStep.multiple(intervalNum - 1))), + startTime, interval.merge(slidingStep.multiple(intervalNum - 1)), zoneId), endTime); } else { retEndTime = Math.min(retStartTime + interval.nonMonthDuration, endTime); @@ -147,7 +153,7 @@ public boolean hasNextTimeRange() { if (slidingStep.containsMonth()) { retStartTime = DateTimeUtils.calcPositiveIntervalByMonth( - startTime, slidingStep.multiple(timeRangeCount)); + startTime, slidingStep.multiple(timeRangeCount), zoneId); } else { retStartTime = curStartTime + slidingStep.nonMonthDuration; } @@ -171,7 +177,7 @@ public boolean hasNextTimeRange() { if (interval.containsMonth()) { retEndTime = DateTimeUtils.calcPositiveIntervalByMonth( - startTime, 
slidingStep.multiple(timeRangeCount).merge(interval)); + startTime, slidingStep.multiple(timeRangeCount).merge(interval), zoneId); } else { retEndTime = retStartTime + interval.nonMonthDuration; } @@ -213,11 +219,13 @@ public long getTotalIntervalNum() { (double) queryRange / (slidingStep.getMaxTotalDuration(TimestampPrecisionUtils.currPrecision))); long retStartTime = - DateTimeUtils.calcPositiveIntervalByMonth(startTime, slidingStep.multiple(intervalNum)); + DateTimeUtils.calcPositiveIntervalByMonth( + startTime, slidingStep.multiple(intervalNum), zoneId); while (retStartTime < endTime) { intervalNum++; retStartTime = - DateTimeUtils.calcPositiveIntervalByMonth(startTime, slidingStep.multiple(intervalNum)); + DateTimeUtils.calcPositiveIntervalByMonth( + startTime, slidingStep.multiple(intervalNum), zoneId); } } else { intervalNum = (long) Math.ceil(queryRange / (double) slidingStep.nonMonthDuration); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/PreAggrWindowWithNaturalMonthIterator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/PreAggrWindowWithNaturalMonthIterator.java index a99583f62afac..a4a0aa6261143 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/PreAggrWindowWithNaturalMonthIterator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/PreAggrWindowWithNaturalMonthIterator.java @@ -24,6 +24,8 @@ import org.apache.tsfile.read.common.TimeRange; import org.apache.tsfile.utils.TimeDuration; +import java.time.ZoneId; + public class PreAggrWindowWithNaturalMonthIterator implements ITimeRangeIterator { private static final int HEAP_MAX_SIZE = 100; @@ -46,11 +48,13 @@ public PreAggrWindowWithNaturalMonthIterator( TimeDuration interval, TimeDuration slidingStep, boolean isAscending, - boolean leftCRightO) { + boolean leftCRightO, + ZoneId zoneId) { this.isAscending = isAscending; this.timeBoundaryHeap = new TimeSelector(HEAP_MAX_SIZE, isAscending); this.aggrWindowIterator = - new AggrWindowIterator(startTime, endTime, interval, slidingStep, isAscending, leftCRightO); + new AggrWindowIterator( + startTime, endTime, interval, slidingStep, isAscending, leftCRightO, zoneId); this.leftCRightO = leftCRightO; initHeap(); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/TimeRangeIteratorFactory.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/TimeRangeIteratorFactory.java index 8bf06fd266847..7331971b4a6e4 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/TimeRangeIteratorFactory.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/aggregation/timerangeiterator/TimeRangeIteratorFactory.java @@ -23,6 +23,8 @@ import org.apache.tsfile.utils.TimeDuration; +import java.time.ZoneId; + public class TimeRangeIteratorFactory { private TimeRangeIteratorFactory() {} @@ -40,7 +42,8 @@ public static ITimeRangeIterator getTimeRangeIterator( TimeDuration slidingStep, boolean isAscending, boolean leftCRightO, - boolean outputPartialTimeWindow) { + boolean outputPartialTimeWindow, + ZoneId zoneId) { if (outputPartialTimeWindow && interval.getTotalDuration(TimestampPrecisionUtils.currPrecision) > 
slidingStep.getTotalDuration(TimestampPrecisionUtils.currPrecision)) { @@ -54,11 +57,11 @@ public static ITimeRangeIterator getTimeRangeIterator( leftCRightO); } else { return new PreAggrWindowWithNaturalMonthIterator( - startTime, endTime, interval, slidingStep, isAscending, leftCRightO); + startTime, endTime, interval, slidingStep, isAscending, leftCRightO, zoneId); } } else { return new AggrWindowIterator( - startTime, endTime, interval, slidingStep, isAscending, leftCRightO); + startTime, endTime, interval, slidingStep, isAscending, leftCRightO, zoneId); } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/driver/Driver.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/driver/Driver.java index 1211df6b74b0a..ac353e3221311 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/driver/Driver.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/driver/Driver.java @@ -51,6 +51,7 @@ import static com.google.common.base.Preconditions.checkNotNull; import static com.google.common.base.Preconditions.checkState; import static com.google.common.base.Throwables.throwIfUnchecked; +import static com.google.common.base.Verify.verify; import static com.google.common.util.concurrent.MoreExecutors.directExecutor; import static java.lang.Boolean.TRUE; import static org.apache.iotdb.db.queryengine.execution.operator.Operator.NOT_BLOCKED; @@ -321,15 +322,16 @@ private Optional tryWithLock( return Optional.empty(); } - Optional result; + T result = null; + Throwable failure = null; try { - result = Optional.of(task.get()); + result = task.get(); + // opportunistic check to avoid unnecessary lock reacquisition + destroyIfNecessary(); + } catch (Throwable t) { + failure = t; } finally { - try { - destroyIfNecessary(); - } finally { - exclusiveLock.unlock(); - } + exclusiveLock.unlock(); } // We need to recheck whether the state is NEED_DESTRUCTION, if so, destroy the driver. 
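The AggrWindowIterator, PreAggrWindowWithNaturalMonthIterator and TimeRangeIteratorFactory hunks above all thread a ZoneId down to DateTimeUtils.calcPositiveIntervalByMonth, because "plus N months" is only well defined on a zone-local calendar: month lengths vary and DST can shift the resulting offset. A sketch of the idea with java.time; this is not the real calcPositiveIntervalByMonth, whose exact semantics live in DateTimeUtils:

    import java.time.Instant;
    import java.time.ZoneId;

    final class MonthStepSketch {
      // Evaluate "plus `months` calendar months" on the zone-local calendar,
      // then convert back to an epoch timestamp.
      static long plusMonths(long epochMillis, long months, ZoneId zone) {
        return Instant.ofEpochMilli(epochMillis)
            .atZone(zone)
            .plusMonths(months)
            .toInstant()
            .toEpochMilli();
      }

      public static void main(String[] args) {
        long t = 1707973200000L; // 2024-02-15T05:00:00Z, i.e. local midnight in New York (EST)
        long utc = plusMonths(t, 1, ZoneId.of("UTC"));              // 2024-03-15T05:00:00Z
        long ny  = plusMonths(t, 1, ZoneId.of("America/New_York")); // 2024-03-15T04:00:00Z (EDT)
        System.out.println(utc - ny); // 3600000: one hour apart, purely because of the zone
      }
    }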
@@ -342,12 +344,25 @@ private Optional tryWithLock( if (state.get() == State.NEED_DESTRUCTION && exclusiveLock.tryLock(interruptOnClose)) { try { destroyIfNecessary(); + } catch (Throwable t) { + if (failure == null) { + failure = t; + } else if (failure != t) { + failure.addSuppressed(t); + } } finally { exclusiveLock.unlock(); } } - return result; + if (failure != null) { + throwIfUnchecked(failure); + // should never happen + throw new AssertionError(failure); + } + + verify(result != null, "result is null"); + return Optional.of(result); } @SuppressWarnings({"squid:S1181", "squid:S112"}) @@ -445,7 +460,7 @@ private void cleanTmpFile() { if (!tmpPipeLineDir.exists()) { return; } - FileUtils.deleteFileOrDirectory(tmpPipeLineDir); + FileUtils.deleteFileOrDirectory(tmpPipeLineDir, true); } private static Throwable addSuppressedException( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/driver/DriverContext.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/driver/DriverContext.java index 79cb4acf3b932..b16de9d633bc3 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/driver/DriverContext.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/driver/DriverContext.java @@ -50,6 +50,12 @@ public DriverContext() { this.fragmentInstanceContext = null; } + @TestOnly + // should only be used by executeGroupByQueryInternal + public DriverContext(FragmentInstanceContext fragmentInstanceContext) { + this.fragmentInstanceContext = fragmentInstanceContext; + } + public DriverContext(FragmentInstanceContext fragmentInstanceContext, int pipelineId) { this.fragmentInstanceContext = fragmentInstanceContext; this.driverTaskID = new DriverTaskId(fragmentInstanceContext.getId(), pipelineId); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/SharedTsBlockQueue.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/SharedTsBlockQueue.java index 6c556c1ae553b..ec854da306c48 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/SharedTsBlockQueue.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/SharedTsBlockQueue.java @@ -37,7 +37,9 @@ import javax.annotation.concurrent.NotThreadSafe; import java.util.LinkedList; +import java.util.Optional; import java.util.Queue; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import static com.google.common.util.concurrent.Futures.immediateVoidFuture; @@ -72,7 +74,7 @@ public class SharedTsBlockQueue { private ListenableFuture blockedOnMemory; - private boolean closed = false; + private volatile boolean closed = false; private boolean alreadyRegistered = false; private LocalSourceHandle sourceHandle; @@ -81,6 +83,8 @@ public class SharedTsBlockQueue { private long maxBytesCanReserve = IoTDBDescriptor.getInstance().getConfig().getMaxBytesPerFragmentInstance(); + private volatile Throwable abortedCause = null; + // used for SharedTsBlockQueue listener private final ExecutorService executorService; @@ -177,6 +181,18 @@ public void setNoMoreTsBlocks(boolean noMoreTsBlocks) { */ public TsBlock remove() { if (closed) { + // try throw underlying exception instead of "Source handle is aborted." 
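    // A sketch (not part of the diff) of the abort-cause mechanism that the
    // SharedTsBlockQueue changes beginning just above introduce: the queue
    // records why it was aborted and replays that cause to late consumers,
    // instead of the generic "queue has been destroyed" message. Names here
    // are simplified stand-ins for the real SharedTsBlockQueue.
    final class AbortableQueueSketch<T> {
      private final java.util.Queue<T> queue = new java.util.LinkedList<>();
      private volatile boolean closed = false;
      private volatile Throwable abortedCause = null;

      synchronized void abort(Throwable t) {
        if (closed) {
          return;
        }
        abortedCause = t;          // record the cause before flipping closed
        closed = true;
        queue.clear();
      }

      synchronized T remove() {
        if (closed) {
          if (abortedCause != null) {
            // surface the underlying failure rather than a generic message
            throw new IllegalStateException(abortedCause);
          }
          throw new IllegalStateException("queue has been destroyed");
        }
        return queue.remove();
      }
    }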
+ if (abortedCause != null) { + throw new IllegalStateException(abortedCause); + } + try { + blocked.get(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IllegalStateException(e); + } catch (ExecutionException e) { + throw new IllegalStateException(e.getCause() == null ? e : e.getCause()); + } throw new IllegalStateException("queue has been destroyed"); } TsBlock tsBlock = queue.remove(); @@ -186,8 +202,8 @@ public TsBlock remove() { localFragmentInstanceId.getQueryId(), fullFragmentInstanceId, localPlanNodeId, - tsBlock.getRetainedSizeInBytes()); - bufferRetainedSizeInBytes -= tsBlock.getRetainedSizeInBytes(); + tsBlock.getSizeInBytes()); + bufferRetainedSizeInBytes -= tsBlock.getSizeInBytes(); // Every time LocalSourceHandle consumes a TsBlock, it needs to send the event to // corresponding LocalSinkChannel. if (sinkChannel != null) { @@ -226,10 +242,10 @@ public ListenableFuture add(TsBlock tsBlock) { localFragmentInstanceId.getQueryId(), fullFragmentInstanceId, localPlanNodeId, - tsBlock.getRetainedSizeInBytes(), + tsBlock.getSizeInBytes(), maxBytesCanReserve); blockedOnMemory = pair.left; - bufferRetainedSizeInBytes += tsBlock.getRetainedSizeInBytes(); + bufferRetainedSizeInBytes += tsBlock.getSizeInBytes(); // reserve memory failed, we should wait until there is enough memory if (!Boolean.TRUE.equals(pair.right)) { @@ -332,6 +348,7 @@ public void abort(Throwable t) { if (closed) { return; } + abortedCause = t; closed = true; if (!blocked.isDone()) { blocked.setException(t); @@ -354,4 +371,8 @@ public void abort(Throwable t) { bufferRetainedSizeInBytes = 0; } } + + public Optional getAbortedCause() { + return Optional.ofNullable(abortedCause); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/sink/ISink.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/sink/ISink.java index 37ec1ba6fef0a..5ee71abb1cfde 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/sink/ISink.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/sink/ISink.java @@ -71,7 +71,7 @@ public interface ISink extends Accountable { * *
Should only be called in abnormal case */ - void abort(); + boolean abort(); /** * Close the ISink. If this is an ISinkHandle, we should close all its channels. If this is an @@ -80,7 +80,7 @@ public interface ISink extends Accountable { * * <p>
Should only be called in normal case. */ - void close(); + boolean close(); /** Return true if this ISink has been closed. Used in {@link Driver#isFinishedInternal()}. */ boolean isClosed(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/sink/LocalSinkChannel.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/sink/LocalSinkChannel.java index 06ca3cf7965c6..91858a26d1b9a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/sink/LocalSinkChannel.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/sink/LocalSinkChannel.java @@ -49,8 +49,8 @@ public class LocalSinkChannel implements ISinkChannel { @SuppressWarnings("squid:S3077") private volatile ListenableFuture blocked; - private boolean aborted = false; - private boolean closed = false; + private volatile boolean aborted = false; + private volatile boolean closed = false; private boolean invokedOnFinished = false; @@ -181,14 +181,14 @@ public void setNoMoreTsBlocks() { } @Override - public void abort() { + public boolean abort() { if (LOGGER.isDebugEnabled()) { LOGGER.debug("[StartAbortLocalSinkChannel]"); } synchronized (queue) { synchronized (this) { if (aborted || closed) { - return; + return false; } aborted = true; Optional t = sinkListener.onAborted(this); @@ -202,17 +202,18 @@ public void abort() { if (LOGGER.isDebugEnabled()) { LOGGER.debug("[EndAbortLocalSinkChannel]"); } + return true; } @Override - public void close() { + public boolean close() { if (LOGGER.isDebugEnabled()) { LOGGER.debug("[StartCloseLocalSinkChannel]"); } synchronized (queue) { synchronized (this) { if (aborted || closed) { - return; + return false; } closed = true; queue.close(); @@ -225,6 +226,7 @@ public void close() { if (LOGGER.isDebugEnabled()) { LOGGER.debug("[EndCloseLocalSinkChannel]"); } + return true; } public SharedTsBlockQueue getSharedTsBlockQueue() { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/sink/ShuffleSinkHandle.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/sink/ShuffleSinkHandle.java index 90b135dcc60ce..1b7f175d123a7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/sink/ShuffleSinkHandle.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/sink/ShuffleSinkHandle.java @@ -190,18 +190,19 @@ public synchronized boolean isFinished() { } @Override - public void abort() { + public boolean abort() { if (aborted || closed) { - return; + return false; } if (LOGGER.isDebugEnabled()) { LOGGER.debug("[StartAbortShuffleSinkHandle]"); } boolean meetError = false; Exception firstException = null; + boolean selfAborted = true; for (ISink channel : downStreamChannelList) { try { - channel.abort(); + selfAborted = channel.abort(); } catch (Exception e) { if (!meetError) { firstException = e; @@ -212,10 +213,15 @@ public void abort() { if (meetError) { LOGGER.warn("Error occurred when try to abort channel.", firstException); } - sinkListener.onAborted(this); - aborted = true; - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[EndAbortShuffleSinkHandle]"); + if (selfAborted) { + sinkListener.onAborted(this); + aborted = true; + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("[EndAbortShuffleSinkHandle]"); + } + return true; + } else { + return false; } } @@ -224,18 +230,19 @@ public void abort() 
{ // ShuffleSinkHandle while synchronized methods of ShuffleSinkHandle // Lock ShuffleSinkHandle and wait to lock LocalSinkChannel @Override - public void close() { + public boolean close() { if (closed || aborted) { - return; + return false; } if (LOGGER.isDebugEnabled()) { LOGGER.debug("[StartCloseShuffleSinkHandle]"); } boolean meetError = false; Exception firstException = null; + boolean selfClosed = true; for (ISink channel : downStreamChannelList) { try { - channel.close(); + selfClosed = channel.close(); } catch (Exception e) { if (!meetError) { firstException = e; @@ -246,10 +253,15 @@ public void close() { if (meetError) { LOGGER.warn("Error occurred when try to close channel.", firstException); } - sinkListener.onFinish(this); - closed = true; - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[EndCloseShuffleSinkHandle]"); + if (selfClosed) { + sinkListener.onFinish(this); + closed = true; + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("[EndCloseShuffleSinkHandle]"); + } + return true; + } else { + return false; } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/sink/SinkChannel.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/sink/SinkChannel.java index 9f29a8a753fc5..ca6fdadc993e6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/sink/SinkChannel.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/sink/SinkChannel.java @@ -201,7 +201,7 @@ public synchronized void send(TsBlock tsBlock) { if (noMoreTsBlocks) { return; } - long retainedSizeInBytes = tsBlock.getRetainedSizeInBytes(); + long sizeInBytes = tsBlock.getSizeInBytes(); int startSequenceId; startSequenceId = nextSequenceId; blocked = @@ -211,17 +211,16 @@ public synchronized void send(TsBlock tsBlock) { localFragmentInstanceId.getQueryId(), fullFragmentInstanceId, localPlanNodeId, - retainedSizeInBytes, + sizeInBytes, maxBytesCanReserve) .left; - bufferRetainedSizeInBytes += retainedSizeInBytes; + bufferRetainedSizeInBytes += sizeInBytes; sequenceIdToTsBlock.put(nextSequenceId, new Pair<>(tsBlock, currentTsBlockSize)); nextSequenceId += 1; - currentTsBlockSize = retainedSizeInBytes; + currentTsBlockSize = sizeInBytes; - // TODO: consider merge multiple NewDataBlockEvent for less network traffic. 
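The ISink, LocalSinkChannel, ShuffleSinkHandle and SinkChannel hunks above change abort() and close() from void to boolean: the return value reports whether this particular call performed the state transition, which lets a caller such as ShuffleSinkHandle fire its own onAborted/onFinish listener callbacks only when the underlying channel actually transitioned, even if abort and close race or repeat. A condensed sketch of the idiom, with stand-in types:

    final class ChannelSketch {
      private boolean aborted = false;
      private boolean closed = false;

      synchronized boolean abort() {
        if (aborted || closed) {
          return false;       // already transitioned elsewhere; caller should not re-notify
        }
        aborted = true;
        return true;
      }

      synchronized boolean close() {
        if (aborted || closed) {
          return false;
        }
        closed = true;
        return true;
      }
    }

    final class HandleSketch {
      private final ChannelSketch channel = new ChannelSketch();

      void closeAndNotify(Runnable onFinish) {
        if (channel.close()) {  // notify only when this call actually closed
          onFinish.run();
        }
      }
    }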
- submitSendNewDataBlockEventTask(startSequenceId, ImmutableList.of(retainedSizeInBytes)); + submitSendNewDataBlockEventTask(startSequenceId, ImmutableList.of(sizeInBytes)); } finally { DATA_EXCHANGE_COST_METRIC_SET.recordDataExchangeCost( SINK_HANDLE_SEND_TSBLOCK_REMOTE, System.nanoTime() - startTime); @@ -240,12 +239,12 @@ public synchronized void setNoMoreTsBlocks() { } @Override - public synchronized void abort() { + public synchronized boolean abort() { if (LOGGER.isDebugEnabled()) { LOGGER.debug("[StartAbortSinkChannel]"); } if (aborted || closed) { - return; + return false; } sequenceIdToTsBlock.clear(); if (blocked != null) { @@ -266,15 +265,16 @@ public synchronized void abort() { if (LOGGER.isDebugEnabled()) { LOGGER.debug("[EndAbortSinkChannel]"); } + return true; } @Override - public synchronized void close() { + public synchronized boolean close() { if (LOGGER.isDebugEnabled()) { LOGGER.debug("[StartCloseSinkChannel]"); } if (closed || aborted) { - return; + return false; } sequenceIdToTsBlock.clear(); if (blocked != null) { @@ -295,6 +295,7 @@ public synchronized void close() { if (LOGGER.isDebugEnabled()) { LOGGER.debug("[EndCloseSinkChannel]"); } + return true; } private void invokeOnFinished() { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/source/LocalSourceHandle.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/source/LocalSourceHandle.java index 315e75cdfc56e..0c2d20d46ea1d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/source/LocalSourceHandle.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/exchange/source/LocalSourceHandle.java @@ -36,6 +36,8 @@ import org.slf4j.LoggerFactory; import java.nio.ByteBuffer; +import java.util.Optional; +import java.util.concurrent.ExecutionException; import static com.google.common.util.concurrent.Futures.nonCancellationPropagating; import static org.apache.iotdb.db.queryengine.execution.exchange.MPPDataExchangeManager.createFullIdFrom; @@ -123,9 +125,7 @@ public TsBlock receive() { if (tsBlock != null) { if (LOGGER.isDebugEnabled()) { LOGGER.debug( - "[GetTsBlockFromQueue] TsBlock:{} size:{}", - currSequenceId, - tsBlock.getRetainedSizeInBytes()); + "[GetTsBlockFromQueue] TsBlock:{} size:{}", currSequenceId, tsBlock.getSizeInBytes()); } currSequenceId++; } @@ -158,6 +158,7 @@ public ByteBuffer getSerializedTsBlock() throws IoTDBException { @Override public boolean isFinished() { synchronized (queue) { + checkSharedQueueIfAborted(); return queue.hasNoMoreTsBlocks() && queue.isEmpty(); } } @@ -253,10 +254,28 @@ public void close() { } private void checkState() { - if (aborted) { - throw new IllegalStateException("Source handle is aborted."); - } else if (closed) { - throw new IllegalStateException("Source Handle is closed."); + if (aborted || closed) { + checkSharedQueueIfAborted(); + if (queue.isBlocked().isDone()) { + // try throw underlying exception instead of "Source handle is aborted." + try { + queue.isBlocked().get(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IllegalStateException(e); + } catch (ExecutionException e) { + throw new IllegalStateException(e.getCause() == null ? e : e.getCause()); + } + } + throw new IllegalStateException( + "LocalSinkChannel state is ." + (aborted ? 
"ABORTED" : "CLOSED")); + } + } + + private void checkSharedQueueIfAborted() { + Optional abortedCause = queue.getAbortedCause(); + if (abortedCause.isPresent()) { + throw new IllegalStateException(abortedCause.get()); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/executor/RegionExecutionResult.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/executor/RegionExecutionResult.java index 2047bba61ac57..c3d886eebccad 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/executor/RegionExecutionResult.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/executor/RegionExecutionResult.java @@ -28,7 +28,18 @@ public class RegionExecutionResult { private String message; private TSStatus status; - private boolean needRetry; + private boolean readNeedRetry; + + private RegionExecutionResult(boolean accepted, String message, TSStatus status) { + this.accepted = accepted; + this.message = message; + this.status = status; + this.readNeedRetry = false; + } + + public static RegionExecutionResult create(boolean accepted, String message, TSStatus status) { + return new RegionExecutionResult(accepted, message, status); + } public boolean isAccepted() { return accepted; @@ -54,11 +65,11 @@ public void setStatus(TSStatus status) { this.status = status; } - public boolean isNeedRetry() { - return needRetry; + public boolean isReadNeedRetry() { + return readNeedRetry; } - public void setNeedRetry(boolean needRetry) { - this.needRetry = needRetry; + public void setReadNeedRetry(boolean readNeedRetry) { + this.readNeedRetry = readNeedRetry; } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/executor/RegionReadExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/executor/RegionReadExecutor.java index 3b6a0f0adb0df..b045babc6d63d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/executor/RegionReadExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/executor/RegionReadExecutor.java @@ -22,6 +22,7 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.consensus.ConsensusGroupId; import org.apache.iotdb.commons.consensus.DataRegionId; +import org.apache.iotdb.commons.utils.StatusUtils; import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.consensus.IConsensus; import org.apache.iotdb.consensus.common.DataSet; @@ -32,6 +33,7 @@ import org.apache.iotdb.db.queryengine.execution.fragment.FragmentInstanceManager; import org.apache.iotdb.db.queryengine.plan.planner.plan.FragmentInstance; import org.apache.iotdb.db.storageengine.dataregion.VirtualDataRegion; +import org.apache.iotdb.db.utils.ErrorHandlingUtils; import org.apache.iotdb.db.utils.SetThreadName; import org.apache.iotdb.rpc.TSStatusCode; @@ -76,7 +78,6 @@ public RegionReadExecutor( public RegionExecutionResult execute( ConsensusGroupId groupId, FragmentInstance fragmentInstance) { // execute fragment instance in state machine - RegionExecutionResult resp = new RegionExecutionResult(); try (SetThreadName threadName = new SetThreadName(fragmentInstance.getId().getFullId())) { DataSet readResponse; if (groupId instanceof DataRegionId) { @@ -86,29 +87,40 @@ public RegionExecutionResult execute( } if (readResponse == null) { LOGGER.error(RESPONSE_NULL_ERROR_MSG); - resp.setAccepted(false); - 
resp.setMessage(RESPONSE_NULL_ERROR_MSG); + return RegionExecutionResult.create(false, RESPONSE_NULL_ERROR_MSG, null); } else { FragmentInstanceInfo info = (FragmentInstanceInfo) readResponse; - resp.setAccepted(!info.getState().isFailed()); - resp.setMessage(info.getMessage()); + RegionExecutionResult resp = + RegionExecutionResult.create(!info.getState().isFailed(), info.getMessage(), null); + info.getErrorCode() + .ifPresent( + s -> { + resp.setStatus(s); + resp.setReadNeedRetry(StatusUtils.needRetryHelper(s)); + }); + return resp; } - return resp; } catch (ConsensusGroupNotExistException e) { - LOGGER.error("Execute FragmentInstance in ConsensusGroup {} failed.", groupId, e); - resp.setMessage(String.format(ERROR_MSG_FORMAT, e.getMessage())); - resp.setNeedRetry(true); - resp.setStatus(new TSStatus(TSStatusCode.CONSENSUS_GROUP_NOT_EXIST.getStatusCode())); + LOGGER.warn("Execute FragmentInstance in ConsensusGroup {} failed.", groupId, e); + RegionExecutionResult resp = + RegionExecutionResult.create( + false, + String.format(ERROR_MSG_FORMAT, e.getMessage()), + new TSStatus(TSStatusCode.CONSENSUS_GROUP_NOT_EXIST.getStatusCode())); + resp.setReadNeedRetry(true); return resp; } catch (Throwable e) { - LOGGER.error("Execute FragmentInstance in ConsensusGroup {} failed.", groupId, e); - resp.setMessage(String.format(ERROR_MSG_FORMAT, e.getMessage())); - Throwable t = e.getCause(); + LOGGER.warn("Execute FragmentInstance in ConsensusGroup {} failed.", groupId, e); + RegionExecutionResult resp = + RegionExecutionResult.create( + false, String.format(ERROR_MSG_FORMAT, e.getMessage()), null); + Throwable t = ErrorHandlingUtils.getRootCause(e); if (t instanceof ReadException || t instanceof ReadIndexException || t instanceof NotLeaderException - || t instanceof ServerNotReadyException) { - resp.setNeedRetry(true); + || t instanceof ServerNotReadyException + || t instanceof InterruptedException) { + resp.setReadNeedRetry(true); resp.setStatus(new TSStatus(TSStatusCode.RATIS_READ_UNAVAILABLE.getStatusCode())); } return resp; @@ -119,20 +131,15 @@ public RegionExecutionResult execute( public RegionExecutionResult execute(FragmentInstance fragmentInstance) { // execute fragment instance in state machine try (SetThreadName threadName = new SetThreadName(fragmentInstance.getId().getFullId())) { - RegionExecutionResult resp = new RegionExecutionResult(); // FI with queryExecutor will be executed directly FragmentInstanceInfo info = fragmentInstanceManager.execDataQueryFragmentInstance( fragmentInstance, VirtualDataRegion.getInstance()); - resp.setAccepted(!info.getState().isFailed()); - resp.setMessage(info.getMessage()); - return resp; + return RegionExecutionResult.create(!info.getState().isFailed(), info.getMessage(), null); } catch (Throwable t) { LOGGER.error("Execute FragmentInstance in QueryExecutor failed.", t); - RegionExecutionResult resp = new RegionExecutionResult(); - resp.setAccepted(false); - resp.setMessage(String.format(ERROR_MSG_FORMAT, t.getMessage())); - return resp; + return RegionExecutionResult.create( + false, String.format(ERROR_MSG_FORMAT, t.getMessage()), null); } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/executor/RegionWriteExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/executor/RegionWriteExecutor.java index ee4ab738f8043..1f11c86ad99a8 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/executor/RegionWriteExecutor.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/executor/RegionWriteExecutor.java @@ -32,6 +32,7 @@ import org.apache.iotdb.consensus.ConsensusFactory; import org.apache.iotdb.consensus.IConsensus; import org.apache.iotdb.consensus.exception.ConsensusException; +import org.apache.iotdb.consensus.exception.ConsensusGroupNotExistException; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.consensus.DataRegionConsensusImpl; @@ -79,8 +80,10 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.locks.ReentrantReadWriteLock; public class RegionWriteExecutor { @@ -140,18 +143,33 @@ public RegionWriteExecutor( } @SuppressWarnings("squid:S1181") - public RegionExecutionResult execute(ConsensusGroupId groupId, PlanNode planNode) { + public RegionExecutionResult execute(final ConsensusGroupId groupId, final PlanNode planNode) { try { - WritePlanNodeExecutionContext context = - new WritePlanNodeExecutionContext(groupId, regionManager.getRegionLock(groupId)); - return planNode.accept(executionVisitor, context); - } catch (Throwable e) { + final ReentrantReadWriteLock lock = regionManager.getRegionLock(groupId); + if (lock == null) { + return RegionExecutionResult.create( + false, + "Failed to get the lock of the region because the region is not existed.", + RpcUtils.getStatus(TSStatusCode.NO_AVAILABLE_REGION_GROUP)); + } + return planNode.accept(executionVisitor, new WritePlanNodeExecutionContext(groupId, lock)); + } catch (final Throwable e) { + // Detect problems caused by removed region + if (Objects.isNull(regionManager.getRegionLock(groupId))) { + final String errorMsg = + "Exception " + + e.getClass().getSimpleName() + + " encountered during region removal, will retry. 
Message: " + + e.getMessage(); + LOGGER.info(errorMsg); + return RegionExecutionResult.create( + false, errorMsg, RpcUtils.getStatus(TSStatusCode.NO_AVAILABLE_REGION_GROUP)); + } LOGGER.warn(e.getMessage(), e); - RegionExecutionResult result = new RegionExecutionResult(); - result.setAccepted(false); - result.setMessage(e.getMessage()); - result.setStatus(RpcUtils.getStatus(TSStatusCode.INTERNAL_SERVER_ERROR, e.getMessage())); - return result; + return RegionExecutionResult.create( + false, + e.getMessage(), + RpcUtils.getStatus(TSStatusCode.INTERNAL_SERVER_ERROR, e.getMessage())); } } @@ -159,36 +177,36 @@ private class WritePlanNodeExecutionVisitor extends PlanVisitor { @Override - public RegionExecutionResult visitPlan(PlanNode node, WritePlanNodeExecutionContext context) { - RegionExecutionResult response = new RegionExecutionResult(); + public RegionExecutionResult visitPlan( + final PlanNode node, final WritePlanNodeExecutionContext context) { if (CommonDescriptor.getInstance().getConfig().isReadOnly()) { - response.setAccepted(false); - response.setMessage("Fail to do non-query operations because system is read-only."); - response.setStatus( + return RegionExecutionResult.create( + false, + "Fail to do non-query operations because system is read-only.", RpcUtils.getStatus( TSStatusCode.SYSTEM_READ_ONLY, "Fail to do non-query operations because system is read-only.")); - return response; } try { - TSStatus status = executePlanNodeInConsensusLayer(context.getRegionId(), node); - response.setAccepted(TSStatusCode.SUCCESS_STATUS.getStatusCode() == status.getCode()); - response.setMessage(status.getMessage()); - response.setStatus(status); - } catch (ConsensusException e) { + final TSStatus status = executePlanNodeInConsensusLayer(context.getRegionId(), node); + return RegionExecutionResult.create( + TSStatusCode.SUCCESS_STATUS.getStatusCode() == status.getCode(), + status.getMessage(), + status); + } catch (final ConsensusException e) { LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e); - response.setAccepted(false); - response.setMessage(e.toString()); - response.setStatus( - RpcUtils.getStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR, e.getMessage())); + TSStatus status = RpcUtils.getStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR, e.getMessage()); + if (e instanceof ConsensusGroupNotExistException) { + status.setCode(TSStatusCode.NO_AVAILABLE_REGION_GROUP.getStatusCode()); + } + return RegionExecutionResult.create(false, e.toString(), status); } - return response; } - private TSStatus executePlanNodeInConsensusLayer(ConsensusGroupId groupId, PlanNode planNode) - throws ConsensusException { + private TSStatus executePlanNodeInConsensusLayer( + final ConsensusGroupId groupId, final PlanNode planNode) throws ConsensusException { if (groupId instanceof DataRegionId) { return dataRegionConsensus.write(groupId, planNode); } else { @@ -198,65 +216,70 @@ private TSStatus executePlanNodeInConsensusLayer(ConsensusGroupId groupId, PlanN @Override public RegionExecutionResult visitInsertRow( - InsertRowNode node, WritePlanNodeExecutionContext context) { + final InsertRowNode node, final WritePlanNodeExecutionContext context) { return executeDataInsert(node, context); } @Override public RegionExecutionResult visitInsertTablet( - InsertTabletNode node, WritePlanNodeExecutionContext context) { + final InsertTabletNode node, final WritePlanNodeExecutionContext context) { return executeDataInsert(node, context); } @Override public RegionExecutionResult visitInsertRows( - 
InsertRowsNode node, WritePlanNodeExecutionContext context) { + final InsertRowsNode node, final WritePlanNodeExecutionContext context) { return executeDataInsert(node, context); } @Override public RegionExecutionResult visitInsertMultiTablets( - InsertMultiTabletsNode node, WritePlanNodeExecutionContext context) { + final InsertMultiTabletsNode node, final WritePlanNodeExecutionContext context) { return executeDataInsert(node, context); } @Override public RegionExecutionResult visitInsertRowsOfOneDevice( - InsertRowsOfOneDeviceNode node, WritePlanNodeExecutionContext context) { + final InsertRowsOfOneDeviceNode node, final WritePlanNodeExecutionContext context) { return executeDataInsert(node, context); } @Override public RegionExecutionResult visitPipeEnrichedInsertNode( - PipeEnrichedInsertNode node, WritePlanNodeExecutionContext context) { + final PipeEnrichedInsertNode node, final WritePlanNodeExecutionContext context) { return executeDataInsert(node, context); } private RegionExecutionResult executeDataInsert( - InsertNode insertNode, WritePlanNodeExecutionContext context) { - RegionExecutionResult response = new RegionExecutionResult(); + final InsertNode insertNode, final WritePlanNodeExecutionContext context) { + if (context.getRegionWriteValidationRWLock() == null) { + final String message = + "Failed to get the lock of the region because the region is not existed."; + return RegionExecutionResult.create( + false, message, RpcUtils.getStatus(TSStatusCode.NO_AVAILABLE_REGION_GROUP, message)); + } + context.getRegionWriteValidationRWLock().readLock().lock(); try { - TSStatus status = fireTriggerAndInsert(context.getRegionId(), insertNode); - response.setAccepted(TSStatusCode.SUCCESS_STATUS.getStatusCode() == status.getCode()); - response.setMessage(status.message); - if (!response.isAccepted()) { - response.setStatus(status); - } - return response; + final TSStatus status = fireTriggerAndInsert(context.getRegionId(), insertNode); + return RegionExecutionResult.create( + TSStatusCode.SUCCESS_STATUS.getStatusCode() == status.getCode(), + status.message, + status); } catch (ConsensusException e) { LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e); - response.setAccepted(false); - response.setMessage(e.toString()); - response.setStatus(RpcUtils.getStatus(TSStatusCode.WRITE_PROCESS_ERROR, e.toString())); - return response; + final TSStatus status = RpcUtils.getStatus(TSStatusCode.WRITE_PROCESS_ERROR, e.toString()); + if (e instanceof ConsensusGroupNotExistException) { + status.setCode(TSStatusCode.NO_AVAILABLE_REGION_GROUP.getStatusCode()); + } + return RegionExecutionResult.create(false, e.toString(), status); } finally { context.getRegionWriteValidationRWLock().readLock().unlock(); } } - private TSStatus fireTriggerAndInsert(ConsensusGroupId groupId, InsertNode insertNode) - throws ConsensusException { + private TSStatus fireTriggerAndInsert( + final ConsensusGroupId groupId, final InsertNode insertNode) throws ConsensusException { long triggerCostTime = 0; TSStatus status; long startTime = System.nanoTime(); @@ -269,7 +292,7 @@ private TSStatus fireTriggerAndInsert(ConsensusGroupId groupId, InsertNode inser TSStatusCode.TRIGGER_FIRE_ERROR.getStatusCode(), "Failed to complete the insertion because trigger error before the insertion."); } else { - long startWriteTime = System.nanoTime(); + final long startWriteTime = System.nanoTime(); status = dataRegionConsensus.write(groupId, insertNode); 
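In the RegionReadExecutor hunk above, failures are now classified via the root cause (ErrorHandlingUtils.getRootCause) rather than the immediate cause, and InterruptedException joins the set of read errors that mark the result retryable with RATIS_READ_UNAVAILABLE. A sketch of that classification, assuming getRootCause walks to the innermost cause; the exception types below stand in for the Ratis ones named in the diff:

    final class ReadRetrySketch {
      static Throwable rootCause(Throwable t) {
        Throwable cur = t;
        while (cur.getCause() != null && cur.getCause() != cur) {
          cur = cur.getCause();   // walk to the innermost cause
        }
        return cur;
      }

      static boolean isReadRetryable(Throwable e) {
        Throwable t = rootCause(e);
        // stand-ins for ReadException / ReadIndexException / NotLeaderException /
        // ServerNotReadyException in the diff, plus the newly added interrupt case
        return t instanceof java.io.IOException
            || t instanceof InterruptedException;
      }
    }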
PERFORMANCE_OVERVIEW_METRICS.recordScheduleStorageCost(System.nanoTime() - startWriteTime); @@ -292,7 +315,7 @@ private TSStatus fireTriggerAndInsert(ConsensusGroupId groupId, InsertNode inser @Override public RegionExecutionResult visitPipeEnrichedDeleteDataNode( - PipeEnrichedDeleteDataNode node, WritePlanNodeExecutionContext context) { + final PipeEnrichedDeleteDataNode node, final WritePlanNodeExecutionContext context) { // data deletion should block data insertion, especially when executed for deleting timeseries context.getRegionWriteValidationRWLock().writeLock().lock(); try { @@ -304,7 +327,7 @@ public RegionExecutionResult visitPipeEnrichedDeleteDataNode( @Override public RegionExecutionResult visitDeleteData( - DeleteDataNode node, WritePlanNodeExecutionContext context) { + final DeleteDataNode node, final WritePlanNodeExecutionContext context) { // data deletion don't need to block data insertion, but there are some creation operation // require write lock on data region. context.getRegionWriteValidationRWLock().writeLock().lock(); @@ -317,43 +340,40 @@ public RegionExecutionResult visitDeleteData( @Override public RegionExecutionResult visitCreateTimeSeries( - CreateTimeSeriesNode node, WritePlanNodeExecutionContext context) { + final CreateTimeSeriesNode node, final WritePlanNodeExecutionContext context) { return executeCreateTimeSeries(node, context, false); } private RegionExecutionResult executeCreateTimeSeries( - CreateTimeSeriesNode node, - WritePlanNodeExecutionContext context, - boolean receivedFromPipe) { - ISchemaRegion schemaRegion = + final CreateTimeSeriesNode node, + final WritePlanNodeExecutionContext context, + final boolean receivedFromPipe) { + final ISchemaRegion schemaRegion = schemaEngine.getSchemaRegion((SchemaRegionId) context.getRegionId()); - RegionExecutionResult result = + final RegionExecutionResult result = checkQuotaBeforeCreatingTimeSeries(schemaRegion, node.getPath().getDevicePath(), 1); if (result != null) { return result; } - if (CONFIG.getSchemaRegionConsensusProtocolClass().equals(ConsensusFactory.RATIS_CONSENSUS)) { + if (CONFIG.getSchemaRegionConsensusProtocolClass().equals(ConsensusFactory.RATIS_CONSENSUS) + && !receivedFromPipe) { context.getRegionWriteValidationRWLock().writeLock().lock(); try { - Map failingMeasurementMap = + final Map failingMeasurementMap = schemaRegion.checkMeasurementExistence( node.getPath().getDevicePath(), Collections.singletonList(node.getPath().getMeasurement()), Collections.singletonList(node.getAlias())); if (failingMeasurementMap.isEmpty()) { - return receivedFromPipe - ? 
super.visitPipeEnrichedWritePlanNode(new PipeEnrichedWritePlanNode(node), context) - : super.visitCreateTimeSeries(node, context); + return super.visitCreateTimeSeries(node, context); } else { - MetadataException metadataException = failingMeasurementMap.get(0); + final MetadataException metadataException = failingMeasurementMap.get(0); LOGGER.warn(METADATA_ERROR_MSG, metadataException); - result = new RegionExecutionResult(); - result.setAccepted(false); - result.setMessage(metadataException.getMessage()); - result.setStatus( + return RegionExecutionResult.create( + false, + metadataException.getMessage(), RpcUtils.getStatus( metadataException.getErrorCode(), metadataException.getMessage())); - return result; } } finally { context.getRegionWriteValidationRWLock().writeLock().unlock(); @@ -367,42 +387,40 @@ private RegionExecutionResult executeCreateTimeSeries( @Override public RegionExecutionResult visitCreateAlignedTimeSeries( - CreateAlignedTimeSeriesNode node, WritePlanNodeExecutionContext context) { + final CreateAlignedTimeSeriesNode node, final WritePlanNodeExecutionContext context) { return executeCreateAlignedTimeSeries(node, context, false); } private RegionExecutionResult executeCreateAlignedTimeSeries( - CreateAlignedTimeSeriesNode node, - WritePlanNodeExecutionContext context, - boolean receivedFromPipe) { - ISchemaRegion schemaRegion = + final CreateAlignedTimeSeriesNode node, + final WritePlanNodeExecutionContext context, + final boolean receivedFromPipe) { + final ISchemaRegion schemaRegion = schemaEngine.getSchemaRegion((SchemaRegionId) context.getRegionId()); - RegionExecutionResult result = + final RegionExecutionResult result = checkQuotaBeforeCreatingTimeSeries( schemaRegion, node.getDevicePath(), node.getMeasurements().size()); if (result != null) { return result; } - if (CONFIG.getSchemaRegionConsensusProtocolClass().equals(ConsensusFactory.RATIS_CONSENSUS)) { + if (CONFIG.getSchemaRegionConsensusProtocolClass().equals(ConsensusFactory.RATIS_CONSENSUS) + && !receivedFromPipe) { context.getRegionWriteValidationRWLock().writeLock().lock(); try { - Map failingMeasurementMap = + final Map failingMeasurementMap = schemaRegion.checkMeasurementExistence( node.getDevicePath(), node.getMeasurements(), node.getAliasList()); if (failingMeasurementMap.isEmpty()) { - return receivedFromPipe - ? 
super.visitPipeEnrichedWritePlanNode(new PipeEnrichedWritePlanNode(node), context) - : super.visitCreateAlignedTimeSeries(node, context); + return super.visitCreateAlignedTimeSeries(node, context); } else { - MetadataException metadataException = failingMeasurementMap.values().iterator().next(); + final MetadataException metadataException = + failingMeasurementMap.values().iterator().next(); LOGGER.warn(METADATA_ERROR_MSG, metadataException); - result = new RegionExecutionResult(); - result.setAccepted(false); - result.setMessage(metadataException.getMessage()); - result.setStatus( + return RegionExecutionResult.create( + false, + metadataException.getMessage(), RpcUtils.getStatus( metadataException.getErrorCode(), metadataException.getMessage())); - return result; } } finally { context.getRegionWriteValidationRWLock().writeLock().unlock(); @@ -416,18 +434,18 @@ private RegionExecutionResult executeCreateAlignedTimeSeries( @Override public RegionExecutionResult visitCreateMultiTimeSeries( - CreateMultiTimeSeriesNode node, WritePlanNodeExecutionContext context) { + final CreateMultiTimeSeriesNode node, final WritePlanNodeExecutionContext context) { return executeCreateMultiTimeSeries(node, context, false); } private RegionExecutionResult executeCreateMultiTimeSeries( - CreateMultiTimeSeriesNode node, - WritePlanNodeExecutionContext context, - boolean receivedFromPipe) { - ISchemaRegion schemaRegion = + final CreateMultiTimeSeriesNode node, + final WritePlanNodeExecutionContext context, + final boolean receivedFromPipe) { + final ISchemaRegion schemaRegion = schemaEngine.getSchemaRegion((SchemaRegionId) context.getRegionId()); RegionExecutionResult result; - for (Map.Entry entry : + for (final Map.Entry entry : node.getMeasurementGroupMap().entrySet()) { result = checkQuotaBeforeCreatingTimeSeries( @@ -436,34 +454,31 @@ private RegionExecutionResult executeCreateMultiTimeSeries( return result; } } - if (CONFIG.getSchemaRegionConsensusProtocolClass().equals(ConsensusFactory.RATIS_CONSENSUS)) { + if (CONFIG.getSchemaRegionConsensusProtocolClass().equals(ConsensusFactory.RATIS_CONSENSUS) + && !receivedFromPipe) { context.getRegionWriteValidationRWLock().writeLock().lock(); try { - List failingStatus = new ArrayList<>(); - Map measurementGroupMap = node.getMeasurementGroupMap(); - List emptyDeviceList = new ArrayList<>(); + final List failingStatus = new ArrayList<>(); + final Map measurementGroupMap = + node.getMeasurementGroupMap(); + final List emptyDeviceList = new ArrayList<>(); checkMeasurementExistence( measurementGroupMap, schemaRegion, failingStatus, emptyDeviceList); - for (PartialPath emptyDevice : emptyDeviceList) { + for (final PartialPath emptyDevice : emptyDeviceList) { measurementGroupMap.remove(emptyDevice); } - RegionExecutionResult failingResult = - registerTimeSeries( - measurementGroupMap, node, context, failingStatus, receivedFromPipe); + final RegionExecutionResult executionResult = + registerTimeSeries(measurementGroupMap, node, context, failingStatus); - if (failingResult != null) { - return failingResult; + if (executionResult != null) { + return executionResult; } - TSStatus status = RpcUtils.getStatus(failingStatus); - failingResult = new RegionExecutionResult(); - failingResult.setAccepted(false); - failingResult.setMessage(status.getMessage()); - failingResult.setStatus(status); - return failingResult; + final TSStatus status = RpcUtils.getStatus(failingStatus); + return RegionExecutionResult.create(false, status.getMessage(), status); } finally { 
context.getRegionWriteValidationRWLock().writeLock().unlock(); } @@ -475,12 +490,12 @@ private RegionExecutionResult executeCreateMultiTimeSeries( } private void checkMeasurementExistence( - Map measurementGroupMap, - ISchemaRegion schemaRegion, - List failingStatus, - List emptyDeviceList) { - for (Map.Entry entry : measurementGroupMap.entrySet()) { - Map failingMeasurementMap = + final Map measurementGroupMap, + final ISchemaRegion schemaRegion, + final List failingStatus, + final List emptyDeviceList) { + for (final Map.Entry entry : measurementGroupMap.entrySet()) { + final Map failingMeasurementMap = schemaRegion.checkMeasurementExistence( entry.getKey(), entry.getValue().getMeasurements(), @@ -489,7 +504,7 @@ private void checkMeasurementExistence( continue; } - for (Map.Entry failingMeasurement : + for (final Map.Entry failingMeasurement : failingMeasurementMap.entrySet()) { LOGGER.warn(METADATA_ERROR_MSG, failingMeasurement.getValue()); failingStatus.add( @@ -506,22 +521,19 @@ private void checkMeasurementExistence( } private RegionExecutionResult registerTimeSeries( - Map measurementGroupMap, - CreateMultiTimeSeriesNode node, - WritePlanNodeExecutionContext context, - List failingStatus, - boolean receivedFromPipe) { + final Map measurementGroupMap, + final CreateMultiTimeSeriesNode node, + final WritePlanNodeExecutionContext context, + final List failingStatus) { if (!measurementGroupMap.isEmpty()) { // try registering the rest timeseries - RegionExecutionResult executionResult = - receivedFromPipe - ? super.visitPipeEnrichedWritePlanNode(new PipeEnrichedWritePlanNode(node), context) - : super.visitCreateMultiTimeSeries(node, context); + final RegionExecutionResult executionResult = + super.visitCreateMultiTimeSeries(node, context); if (failingStatus.isEmpty()) { return executionResult; } - TSStatus executionStatus = executionResult.getStatus(); + final TSStatus executionStatus = executionResult.getStatus(); if (executionStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) { failingStatus.addAll(executionStatus.getSubStatus()); } else if (executionStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { @@ -533,36 +545,37 @@ private RegionExecutionResult registerTimeSeries( @Override public RegionExecutionResult visitInternalCreateTimeSeries( - InternalCreateTimeSeriesNode node, WritePlanNodeExecutionContext context) { + final InternalCreateTimeSeriesNode node, final WritePlanNodeExecutionContext context) { return executeInternalCreateTimeSeries(node, context, false); } private RegionExecutionResult executeInternalCreateTimeSeries( - InternalCreateTimeSeriesNode node, - WritePlanNodeExecutionContext context, - boolean receivedFromPipe) { - ISchemaRegion schemaRegion = + final InternalCreateTimeSeriesNode node, + final WritePlanNodeExecutionContext context, + final boolean receivedFromPipe) { + final ISchemaRegion schemaRegion = schemaEngine.getSchemaRegion((SchemaRegionId) context.getRegionId()); - RegionExecutionResult result = + final RegionExecutionResult result = checkQuotaBeforeCreatingTimeSeries( schemaRegion, node.getDevicePath(), node.getMeasurementGroup().size()); if (result != null) { return result; } - if (CONFIG.getSchemaRegionConsensusProtocolClass().equals(ConsensusFactory.RATIS_CONSENSUS)) { + if (CONFIG.getSchemaRegionConsensusProtocolClass().equals(ConsensusFactory.RATIS_CONSENSUS) + && !receivedFromPipe) { context.getRegionWriteValidationRWLock().writeLock().lock(); try { - List failingStatus = new ArrayList<>(); - List 
alreadyExistingStatus = new ArrayList<>(); - MeasurementGroup measurementGroup = node.getMeasurementGroup(); - Map failingMeasurementMap = + final List failingStatus = new ArrayList<>(); + final List alreadyExistingStatus = new ArrayList<>(); + final MeasurementGroup measurementGroup = node.getMeasurementGroup(); + final Map failingMeasurementMap = schemaRegion.checkMeasurementExistence( node.getDevicePath(), measurementGroup.getMeasurements(), measurementGroup.getAliasList()); MetadataException metadataException; // filter failed measurement and keep the rest for execution - for (Map.Entry failingMeasurement : + for (final Map.Entry failingMeasurement : failingMeasurementMap.entrySet()) { metadataException = failingMeasurement.getValue(); if (metadataException.getErrorCode() @@ -575,7 +588,7 @@ private RegionExecutionResult executeInternalCreateTimeSeries( ((MeasurementAlreadyExistException) metadataException) .getMeasurementPath()))); } else { - int errorCode = metadataException.getErrorCode(); + final int errorCode = metadataException.getErrorCode(); if (errorCode != TSStatusCode.PATH_ALREADY_EXIST.getStatusCode() || errorCode != TSStatusCode.ALIAS_ALREADY_EXIST.getStatusCode()) { LOGGER.warn(METADATA_ERROR_MSG, metadataException); @@ -588,10 +601,12 @@ private RegionExecutionResult executeInternalCreateTimeSeries( measurementGroup.removeMeasurements(failingMeasurementMap.keySet()); return processExecutionResultOfInternalCreateSchema( - receivedFromPipe - ? super.visitPipeEnrichedWritePlanNode( - new PipeEnrichedWritePlanNode(node), context) - : super.visitInternalCreateTimeSeries(node, context), + !measurementGroup.isEmpty() + ? super.visitInternalCreateTimeSeries(node, context) + : RegionExecutionResult.create( + true, + "Execute successfully", + RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS, "Execute successfully")), failingStatus, alreadyExistingStatus); } finally { @@ -606,7 +621,7 @@ private RegionExecutionResult executeInternalCreateTimeSeries( @Override public RegionExecutionResult visitInternalCreateMultiTimeSeries( - InternalCreateMultiTimeSeriesNode node, WritePlanNodeExecutionContext context) { + final InternalCreateMultiTimeSeriesNode node, final WritePlanNodeExecutionContext context) { return executeInternalCreateMultiTimeSeries(node, context, false); } @@ -626,56 +641,62 @@ private RegionExecutionResult executeInternalCreateMultiTimeSeries( return result; } } - if (CONFIG.getSchemaRegionConsensusProtocolClass().equals(ConsensusFactory.RATIS_CONSENSUS)) { + if (CONFIG.getSchemaRegionConsensusProtocolClass().equals(ConsensusFactory.RATIS_CONSENSUS) + && !receivedFromPipe) { context.getRegionWriteValidationRWLock().writeLock().lock(); try { final List failingStatus = new ArrayList<>(); final List alreadyExistingStatus = new ArrayList<>(); - // Do not filter measurements if the node is generated by pipe - // Because pipe may use the InternalCreateMultiTimeSeriesNode to transfer historical data - // And the alias/tags/attributes may need to be updated for existing time series - if (!receivedFromPipe) { - MeasurementGroup measurementGroup; - Map failingMeasurementMap; - MetadataException metadataException; - for (final Map.Entry> deviceEntry : - node.getDeviceMap().entrySet()) { - measurementGroup = deviceEntry.getValue().right; - failingMeasurementMap = - schemaRegion.checkMeasurementExistence( - deviceEntry.getKey(), - measurementGroup.getMeasurements(), - measurementGroup.getAliasList()); - // filter failed measurement and keep the rest for execution - for (final Map.Entry 
failingMeasurement : - failingMeasurementMap.entrySet()) { - metadataException = failingMeasurement.getValue(); - if (metadataException.getErrorCode() - == TSStatusCode.TIMESERIES_ALREADY_EXIST.getStatusCode()) { - // There's no need to internal create time series. - alreadyExistingStatus.add( - RpcUtils.getStatus( - metadataException.getErrorCode(), - MeasurementPath.transformDataToString( - ((MeasurementAlreadyExistException) metadataException) - .getMeasurementPath()))); - } else { - LOGGER.warn(METADATA_ERROR_MSG, metadataException); - failingStatus.add( - RpcUtils.getStatus( - metadataException.getErrorCode(), metadataException.getMessage())); - } + MeasurementGroup measurementGroup; + Map failingMeasurementMap; + MetadataException metadataException; + + final Iterator>> iterator = + node.getDeviceMap().entrySet().iterator(); + + while (iterator.hasNext()) { + final Map.Entry> deviceEntry = + iterator.next(); + measurementGroup = deviceEntry.getValue().right; + failingMeasurementMap = + schemaRegion.checkMeasurementExistence( + deviceEntry.getKey(), + measurementGroup.getMeasurements(), + measurementGroup.getAliasList()); + // filter failed measurement and keep the rest for execution + for (final Map.Entry failingMeasurement : + failingMeasurementMap.entrySet()) { + metadataException = failingMeasurement.getValue(); + if (metadataException.getErrorCode() + == TSStatusCode.TIMESERIES_ALREADY_EXIST.getStatusCode()) { + // There's no need to internal create time series. + alreadyExistingStatus.add( + RpcUtils.getStatus( + metadataException.getErrorCode(), + MeasurementPath.transformDataToString( + ((MeasurementAlreadyExistException) metadataException) + .getMeasurementPath()))); + } else { + LOGGER.warn(METADATA_ERROR_MSG, metadataException); + failingStatus.add( + RpcUtils.getStatus( + metadataException.getErrorCode(), metadataException.getMessage())); } - measurementGroup.removeMeasurements(failingMeasurementMap.keySet()); + } + measurementGroup.removeMeasurements(failingMeasurementMap.keySet()); + if (measurementGroup.isEmpty()) { + iterator.remove(); } } return processExecutionResultOfInternalCreateSchema( - receivedFromPipe - ? super.visitPipeEnrichedWritePlanNode( - new PipeEnrichedWritePlanNode(node), context) - : super.visitInternalCreateMultiTimeSeries(node, context), + !node.getDeviceMap().isEmpty() + ? 
super.visitInternalCreateMultiTimeSeries(node, context) + : RegionExecutionResult.create( + true, + "Execute successfully", + RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS, "Execute successfully")), failingStatus, alreadyExistingStatus); } finally { @@ -698,11 +719,8 @@ private RegionExecutionResult checkQuotaBeforeCreatingTimeSeries( try { schemaRegion.checkSchemaQuota(path, size); } catch (SchemaQuotaExceededException e) { - RegionExecutionResult result = new RegionExecutionResult(); - result.setAccepted(false); - result.setMessage(e.getMessage()); - result.setStatus(RpcUtils.getStatus(e.getErrorCode(), e.getMessage())); - return result; + return RegionExecutionResult.create( + false, e.getMessage(), RpcUtils.getStatus(e.getErrorCode(), e.getMessage())); } return null; } @@ -716,22 +734,17 @@ private RegionExecutionResult processExecutionResultOfInternalCreateSchema( separateMeasurementAlreadyExistException( failingStatus, executionStatus, alreadyExistingStatus); - RegionExecutionResult result = new RegionExecutionResult(); + boolean isAccepted = true; TSStatus status; if (failingStatus.isEmpty() && alreadyExistingStatus.isEmpty()) { status = RpcUtils.SUCCESS_STATUS; - result.setAccepted(true); } else if (failingStatus.isEmpty()) { status = RpcUtils.getStatus(alreadyExistingStatus); - result.setAccepted(true); } else { status = RpcUtils.getStatus(failingStatus); - result.setAccepted(false); + isAccepted = false; } - - result.setMessage(status.getMessage()); - result.setStatus(status); - return result; + return RegionExecutionResult.create(isAccepted, status.getMessage(), status); } private void separateMeasurementAlreadyExistException( @@ -784,11 +797,8 @@ private RegionExecutionResult executeAlterTimeSeries( ? super.visitPipeEnrichedWritePlanNode(new PipeEnrichedWritePlanNode(node), context) : super.visitAlterTimeSeries(node, context); } catch (MetadataException e) { - RegionExecutionResult result = new RegionExecutionResult(); - result.setAccepted(true); - result.setMessage(e.getMessage()); - result.setStatus(RpcUtils.getStatus(e.getErrorCode(), e.getMessage())); - return result; + return RegionExecutionResult.create( + true, e.getMessage(), RpcUtils.getStatus(e.getErrorCode(), e.getMessage())); } } @@ -810,15 +820,12 @@ private RegionExecutionResult executeActivateTemplate( if (templateSetInfo == null) { // The activation has already been validated during analyzing. // That means the template is being unset during the activation plan transport. - RegionExecutionResult result = new RegionExecutionResult(); - result.setAccepted(false); String message = String.format( "Template is being unsetting from path %s. Please try activating later.", node.getPathSetTemplate()); - result.setMessage(message); - result.setStatus(RpcUtils.getStatus(TSStatusCode.METADATA_ERROR, message)); - return result; + return RegionExecutionResult.create( + false, message, RpcUtils.getStatus(TSStatusCode.METADATA_ERROR, message)); } ISchemaRegion schemaRegion = schemaEngine.getSchemaRegion((SchemaRegionId) context.getRegionId()); @@ -858,15 +865,12 @@ private RegionExecutionResult executeBatchActivateTemplate( if (templateSetInfo == null) { // The activation has already been validated during analyzing. // That means the template is being unset during the activation plan transport. - RegionExecutionResult result = new RegionExecutionResult(); - result.setAccepted(false); String message = String.format( "Template is being unsetting from path %s. 
Please try activating later.", node.getPathSetTemplate(devicePath)); - result.setMessage(message); - result.setStatus(RpcUtils.getStatus(TSStatusCode.METADATA_ERROR, message)); - return result; + return RegionExecutionResult.create( + false, message, RpcUtils.getStatus(TSStatusCode.METADATA_ERROR, message)); } RegionExecutionResult result = checkQuotaBeforeCreatingTimeSeries( @@ -906,17 +910,14 @@ private RegionExecutionResult executeInternalBatchActivateTemplate( if (templateSetInfo == null) { // The activation has already been validated during analyzing. // That means the template is being unset during the activation plan transport. - RegionExecutionResult result = new RegionExecutionResult(); - result.setAccepted(false); String message = String.format( "Template is being unsetting from prefix path of %s. Please try activating later.", new PartialPath( Arrays.copyOf(entry.getKey().getNodes(), entry.getValue().right + 1)) .getFullPath()); - result.setMessage(message); - result.setStatus(RpcUtils.getStatus(TSStatusCode.METADATA_ERROR, message)); - return result; + return RegionExecutionResult.create( + false, message, RpcUtils.getStatus(TSStatusCode.METADATA_ERROR, message)); } RegionExecutionResult result = checkQuotaBeforeCreatingTimeSeries( @@ -941,9 +942,9 @@ public RegionExecutionResult visitCreateLogicalView( } private RegionExecutionResult executeCreateLogicalView( - CreateLogicalViewNode node, - WritePlanNodeExecutionContext context, - boolean receivedFromPipe) { + final CreateLogicalViewNode node, + final WritePlanNodeExecutionContext context, + final boolean receivedFromPipe) { ISchemaRegion schemaRegion = schemaEngine.getSchemaRegion((SchemaRegionId) context.getRegionId()); if (CONFIG.getSchemaRegionConsensusProtocolClass().equals(ConsensusFactory.RATIS_CONSENSUS)) { @@ -968,13 +969,11 @@ private RegionExecutionResult executeCreateLogicalView( if (!failingMetadataException.isEmpty()) { MetadataException metadataException = failingMetadataException.get(0); LOGGER.warn(METADATA_ERROR_MSG, metadataException); - RegionExecutionResult result = new RegionExecutionResult(); - result.setAccepted(false); - result.setMessage(metadataException.getMessage()); - result.setStatus( + return RegionExecutionResult.create( + false, + metadataException.getMessage(), RpcUtils.getStatus( metadataException.getErrorCode(), metadataException.getMessage())); - return result; } // step 2. make sure all source paths exist. 
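Aside: the hunks above repeatedly collapse the `new RegionExecutionResult(); result.setAccepted(...); result.setMessage(...); result.setStatus(...)` sequence into a single `RegionExecutionResult.create(accepted, message, status)` call. The following is only a minimal sketch of that factory shape, not the real class (which carries more state); `TSStatus` is assumed to be the generated IoTDB thrift type already on the classpath:

```java
import org.apache.iotdb.common.rpc.thrift.TSStatus;

// Sketch of a static factory replacing the setter sequence: the result is
// fully built in one call, so a half-initialized instance can never escape
// from an error-handling path.
public final class RegionExecutionResult {
  private final boolean accepted;
  private final String message;
  private final TSStatus status;

  private RegionExecutionResult(boolean accepted, String message, TSStatus status) {
    this.accepted = accepted;
    this.message = message;
    this.status = status;
  }

  public static RegionExecutionResult create(boolean accepted, String message, TSStatus status) {
    return new RegionExecutionResult(accepted, message, status);
  }

  public boolean isAccepted() { return accepted; }
  public String getMessage() { return message; }
  public TSStatus getStatus() { return status; }
}
```

Beyond brevity, the factory removes an entire class of bugs where one of the three setters was forgotten on some return path, which is exactly the repetition this refactor eliminates.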
return receivedFromPipe diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/DataNodeQueryContext.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/DataNodeQueryContext.java index 881ff9dc8a260..ffa3ead32e17f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/DataNodeQueryContext.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/DataNodeQueryContext.java @@ -26,8 +26,8 @@ import javax.annotation.concurrent.GuardedBy; -import java.util.HashMap; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.ReentrantLock; @@ -43,7 +43,7 @@ public class DataNodeQueryContext { private final ReentrantLock lock = new ReentrantLock(); public DataNodeQueryContext(int dataNodeFINum) { - this.uncachedPathToSeriesScanInfo = new HashMap<>(); + this.uncachedPathToSeriesScanInfo = new ConcurrentHashMap<>(); this.dataNodeFINum = new AtomicInteger(dataNodeFINum); } @@ -59,15 +59,24 @@ public Pair getSeriesScanInfo(PartialPath path) { return uncachedPathToSeriesScanInfo.get(path); } + public Map> getUncachedPathToSeriesScanInfo() { + return uncachedPathToSeriesScanInfo; + } + public int decreaseDataNodeFINum() { return dataNodeFINum.decrementAndGet(); } - public void lock() { - lock.lock(); + public void lock(boolean isDeviceInMultiRegion) { + // When a device exists in only one region, there will be no intermediate state. + if (isDeviceInMultiRegion) { + lock.lock(); + } } - public void unLock() { - lock.unlock(); + public void unLock(boolean isDeviceInMultiRegion) { + if (isDeviceInMultiRegion) { + lock.unlock(); + } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FakedFragmentInstanceContext.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FakedFragmentInstanceContext.java new file mode 100644 index 0000000000000..a91e41d237932 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FakedFragmentInstanceContext.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.queryengine.execution.fragment; + +import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.db.exception.query.QueryProcessException; +import org.apache.iotdb.db.queryengine.plan.planner.memory.FakedMemoryReservationManager; +import org.apache.iotdb.db.storageengine.dataregion.DataRegion; +import org.apache.iotdb.db.storageengine.dataregion.read.QueryDataSource; +import org.apache.iotdb.db.storageengine.dataregion.read.control.FileReaderManager; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; + +import org.apache.tsfile.read.filter.basic.Filter; + +import java.util.Collections; +import java.util.List; + +public class FakedFragmentInstanceContext extends FragmentInstanceContext { + + public FakedFragmentInstanceContext(Filter timeFilter, DataRegion dataRegion) { + super(0, new FakedMemoryReservationManager(), timeFilter, dataRegion); + } + + public QueryDataSource getSharedQueryDataSource(PartialPath sourcePath) + throws QueryProcessException { + if (sharedQueryDataSource == null) { + initQueryDataSource(sourcePath); + } + return (QueryDataSource) sharedQueryDataSource; + } + + public void initQueryDataSource(PartialPath sourcePath) throws QueryProcessException { + + dataRegion.readLock(); + try { + this.sharedQueryDataSource = + dataRegion.query( + Collections.singletonList(sourcePath), + sourcePath.getDevice(), + this, + getGlobalTimeFilter(), + null); + + // used files should be added before mergeLock is unlocked, or they may be deleted by + // running merge + if (sharedQueryDataSource != null) { + ((QueryDataSource) sharedQueryDataSource).setSingleDevice(true); + List tsFileList = + ((QueryDataSource) sharedQueryDataSource).getSeqResources(); + if (tsFileList != null) { + for (TsFileResource tsFile : tsFileList) { + FileReaderManager.getInstance().increaseFileReaderReference(tsFile, tsFile.isClosed()); + } + } + tsFileList = ((QueryDataSource) sharedQueryDataSource).getUnseqResources(); + if (tsFileList != null) { + for (TsFileResource tsFile : tsFileList) { + FileReaderManager.getInstance().increaseFileReaderReference(tsFile, tsFile.isClosed()); + } + } + } + } finally { + dataRegion.readUnlock(); + } + } + + public void releaseSharedQueryDataSource() { + if (sharedQueryDataSource != null) { + List tsFileList = ((QueryDataSource) sharedQueryDataSource).getSeqResources(); + if (tsFileList != null) { + for (TsFileResource tsFile : tsFileList) { + FileReaderManager.getInstance().decreaseFileReaderReference(tsFile, tsFile.isClosed()); + } + } + tsFileList = ((QueryDataSource) sharedQueryDataSource).getUnseqResources(); + if (tsFileList != null) { + for (TsFileResource tsFile : tsFileList) { + FileReaderManager.getInstance().decreaseFileReaderReference(tsFile, tsFile.isClosed()); + } + } + sharedQueryDataSource = null; + } + } + + @Override + protected boolean checkIfModificationExists(TsFileResource tsFileResource) { + return false; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceContext.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceContext.java index 7302e7f0b4538..5510394396d1c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceContext.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceContext.java @@ -19,34 +19,47 @@ package 
org.apache.iotdb.db.queryengine.execution.fragment; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.exception.IoTDBException; +import org.apache.iotdb.commons.path.AlignedPath; import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.commons.path.PatternTreeMap; import org.apache.iotdb.commons.utils.TestOnly; +import org.apache.iotdb.db.conf.IoTDBConfig; +import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.exception.query.QueryProcessException; import org.apache.iotdb.db.queryengine.common.DeviceContext; import org.apache.iotdb.db.queryengine.common.FragmentInstanceId; import org.apache.iotdb.db.queryengine.common.QueryId; import org.apache.iotdb.db.queryengine.common.SessionInfo; +import org.apache.iotdb.db.queryengine.metric.DriverSchedulerMetricSet; import org.apache.iotdb.db.queryengine.metric.QueryRelatedResourceMetricSet; import org.apache.iotdb.db.queryengine.metric.SeriesScanCostMetricSet; import org.apache.iotdb.db.queryengine.plan.planner.memory.MemoryReservationManager; import org.apache.iotdb.db.queryengine.plan.planner.memory.ThreadSafeMemoryReservationManager; import org.apache.iotdb.db.queryengine.plan.planner.plan.TimePredicate; +import org.apache.iotdb.db.storageengine.StorageEngine; +import org.apache.iotdb.db.storageengine.dataregion.DataRegion; import org.apache.iotdb.db.storageengine.dataregion.IDataRegionForQuery; +import org.apache.iotdb.db.storageengine.dataregion.modification.Modification; +import org.apache.iotdb.db.storageengine.dataregion.modification.ModificationFile; import org.apache.iotdb.db.storageengine.dataregion.read.IQueryDataSource; import org.apache.iotdb.db.storageengine.dataregion.read.QueryDataSource; import org.apache.iotdb.db.storageengine.dataregion.read.QueryDataSourceForRegionScan; import org.apache.iotdb.db.storageengine.dataregion.read.QueryDataSourceType; import org.apache.iotdb.db.storageengine.dataregion.read.control.FileReaderManager; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory; +import org.apache.iotdb.db.utils.datastructure.TVList; import org.apache.iotdb.mpp.rpc.thrift.TFetchFragmentInstanceStatisticsResp; import org.apache.tsfile.file.metadata.IDeviceID; import org.apache.tsfile.read.filter.basic.Filter; +import org.apache.tsfile.utils.RamUsageEstimator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.time.ZoneId; -import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -57,26 +70,35 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; +import static org.apache.iotdb.db.queryengine.metric.DriverSchedulerMetricSet.BLOCK_QUEUED_TIME; +import static org.apache.iotdb.db.queryengine.metric.DriverSchedulerMetricSet.READY_QUEUED_TIME; +import static org.apache.iotdb.db.storageengine.dataregion.VirtualDataRegion.EMPTY_QUERY_DATA_SOURCE; + public class FragmentInstanceContext extends QueryContext { private static final Logger LOGGER = LoggerFactory.getLogger(FragmentInstanceContext.class); + private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig(); private static final long END_TIME_INITIAL_VALUE = -1L; + // wait over 5s for driver to close is abnormal + private static final long LONG_WAIT_DURATION = 5_000_000_000L; private final FragmentInstanceId id; private final FragmentInstanceStateMachine stateMachine; private final 
MemoryReservationManager memoryReservationManager; - private IDataRegionForQuery dataRegion; + protected IDataRegionForQuery dataRegion; private Filter globalTimeFilter; // it will only be used once, after sharedQueryDataSource being inited, it will be set to null - private List sourcePaths; + protected List sourcePaths; + + private boolean singleSourcePath = false; // Used for region scan. private Map devicePathsToContext; // Shared by all scan operators in this fragment instance to avoid memory problem - private IQueryDataSource sharedQueryDataSource; + protected IQueryDataSource sharedQueryDataSource; /** closed tsfile used in this fragment instance. */ private Set closedFilePaths; @@ -91,6 +113,10 @@ public class FragmentInstanceContext extends QueryContext { // empty for zero time partitions private List timePartitions; + // An optimization during restart changes the time index from FILE TIME INDEX + // to DEVICE TIME INDEX, which may cause a related validation false positive. + private boolean ignoreNotExistsDevice = false; + private QueryDataSourceType queryDataSourceType = QueryDataSourceType.SERIES_SCAN; private final AtomicLong startNanos = new AtomicLong(); @@ -163,7 +189,7 @@ public static FragmentInstanceContext createFragmentInstanceContext( } public static FragmentInstanceContext createFragmentInstanceContextForCompaction(long queryId) { - return new FragmentInstanceContext(queryId); + return new FragmentInstanceContext(queryId, null, null, null); } public void setQueryDataSourceType(QueryDataSourceType queryDataSourceType) { @@ -181,6 +207,22 @@ public static FragmentInstanceContext createFragmentInstanceContext( return instanceContext; } + @TestOnly + public static FragmentInstanceContext createFragmentInstanceContext( + FragmentInstanceId id, + FragmentInstanceStateMachine stateMachine, + MemoryReservationManager memoryReservationManager) { + FragmentInstanceContext instanceContext = + new FragmentInstanceContext( + id, + stateMachine, + new SessionInfo(1, "test", ZoneId.systemDefault()), + memoryReservationManager); + instanceContext.initialize(); + instanceContext.start(); + return instanceContext; + } + private FragmentInstanceContext( FragmentInstanceId id, FragmentInstanceStateMachine stateMachine, @@ -213,6 +255,20 @@ private FragmentInstanceContext( new ThreadSafeMemoryReservationManager(id.getQueryId(), this.getClass().getName()); } + private FragmentInstanceContext( + FragmentInstanceId id, + FragmentInstanceStateMachine stateMachine, + SessionInfo sessionInfo, + MemoryReservationManager memoryReservationManager) { + this.id = id; + this.stateMachine = stateMachine; + this.executionEndTime.set(END_TIME_INITIAL_VALUE); + this.sessionInfo = sessionInfo; + this.dataNodeQueryContextMap = null; + this.dataNodeQueryContext = null; + this.memoryReservationManager = memoryReservationManager; + } + private FragmentInstanceContext( FragmentInstanceId id, FragmentInstanceStateMachine stateMachine, @@ -236,17 +292,24 @@ public void setDataRegion(IDataRegionForQuery dataRegion) { } // used for compaction - private FragmentInstanceContext(long queryId) { + protected FragmentInstanceContext( + long queryId, + MemoryReservationManager memoryReservationManager, + Filter timeFilter, + DataRegion dataRegion) { this.queryId = queryId; this.id = null; this.stateMachine = null; this.dataNodeQueryContextMap = null; this.dataNodeQueryContext = null; - this.memoryReservationManager = null; + this.dataRegion = dataRegion; + this.globalTimeFilter = timeFilter; + 
this.memoryReservationManager = memoryReservationManager; } public void start() { long now = System.currentTimeMillis(); + ignoreNotExistsDevice = !StorageEngine.getInstance().isReadyForNonReadWriteFunctions(); executionStartTime.compareAndSet(null, now); startNanos.compareAndSet(0, System.nanoTime()); @@ -254,6 +317,64 @@ public void start() { lastExecutionStartTime.set(now); } + @Override + protected boolean checkIfModificationExists(TsFileResource tsFileResource) { + if (isSingleSourcePath()) { + return tsFileResource.getModFile().exists(); + } + if (nonExistentModFiles.contains(tsFileResource.getTsFileID())) { + return false; + } + + ModificationFile modFile = tsFileResource.getModFile(); + if (!modFile.exists()) { + if (nonExistentModFiles.add(tsFileResource.getTsFileID()) + && memoryReservationManager != null) { + memoryReservationManager.reserveMemoryCumulatively(RamUsageEstimator.NUM_BYTES_OBJECT_REF); + } + return false; + } + return true; + } + + @Override + protected PatternTreeMap getAllModifications( + TsFileResource resource) { + if (isSingleSourcePath() || memoryReservationManager == null) { + return loadAllModificationsFromDisk(resource); + } + + AtomicReference> + atomicReference = new AtomicReference<>(); + PatternTreeMap cachedResult = + fileModCache.computeIfAbsent( + resource.getTsFileID(), + k -> { + PatternTreeMap allMods = + loadAllModificationsFromDisk(resource); + atomicReference.set(allMods); + if (cachedModEntriesSize.get() >= config.getModsCacheSizeLimitPerFI()) { + return null; + } + long memCost = + RamUsageEstimator.sizeOfObject(allMods) + + RamUsageEstimator.SHALLOW_SIZE_OF_CONCURRENT_HASHMAP_ENTRY; + long alreadyUsedMemoryForCachedModEntries = cachedModEntriesSize.get(); + while (alreadyUsedMemoryForCachedModEntries + memCost + < config.getModsCacheSizeLimitPerFI()) { + if (cachedModEntriesSize.compareAndSet( + alreadyUsedMemoryForCachedModEntries, + alreadyUsedMemoryForCachedModEntries + memCost)) { + memoryReservationManager.reserveMemoryCumulatively(memCost); + return allMods; + } + alreadyUsedMemoryForCachedModEntries = cachedModEntriesSize.get(); + } + return null; + }); + return cachedResult == null ? atomicReference.get() : cachedResult; + } + // the state change listener is added here in a separate initialize() method // instead of the constructor to prevent leaking the "this" reference to // another thread, which will cause unsafe publication of this instance. 
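Aside: the `getAllModifications` override above only admits a mods-cache entry while `cachedModEntriesSize` stays under `getModsCacheSizeLimitPerFI()`, using a compare-and-set loop so concurrent fragment threads cannot jointly overshoot the budget. A standalone sketch of that admission loop follows; the names `BoundedCacheBudget` and `tryReserve` are illustrative, not IoTDB API:

```java
import java.util.concurrent.atomic.AtomicLong;

// CAS-based budget admission: an entry is cached only if its estimated cost
// still fits under the limit at the moment the reservation succeeds.
final class BoundedCacheBudget {
  private final AtomicLong used = new AtomicLong(0);
  private final long limit;

  BoundedCacheBudget(long limit) {
    this.limit = limit;
  }

  /** Returns true if {@code cost} bytes were reserved, false if the budget is exhausted. */
  boolean tryReserve(long cost) {
    long current = used.get();
    while (current + cost < limit) {
      if (used.compareAndSet(current, current + cost)) {
        return true; // won the race: caller may cache the entry
      }
      current = used.get(); // lost the race: re-read and retry
    }
    return false; // over budget: caller uses the loaded value without caching it
  }
}
```

This mirrors the diff's loop exactly: re-read the counter after every failed CAS, and fall back to the uncached load (`return null` inside `computeIfAbsent`) once the limit is reached.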
@@ -310,6 +431,18 @@ public List<FragmentInstanceFailureInfo> getFailureInfoList() {
         .collect(Collectors.toList());
   }
 
+  public Optional<TSStatus> getErrorCode() {
+    return stateMachine.getFailureCauses().stream()
+        .filter(IoTDBException.class::isInstance)
+        .findFirst()
+        .flatMap(
+            t -> {
+              TSStatus status = new TSStatus(((IoTDBException) t).getErrorCode());
+              status.setMessage(t.getMessage());
+              return Optional.of(status);
+            });
+  }
+
   public void finished() {
     stateMachine.finished();
   }
@@ -348,8 +481,19 @@ public void setDataNodeQueryContext(DataNodeQueryContext dataNodeQueryContext) {
   }
 
   public FragmentInstanceInfo getInstanceInfo() {
-    return new FragmentInstanceInfo(
-        stateMachine.getState(), getEndTime(), getFailedCause(), getFailureInfoList());
+    return getErrorCode()
+        .map(
+            s ->
+                new FragmentInstanceInfo(
+                    stateMachine.getState(),
+                    getEndTime(),
+                    getFailedCause(),
+                    getFailureInfoList(),
+                    s))
+        .orElseGet(
+            () ->
+                new FragmentInstanceInfo(
+                    stateMachine.getState(), getEndTime(), getFailedCause(), getFailureInfoList()));
   }
 
   public FragmentInstanceStateMachine getStateMachine() {
@@ -374,6 +518,9 @@ public IDataRegionForQuery getDataRegion() {
 
   public void setSourcePaths(List<PartialPath> sourcePaths) {
     this.sourcePaths = sourcePaths;
+    if (sourcePaths != null && sourcePaths.size() == 1) {
+      singleSourcePath = true;
+    }
   }
 
   public void setDevicePathsToContext(Map<IDeviceID, DeviceContext> devicePathsToContext) {
@@ -390,24 +537,37 @@ public void releaseMemoryReservationManager() {
 
   public void initQueryDataSource(List<PartialPath> sourcePaths) throws QueryProcessException {
     long startTime = System.nanoTime();
-    if (sourcePaths == null) {
+    if (sourcePaths == null || sourcePaths.isEmpty()) {
+      this.sharedQueryDataSource = EMPTY_QUERY_DATA_SOURCE;
       return;
     }
+    String singleDevice = null;
+    if (sourcePaths.size() == 1) {
+      singleDevice = sourcePaths.get(0).getDevice();
+    } else {
+      Set<String> selectedDeviceSet = new HashSet<>();
+      for (PartialPath sourcePath : sourcePaths) {
+        if (sourcePath instanceof AlignedPath) {
+          singleDevice = null;
+          break;
+        } else {
+          singleDevice = sourcePath.getDevice();
+          selectedDeviceSet.add(singleDevice);
+          if (selectedDeviceSet.size() > 1) {
+            singleDevice = null;
+            break;
+          }
+        }
+      }
+    }
     dataRegion.readLock();
     try {
-      List<PartialPath> pathList = new ArrayList<>();
-      Set<String> selectedDeviceIdSet = new HashSet<>();
-      for (PartialPath path : sourcePaths) {
-        pathList.add(path);
-        selectedDeviceIdSet.add(path.getDevice());
-      }
-
       this.sharedQueryDataSource =
           dataRegion.query(
-              pathList,
+              sourcePaths,
              // when all the selected series are under the same device, the QueryDataSource will be
              // filtered according to timeIndex
-              selectedDeviceIdSet.size() == 1 ? selectedDeviceIdSet.iterator().next() : null,
+              singleDevice,
              this,
              // time filter may be stateful, so we need to copy it
              globalTimeFilter != null ?
globalTimeFilter.copy() : null, @@ -419,7 +579,7 @@ public void initQueryDataSource(List sourcePaths) throws QueryProce closedFilePaths = new HashSet<>(); unClosedFilePaths = new HashSet<>(); addUsedFilesForQuery((QueryDataSource) sharedQueryDataSource); - ((QueryDataSource) sharedQueryDataSource).setSingleDevice(selectedDeviceIdSet.size() == 1); + ((QueryDataSource) sharedQueryDataSource).setSingleDevice(singleDevice != null); } } finally { setInitQueryDataSourceCost(System.nanoTime() - startTime); @@ -588,6 +748,7 @@ public void decrementNumOfUnClosedDriver() { @SuppressWarnings("squid:S2142") public void releaseResourceWhenAllDriversAreClosed() { + long startTime = System.nanoTime(); while (true) { try { allDriversClosed.await(); @@ -598,9 +759,49 @@ public void releaseResourceWhenAllDriversAreClosed() { "Interrupted when await on allDriversClosed, FragmentInstance Id is {}", this.getId()); } } + long duration = System.nanoTime() - startTime; + if (duration >= LONG_WAIT_DURATION) { + LOGGER.warn("Wait {}ms for all Drivers closed", duration / 1_000_000); + } releaseResource(); } + /** + * It checks all referenced TVList by the query: 1. If current is not the owner, just remove + * itself from query context list 2. If current query is the owner and no other query use it now, + * release the TVList 3. If current query is the owner and other queries still use it, set the + * next query as owner + */ + private void releaseTVListOwnedByQuery() { + for (TVList tvList : tvListSet) { + tvList.lockQueryList(); + Set queryContextSet = tvList.getQueryContextSet(); + try { + queryContextSet.remove(this); + if (tvList.getOwnerQuery() == this) { + if (queryContextSet.isEmpty()) { + LOGGER.debug( + "TVList {} is released by the query, FragmentInstance Id is {}", + tvList, + this.getId()); + memoryReservationManager.releaseMemoryCumulatively(tvList.calculateRamSize()); + tvList.clear(); + } else { + FragmentInstanceContext queryContext = + (FragmentInstanceContext) queryContextSet.iterator().next(); + LOGGER.debug( + "TVList {} is now owned by another query, FragmentInstance Id is {}", + tvList, + queryContext.getId()); + tvList.setOwnerQuery(queryContext); + } + } + } finally { + tvList.unlockQueryList(); + } + } + } + /** * All file paths used by this fragment instance must be cleared and thus the usage reference must * be decreased. 
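Aside: the three cases enumerated in the `releaseTVListOwnedByQuery` javadoc above amount to an ownership-handoff protocol for a shared in-memory buffer. A simplified model of that protocol follows; the types are stand-ins, not the real `TVList`/`QueryContext` API:

```java
import java.util.LinkedHashSet;
import java.util.Set;

// Ownership handoff on release:
//   case 1: a non-owner simply deregisters;
//   case 2: the owner leaves and nobody else reads -> free the buffer;
//   case 3: the owner leaves but readers remain -> hand ownership to one of them.
final class SharedBuffer {
  private final Set<Object> readers = new LinkedHashSet<>();
  private Object owner;

  synchronized void register(Object query) {
    readers.add(query);
    if (owner == null) {
      owner = query;
    }
  }

  synchronized void releaseBy(Object query) {
    readers.remove(query); // case 1: always deregister first
    if (owner != query) {
      return;
    }
    if (readers.isEmpty()) {
      free(); // case 2: last user releases the memory
    } else {
      owner = readers.iterator().next(); // case 3: transfer ownership
    }
  }

  private void free() {
    owner = null; // real code would also return the reserved memory here
  }
}
```

The point of the handoff is that memory accounting stays attached to exactly one live query at a time, so the reservation manager of a finished query never keeps paying for a list that other queries still read.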
@@ -621,14 +822,31 @@ public synchronized void releaseResource() { unClosedFilePaths = null; } + // release TVList/AlignedTVList owned by current query + releaseTVListOwnedByQuery(); + + fileModCache = null; + nonExistentModFiles = null; dataRegion = null; globalTimeFilter = null; sharedQueryDataSource = null; // record fragment instance execution time and metadata get time to metrics long durationTime = System.currentTimeMillis() - executionStartTime.get(); + DriverSchedulerMetricSet.getInstance() + .recordTaskQueueTime(BLOCK_QUEUED_TIME, blockQueueTime.get()); + DriverSchedulerMetricSet.getInstance() + .recordTaskQueueTime(READY_QUEUED_TIME, readyQueueTime.get()); + QueryRelatedResourceMetricSet.getInstance().updateFragmentInstanceTime(durationTime); + SeriesScanCostMetricSet.getInstance() + .recordBloomFilterMetrics( + getQueryStatistics().getLoadBloomFilterFromCacheCount().get(), + getQueryStatistics().getLoadBloomFilterFromDiskCount().get(), + getQueryStatistics().getLoadBloomFilterActualIOSize().get(), + getQueryStatistics().getLoadBloomFilterTime().get()); + SeriesScanCostMetricSet.getInstance() .recordNonAlignedTimeSeriesMetadataCount( getQueryStatistics().getLoadTimeSeriesMetadataDiskSeqCount().get(), @@ -654,6 +872,12 @@ public synchronized void releaseResource() { getQueryStatistics().getLoadTimeSeriesMetadataAlignedMemSeqTime().get(), getQueryStatistics().getLoadTimeSeriesMetadataAlignedMemUnSeqTime().get()); + SeriesScanCostMetricSet.getInstance() + .recordTimeSeriesMetadataMetrics( + getQueryStatistics().getLoadTimeSeriesMetadataFromCacheCount().get(), + getQueryStatistics().getLoadTimeSeriesMetadataFromDiskCount().get(), + getQueryStatistics().getLoadTimeSeriesMetadataActualIOSize().get()); + SeriesScanCostMetricSet.getInstance() .recordConstructChunkReadersCount( getQueryStatistics().getConstructAlignedChunkReadersMemCount().get(), @@ -667,6 +891,12 @@ public synchronized void releaseResource() { getQueryStatistics().getConstructNonAlignedChunkReadersMemTime().get(), getQueryStatistics().getConstructNonAlignedChunkReadersDiskTime().get()); + SeriesScanCostMetricSet.getInstance() + .recordChunkMetrics( + getQueryStatistics().getLoadChunkFromCacheCount().get(), + getQueryStatistics().getLoadChunkFromDiskCount().get(), + getQueryStatistics().getLoadChunkActualIOSize().get()); + SeriesScanCostMetricSet.getInstance() .recordPageReadersDecompressCount( getQueryStatistics().getPageReadersDecodeAlignedMemCount().get(), @@ -767,4 +997,12 @@ public long getClosedUnseqFileNum() { public long getUnclosedSeqFileNum() { return unclosedSeqFileNum; } + + public boolean ignoreNotExistsDevice() { + return ignoreNotExistsDevice; + } + + public boolean isSingleSourcePath() { + return singleSourcePath; + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceExecution.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceExecution.java index f631ea8a24683..9a825a055c41b 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceExecution.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceExecution.java @@ -117,11 +117,23 @@ public FragmentInstanceState getInstanceState() { } public FragmentInstanceInfo getInstanceInfo() { - return new FragmentInstanceInfo( - stateMachine.getState(), - context.getEndTime(), - context.getFailedCause(), - context.getFailureInfoList()); 
+ return context + .getErrorCode() + .map( + s -> + new FragmentInstanceInfo( + stateMachine.getState(), + context.getEndTime(), + context.getFailedCause(), + context.getFailureInfoList(), + s)) + .orElseGet( + () -> + new FragmentInstanceInfo( + stateMachine.getState(), + context.getEndTime(), + context.getFailedCause(), + context.getFailureInfoList())); } public long getStartTime() { @@ -292,10 +304,16 @@ private void initialize(IDriverScheduler scheduler, boolean isExplainAnalyze) { staticsRemoved = true; statisticsLock.writeLock().unlock(); - clearShuffleSinkHandle(newState); - - // delete tmp file if exists - deleteTmpFile(); + // must clear shuffle sink handle before driver close + // because in failed state, if we can driver.close firstly, we will finally call + // sink.setNoMoreTsBlocks() which may mislead upstream that downstream normally ends + try { + clearShuffleSinkHandle(newState); + } catch (Throwable t) { + LOGGER.error( + "Errors occurred while attempting to release sink, potentially leading to resource leakage.", + t); + } // close the driver after sink is aborted or closed because in driver.close() it // will try to call ISink.setNoMoreTsBlocks() @@ -308,11 +326,33 @@ private void initialize(IDriverScheduler scheduler, boolean isExplainAnalyze) { // release file handlers context.releaseResourceWhenAllDriversAreClosed(); - // release memory - exchangeManager.deRegisterFragmentInstanceFromMemoryPool( - instanceId.getQueryId().getId(), instanceId.getFragmentInstanceId(), true); + try { + // delete tmp file if exists + deleteTmpFile(); + } catch (Throwable t) { + LOGGER.error( + "Errors occurred while attempting to delete tmp files, potentially leading to resource leakage.", + t); + } + + try { + // release memory + exchangeManager.deRegisterFragmentInstanceFromMemoryPool( + instanceId.getQueryId().getId(), instanceId.getFragmentInstanceId(), true); + } catch (Throwable t) { + LOGGER.error( + "Errors occurred while attempting to deRegister FI from Memory Pool, potentially leading to resource leakage, status is {}.", + newState, + t); + } - context.releaseMemoryReservationManager(); + try { + context.releaseMemoryReservationManager(); + } catch (Throwable t) { + LOGGER.error( + "Errors occurred while attempting to release memory, potentially leading to resource leakage.", + t); + } if (newState.isFailed()) { scheduler.abortFragmentInstance(instanceId); @@ -346,7 +386,7 @@ private void deleteTmpFile() { + File.separator; File tmpFile = new File(tmpFilePath); if (tmpFile.exists()) { - FileUtils.deleteFileOrDirectory(tmpFile); + FileUtils.deleteFileOrDirectory(tmpFile, true); } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceInfo.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceInfo.java index 9c67d67de64b7..a544aebe6df53 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceInfo.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceInfo.java @@ -19,9 +19,11 @@ package org.apache.iotdb.db.queryengine.execution.fragment; +import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.consensus.common.DataSet; import java.util.List; +import java.util.Optional; public class FragmentInstanceInfo implements DataSet { private final FragmentInstanceState state; @@ -30,6 +32,8 @@ public class FragmentInstanceInfo implements DataSet 
{ private List failureInfoList; + private TSStatus errorCode; + public FragmentInstanceInfo(FragmentInstanceState state) { this.state = state; } @@ -49,6 +53,18 @@ public FragmentInstanceInfo( this.failureInfoList = failureInfoList; } + public FragmentInstanceInfo( + FragmentInstanceState state, + long endTime, + String message, + List failureInfoList, + TSStatus errorStatus) { + this(state, endTime); + this.message = message; + this.failureInfoList = failureInfoList; + this.errorCode = errorStatus; + } + public FragmentInstanceState getState() { return state; } @@ -61,6 +77,14 @@ public String getMessage() { return message; } + public void setMessage(String message) { + this.message = message; + } + + public Optional getErrorCode() { + return Optional.ofNullable(errorCode); + } + public List getFailureInfoList() { return failureInfoList; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceManager.java index 900865dc88701..5cb43d86bc0df 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceManager.java @@ -19,10 +19,14 @@ package org.apache.iotdb.db.queryengine.execution.fragment; +import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; import org.apache.iotdb.commons.concurrent.ThreadName; import org.apache.iotdb.commons.concurrent.threadpool.ScheduledExecutorUtil; +import org.apache.iotdb.commons.conf.CommonDescriptor; +import org.apache.iotdb.commons.exception.IoTDBException; import org.apache.iotdb.db.conf.IoTDBDescriptor; +import org.apache.iotdb.db.exception.query.QueryTimeoutRuntimeException; import org.apache.iotdb.db.queryengine.common.FragmentInstanceId; import org.apache.iotdb.db.queryengine.common.QueryId; import org.apache.iotdb.db.queryengine.execution.driver.IDriver; @@ -48,17 +52,19 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicLong; import static java.util.Objects.requireNonNull; import static org.apache.iotdb.db.queryengine.execution.fragment.FragmentInstanceContext.createFragmentInstanceContext; import static org.apache.iotdb.db.queryengine.execution.fragment.FragmentInstanceExecution.createFragmentInstanceExecution; +import static org.apache.iotdb.db.queryengine.execution.schedule.queue.IndexedBlockingQueue.TOO_MANY_CONCURRENT_QUERIES_ERROR_MSG; import static org.apache.iotdb.db.queryengine.metric.QueryExecutionMetricSet.LOCAL_EXECUTION_PLANNER; +import static org.apache.iotdb.rpc.TSStatusCode.TOO_MANY_CONCURRENT_QUERIES_ERROR; @SuppressWarnings("squid:S6548") public class FragmentInstanceManager { @@ -77,6 +83,7 @@ public class FragmentInstanceManager { private final Duration infoCacheTime; private final ExecutorService intoOperationExecutor; + private final ExecutorService modelInferenceExecutor; private final MPPDataExchangeManager exchangeManager = MPPDataExchangeService.getInstance().getMPPDataExchangeManager(); @@ -114,6 
+121,11 @@ private FragmentInstanceManager() { IoTDBThreadPoolFactory.newFixedThreadPool( IoTDBDescriptor.getInstance().getConfig().getIntoOperationExecutionThreadCount(), "into-operation-executor"); + + this.modelInferenceExecutor = + IoTDBThreadPoolFactory.newFixedThreadPool( + CommonDescriptor.getInstance().getConfig().getModelInferenceExecutionThreadCount(), + "model-inference-executor"); } @SuppressWarnings("squid:S1181") @@ -178,8 +190,18 @@ public FragmentInstanceInfo execDataQueryFragmentInstance( exchangeManager); } catch (Throwable t) { clearFIRelatedResources(instanceId); - logger.warn("error when create FragmentInstanceExecution.", t); - stateMachine.failed(t); + // deal with + if (t instanceof IllegalStateException + && TOO_MANY_CONCURRENT_QUERIES_ERROR_MSG.equals(t.getMessage())) { + logger.warn(TOO_MANY_CONCURRENT_QUERIES_ERROR_MSG); + stateMachine.failed( + new IoTDBException( + TOO_MANY_CONCURRENT_QUERIES_ERROR_MSG, + TOO_MANY_CONCURRENT_QUERIES_ERROR.getStatusCode())); + } else { + logger.warn("error when create FragmentInstanceExecution.", t); + stateMachine.failed(t); + } return null; } }); @@ -259,8 +281,18 @@ public FragmentInstanceInfo execSchemaQueryFragmentInstance( exchangeManager); } catch (Throwable t) { clearFIRelatedResources(instanceId); - logger.warn("Execute error caused by ", t); - stateMachine.failed(t); + // deal with + if (t instanceof IllegalStateException + && TOO_MANY_CONCURRENT_QUERIES_ERROR_MSG.equals(t.getMessage())) { + logger.warn(TOO_MANY_CONCURRENT_QUERIES_ERROR_MSG); + stateMachine.failed( + new IoTDBException( + TOO_MANY_CONCURRENT_QUERIES_ERROR_MSG, + TOO_MANY_CONCURRENT_QUERIES_ERROR.getStatusCode())); + } else { + logger.warn("Execute error caused by ", t); + stateMachine.failed(t); + } return null; } }); @@ -350,11 +382,23 @@ public TFetchFragmentInstanceStatisticsResp getFragmentInstanceStatistics( private FragmentInstanceInfo createFailedInstanceInfo(FragmentInstanceId instanceId) { FragmentInstanceContext context = instanceContext.get(instanceId); - return new FragmentInstanceInfo( - FragmentInstanceState.FAILED, - context.getEndTime(), - context.getFailedCause(), - context.getFailureInfoList()); + Optional errorCode = context.getErrorCode(); + return errorCode + .map( + tsStatus -> + new FragmentInstanceInfo( + FragmentInstanceState.FAILED, + context.getEndTime(), + context.getFailedCause(), + context.getFailureInfoList(), + tsStatus)) + .orElseGet( + () -> + new FragmentInstanceInfo( + FragmentInstanceState.FAILED, + context.getEndTime(), + context.getFailedCause(), + context.getFailureInfoList())); } private void removeOldInstances() { @@ -377,8 +421,10 @@ private void cancelTimeoutFlushingInstances() { execution .getStateMachine() .failed( - new TimeoutException( - "Query has executed more than " + execution.getTimeoutInMs() + "ms")); + new QueryTimeoutRuntimeException( + "Query has executed more than " + + execution.getTimeoutInMs() + + "ms, and now is in flushing state")); } }); } @@ -387,6 +433,10 @@ public ExecutorService getIntoOperationExecutor() { return intoOperationExecutor; } + public ExecutorService getModelInferenceExecutor() { + return modelInferenceExecutor; + } + private static class InstanceHolder { private InstanceHolder() {} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceState.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceState.java index 77531e9449450..2bcb12544d950 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceState.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceState.java @@ -47,7 +47,7 @@ public enum FragmentInstanceState { /** Instance execution failed. */ FAILED(true, true), /** Instance is not found. */ - NO_SUCH_INSTANCE(false, false); + NO_SUCH_INSTANCE(true, true); public static final Set TERMINAL_INSTANCE_STATES = Stream.of(FragmentInstanceState.values()) diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/QueryContext.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/QueryContext.java index cbde4cca787ff..8d62e20776679 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/QueryContext.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/QueryContext.java @@ -29,16 +29,19 @@ import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory; import org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory.ModsSerializer; +import org.apache.iotdb.db.utils.datastructure.TVList; import org.apache.tsfile.file.metadata.IDeviceID; import java.util.ArrayList; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArraySet; +import java.util.concurrent.atomic.AtomicLong; /** QueryContext contains the shared information with in a query. */ public class QueryContext { @@ -46,13 +49,15 @@ public class QueryContext { private QueryStatistics queryStatistics = new QueryStatistics(); /** - * The key is the path of a ModificationFile and the value is all Modifications in this file. We - * use this field because each call of Modification.getModifications() return a copy of the - * Modifications, and we do not want it to create multiple copies within a query. + * The key is TsFileID and the value is all Modifications in this file. We use this field because + * each call of Modification.getModifications() return a copy of the Modifications, and we do not + * want it to create multiple copies within a query. 
 */
-  private final Map<String, PatternTreeMap<Modification, ModsSerializer>> fileModCache =
+  protected Map<TsFileID, PatternTreeMap<Modification, ModsSerializer>> fileModCache =
       new ConcurrentHashMap<>();
 
+  protected AtomicLong cachedModEntriesSize = new AtomicLong(0);
+
   protected long queryId;
 
   private boolean debug;
 
@@ -62,7 +67,10 @@ public class QueryContext {
 
   private volatile boolean isInterrupted = false;
 
-  private final Set<TsFileID> nonExistentModFiles = new CopyOnWriteArraySet<>();
+  protected Set<TsFileID> nonExistentModFiles = new CopyOnWriteArraySet<>();
+
+  // referenced TVLists for the query
+  protected final Set<TVList> tvListSet = new HashSet<>();
 
   public QueryContext() {}
 
@@ -78,7 +86,7 @@ public QueryContext(long queryId, boolean debug, long startTime, long timeout) {
     this.timeout = timeout;
   }
 
-  private boolean checkIfModificationExists(TsFileResource tsFileResource) {
+  protected boolean checkIfModificationExists(TsFileResource tsFileResource) {
     if (nonExistentModFiles.contains(tsFileResource.getTsFileID())) {
       return false;
     }
@@ -91,18 +99,21 @@ private boolean checkIfModificationExists(TsFileResource tsFileResource) {
     return true;
   }
 
-  private PatternTreeMap<Modification, ModsSerializer> getAllModifications(
-      ModificationFile modFile) {
+  protected PatternTreeMap<Modification, ModsSerializer> getAllModifications(
+      TsFileResource resource) {
     return fileModCache.computeIfAbsent(
-        modFile.getFilePath(),
-        k -> {
-          PatternTreeMap<Modification, ModsSerializer> modifications =
-              PatternTreeMapFactory.getModsPatternTreeMap();
-          for (Modification modification : modFile.getModificationsIter()) {
-            modifications.append(modification.getPath(), modification);
-          }
-          return modifications;
-        });
+        resource.getTsFileID(), k -> loadAllModificationsFromDisk(resource));
+  }
+
+  public PatternTreeMap<Modification, ModsSerializer> loadAllModificationsFromDisk(
+      TsFileResource resource) {
+    PatternTreeMap<Modification, ModsSerializer> modifications =
+        PatternTreeMapFactory.getModsPatternTreeMap();
+    Iterable<Modification> modEntryIterator = resource.getModFile().getModificationsIter();
+    for (Modification modification : modEntryIterator) {
+      modifications.append(modification.getPath(), modification);
+    }
+    return modifications;
   }
 
   public List<Modification> getPathModifications(
@@ -114,20 +125,19 @@ public List<Modification> getPathModifications(
     }
 
     return ModificationFile.sortAndMerge(
-        getAllModifications(tsFileResource.getModFile())
-            .getOverlapped(new PartialPath(deviceID, measurement)));
+        getAllModifications(tsFileResource).getOverlapped(new PartialPath(deviceID, measurement)));
   }
 
-  public List<Modification> getPathModifications(TsFileResource tsFileResource, IDeviceID deviceID)
+  public List<Modification> getPathModifications(
+      PatternTreeMap<Modification, ModsSerializer> fileMods, IDeviceID deviceID)
       throws IllegalPathException {
-    // if the mods file does not exist, do not add it to the cache
-    if (!checkIfModificationExists(tsFileResource)) {
+    if (fileMods == null) {
       return Collections.emptyList();
     }
 
     return ModificationFile.sortAndMerge(
-        getAllModifications(tsFileResource.getModFile())
-            .getDeviceOverlapped(new PartialPath(deviceID)));
+        fileMods.getOverlapped(
+            new PartialPath(deviceID).concatAsMeasurementPath(AlignedPath.VECTOR_PLACEHOLDER)));
   }
 
   /**
@@ -140,8 +150,15 @@ public List<Modification> getPathModifications(TsFileResource tsFileResource, Pa
       return Collections.emptyList();
     }
 
-    return ModificationFile.sortAndMerge(
-        getAllModifications(tsFileResource.getModFile()).getOverlapped(path));
+    return getPathModifications(getAllModifications(tsFileResource), path);
+  }
+
+  public List<Modification> getPathModifications(
+      PatternTreeMap<Modification, ModsSerializer> fileMods, PartialPath path) {
+    if (fileMods == null) {
+      return Collections.emptyList();
+    }
+    return ModificationFile.sortAndMerge(fileMods.getOverlapped(path));
   }
 
   /**
@@ -199,4 +216,8 @@ public QueryStatistics getQueryStatistics() {
 
   public
void setQueryStatistics(QueryStatistics queryStatistics) { this.queryStatistics = queryStatistics; } + + public void addTVListToSet(Map tvListMap) { + tvListSet.addAll(tvListMap.keySet()); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/QueryStatistics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/QueryStatistics.java index b996569a69dbc..b1ddfdbf4d747 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/QueryStatistics.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/QueryStatistics.java @@ -29,6 +29,11 @@ */ public class QueryStatistics { + private final AtomicLong loadBloomFilterFromCacheCount = new AtomicLong(0); + private final AtomicLong loadBloomFilterFromDiskCount = new AtomicLong(0); + private final AtomicLong loadBloomFilterActualIOSize = new AtomicLong(0); + private final AtomicLong loadBloomFilterTime = new AtomicLong(0); + // statistics for count and time of load timeseriesmetadata private final AtomicLong loadTimeSeriesMetadataDiskSeqCount = new AtomicLong(0); private final AtomicLong loadTimeSeriesMetadataDiskUnSeqCount = new AtomicLong(0); @@ -48,6 +53,10 @@ public class QueryStatistics { private final AtomicLong loadTimeSeriesMetadataAlignedMemSeqTime = new AtomicLong(0); private final AtomicLong loadTimeSeriesMetadataAlignedMemUnSeqTime = new AtomicLong(0); + private final AtomicLong loadTimeSeriesMetadataFromCacheCount = new AtomicLong(0); + private final AtomicLong loadTimeSeriesMetadataFromDiskCount = new AtomicLong(0); + private final AtomicLong loadTimeSeriesMetadataActualIOSize = new AtomicLong(0); + // statistics for count and time of construct chunk readers(disk io and decompress) private final AtomicLong constructNonAlignedChunkReadersDiskCount = new AtomicLong(0); private final AtomicLong constructNonAlignedChunkReadersMemCount = new AtomicLong(0); @@ -59,6 +68,10 @@ public class QueryStatistics { private final AtomicLong constructAlignedChunkReadersDiskTime = new AtomicLong(0); private final AtomicLong constructAlignedChunkReadersMemTime = new AtomicLong(0); + private final AtomicLong loadChunkFromCacheCount = new AtomicLong(0); + private final AtomicLong loadChunkFromDiskCount = new AtomicLong(0); + private final AtomicLong loadChunkActualIOSize = new AtomicLong(0); + // statistics for count and time of page decode private final AtomicLong pageReadersDecodeAlignedDiskCount = new AtomicLong(0); private final AtomicLong pageReadersDecodeAlignedDiskTime = new AtomicLong(0); @@ -225,6 +238,46 @@ public AtomicLong getPageReaderMaxUsedMemorySize() { return pageReaderMaxUsedMemorySize; } + public AtomicLong getLoadBloomFilterActualIOSize() { + return loadBloomFilterActualIOSize; + } + + public AtomicLong getLoadBloomFilterFromCacheCount() { + return loadBloomFilterFromCacheCount; + } + + public AtomicLong getLoadBloomFilterFromDiskCount() { + return loadBloomFilterFromDiskCount; + } + + public AtomicLong getLoadBloomFilterTime() { + return loadBloomFilterTime; + } + + public AtomicLong getLoadChunkActualIOSize() { + return loadChunkActualIOSize; + } + + public AtomicLong getLoadChunkFromCacheCount() { + return loadChunkFromCacheCount; + } + + public AtomicLong getLoadChunkFromDiskCount() { + return loadChunkFromDiskCount; + } + + public AtomicLong getLoadTimeSeriesMetadataActualIOSize() { + return loadTimeSeriesMetadataActualIOSize; + } + + public AtomicLong 
getLoadTimeSeriesMetadataFromCacheCount() { + return loadTimeSeriesMetadataFromCacheCount; + } + + public AtomicLong getLoadTimeSeriesMetadataFromDiskCount() { + return loadTimeSeriesMetadataFromDiskCount; + } + public TQueryStatistics toThrift() { return new TQueryStatistics( loadTimeSeriesMetadataDiskSeqCount.get(), @@ -263,6 +316,16 @@ public TQueryStatistics toThrift() { alignedTimeSeriesMetadataModificationCount.get(), alignedTimeSeriesMetadataModificationTime.get(), nonAlignedTimeSeriesMetadataModificationCount.get(), - nonAlignedTimeSeriesMetadataModificationTime.get()); + nonAlignedTimeSeriesMetadataModificationTime.get(), + loadBloomFilterFromCacheCount.get(), + loadBloomFilterFromDiskCount.get(), + loadBloomFilterActualIOSize.get(), + loadBloomFilterTime.get(), + loadTimeSeriesMetadataFromCacheCount.get(), + loadTimeSeriesMetadataFromDiskCount.get(), + loadTimeSeriesMetadataActualIOSize.get(), + loadChunkFromCacheCount.get(), + loadChunkFromDiskCount.get(), + loadChunkActualIOSize.get()); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/load/LoadTsFileRateLimiter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/load/LoadTsFileRateLimiter.java deleted file mode 100644 index 9876256e916e4..0000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/load/LoadTsFileRateLimiter.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.db.queryengine.execution.load; - -import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.db.conf.IoTDBConfig; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.queryengine.metric.load.LoadTsFileCostMetricsSet; - -import com.google.common.util.concurrent.AtomicDouble; -import com.google.common.util.concurrent.RateLimiter; - -import java.util.concurrent.TimeUnit; - -public class LoadTsFileRateLimiter { - - private static final IoTDBConfig CONFIG = IoTDBDescriptor.getInstance().getConfig(); - - private final AtomicDouble throughputBytesPerSecond = - new AtomicDouble(CONFIG.getLoadWriteThroughputBytesPerSecond()); - private final RateLimiter loadWriteRateLimiter; - - public void acquire(long bytes) { - LoadTsFileCostMetricsSet.getInstance().recordDiskIO(bytes); - - if (reloadParams()) { - return; - } - - while (bytes > 0) { - if (bytes > Integer.MAX_VALUE) { - tryAcquireWithRateCheck(Integer.MAX_VALUE); - bytes -= Integer.MAX_VALUE; - } else { - tryAcquireWithRateCheck((int) bytes); - return; - } - } - } - - private void tryAcquireWithRateCheck(final int bytes) { - while (!loadWriteRateLimiter.tryAcquire( - bytes, - PipeConfig.getInstance().getRateLimiterHotReloadCheckIntervalMs(), - TimeUnit.MILLISECONDS)) { - if (reloadParams()) { - return; - } - } - } - - private boolean reloadParams() { - final double throughputBytesPerSecondLimit = CONFIG.getLoadWriteThroughputBytesPerSecond(); - - if (throughputBytesPerSecond.get() != throughputBytesPerSecondLimit) { - throughputBytesPerSecond.set(throughputBytesPerSecondLimit); - loadWriteRateLimiter.setRate( - // if throughput <= 0, disable rate limiting - throughputBytesPerSecondLimit <= 0 ? Double.MAX_VALUE : throughputBytesPerSecondLimit); - } - - // For performance, we don't need to acquire rate limiter if throughput <= 0 - return throughputBytesPerSecondLimit <= 0; - } - - //////////////////////////// Singleton //////////////////////////// - - private LoadTsFileRateLimiter() { - final double throughputBytesPerSecondLimit = throughputBytesPerSecond.get(); - loadWriteRateLimiter = - // if throughput <= 0, disable rate limiting - throughputBytesPerSecondLimit <= 0 - ? RateLimiter.create(Double.MAX_VALUE) - : RateLimiter.create(throughputBytesPerSecondLimit); - } - - private static class LoadTsFileRateLimiterHolder { - - private static final LoadTsFileRateLimiter INSTANCE = new LoadTsFileRateLimiter(); - - private LoadTsFileRateLimiterHolder() { - // Prevent instantiation - } - } - - public static LoadTsFileRateLimiter getInstance() { - return LoadTsFileRateLimiterHolder.INSTANCE; - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/load/TsFileSplitter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/load/TsFileSplitter.java deleted file mode 100644 index 8ddec6c622f16..0000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/load/TsFileSplitter.java +++ /dev/null @@ -1,500 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.db.queryengine.execution.load; - -import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; -import org.apache.iotdb.commons.utils.TimePartitionUtils; -import org.apache.iotdb.db.storageengine.dataregion.modification.Deletion; -import org.apache.iotdb.db.storageengine.dataregion.modification.Modification; -import org.apache.iotdb.db.storageengine.dataregion.modification.ModificationFile; - -import org.apache.tsfile.common.conf.TSFileConfig; -import org.apache.tsfile.common.conf.TSFileDescriptor; -import org.apache.tsfile.common.constant.TsFileConstant; -import org.apache.tsfile.encoding.decoder.Decoder; -import org.apache.tsfile.enums.TSDataType; -import org.apache.tsfile.exception.TsFileRuntimeException; -import org.apache.tsfile.file.MetaMarker; -import org.apache.tsfile.file.header.ChunkGroupHeader; -import org.apache.tsfile.file.header.ChunkHeader; -import org.apache.tsfile.file.header.PageHeader; -import org.apache.tsfile.file.metadata.IChunkMetadata; -import org.apache.tsfile.file.metadata.IDeviceID; -import org.apache.tsfile.file.metadata.PlainDeviceID; -import org.apache.tsfile.file.metadata.TimeseriesMetadata; -import org.apache.tsfile.file.metadata.enums.TSEncoding; -import org.apache.tsfile.read.TsFileSequenceReader; -import org.apache.tsfile.read.common.BatchData; -import org.apache.tsfile.read.reader.page.PageReader; -import org.apache.tsfile.read.reader.page.TimePageReader; -import org.apache.tsfile.read.reader.page.ValuePageReader; -import org.apache.tsfile.utils.Pair; -import org.apache.tsfile.utils.TsPrimitiveType; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; -import java.util.function.Function; - -public class TsFileSplitter { - private static final Logger logger = LoggerFactory.getLogger(TsFileSplitter.class); - - private final File tsFile; - private final Function consumer; - - public TsFileSplitter(File tsFile, Function consumer) { - this.tsFile = tsFile; - this.consumer = consumer; - } - - @SuppressWarnings({"squid:S3776", "squid:S6541"}) - public void splitTsFileByDataPartition() throws IOException, IllegalStateException { - try (TsFileSequenceReader reader = new TsFileSequenceReader(tsFile.getAbsolutePath())) { - TreeMap> offset2Deletions = new TreeMap<>(); - getAllModification(offset2Deletions); - - if (!checkMagic(reader)) { - throw new TsFileRuntimeException( - String.format("Magic String check error when parsing TsFile %s.", tsFile.getPath())); - } - - reader.position((long) TSFileConfig.MAGIC_STRING.getBytes().length + 1); - IDeviceID curDevice = null; - boolean isTimeChunkNeedDecode = true; - Map> pageIndex2ChunkData = new HashMap<>(); - Map pageIndex2Times = null; - Map offset2ChunkMetadata = new HashMap<>(); - getChunkMetadata(reader, offset2ChunkMetadata); - byte marker; - while ((marker = reader.readMarker()) != 
MetaMarker.SEPARATOR) { - switch (marker) { - case MetaMarker.CHUNK_HEADER: - case MetaMarker.TIME_CHUNK_HEADER: - case MetaMarker.ONLY_ONE_PAGE_CHUNK_HEADER: - case MetaMarker.ONLY_ONE_PAGE_TIME_CHUNK_HEADER: - long chunkOffset = reader.position(); - consumeAllAlignedChunkData(chunkOffset, pageIndex2ChunkData); - handleModification(offset2Deletions, chunkOffset); - - ChunkHeader header = reader.readChunkHeader(marker); - String measurementId = header.getMeasurementID(); - if (header.getDataSize() == 0) { - throw new TsFileRuntimeException( - String.format( - "Empty Nonaligned Chunk or Time Chunk with offset %d in TsFile %s.", - chunkOffset, tsFile.getPath())); - } - - boolean isAligned = - ((header.getChunkType() & TsFileConstant.TIME_COLUMN_MASK) - == TsFileConstant.TIME_COLUMN_MASK); - IChunkMetadata chunkMetadata = offset2ChunkMetadata.get(chunkOffset - Byte.BYTES); - // When loading TsFile with Chunk in data zone but no matched ChunkMetadata - // at the end of file, this Chunk needs to be skipped. - if (chunkMetadata == null) { - reader.readChunk(-1, header.getDataSize()); - break; - } - TTimePartitionSlot timePartitionSlot = - TimePartitionUtils.getTimePartitionSlot(chunkMetadata.getStartTime()); - ChunkData chunkData = - ChunkData.createChunkData( - isAligned, ((PlainDeviceID) curDevice).toStringID(), header, timePartitionSlot); - - if (!needDecodeChunk(chunkMetadata)) { - chunkData.setNotDecode(); - chunkData.writeEntireChunk(reader.readChunk(-1, header.getDataSize()), chunkMetadata); - if (isAligned) { - isTimeChunkNeedDecode = false; - pageIndex2ChunkData - .computeIfAbsent(1, o -> new ArrayList<>()) - .add((AlignedChunkData) chunkData); - } else { - consumeChunkData(measurementId, chunkOffset, chunkData); - } - break; - } - - Decoder defaultTimeDecoder = - Decoder.getDecoderByType( - TSEncoding.valueOf(TSFileDescriptor.getInstance().getConfig().getTimeEncoder()), - TSDataType.INT64); - Decoder valueDecoder = - Decoder.getDecoderByType(header.getEncodingType(), header.getDataType()); - int dataSize = header.getDataSize(); - int pageIndex = 0; - if (isAligned) { - isTimeChunkNeedDecode = true; - pageIndex2Times = new HashMap<>(); - } - - while (dataSize > 0) { - PageHeader pageHeader = - reader.readPageHeader( - header.getDataType(), - (header.getChunkType() & 0x3F) == MetaMarker.CHUNK_HEADER); - long pageDataSize = pageHeader.getSerializedPageSize(); - if (!needDecodePage(pageHeader, chunkMetadata)) { // an entire page - long startTime = - pageHeader.getStatistics() == null - ? 
chunkMetadata.getStartTime() - : pageHeader.getStartTime(); - TTimePartitionSlot pageTimePartitionSlot = - TimePartitionUtils.getTimePartitionSlot(startTime); - if (!timePartitionSlot.equals(pageTimePartitionSlot)) { - if (!isAligned) { - consumeChunkData(measurementId, chunkOffset, chunkData); - } - timePartitionSlot = pageTimePartitionSlot; - chunkData = - ChunkData.createChunkData( - isAligned, - ((PlainDeviceID) curDevice).toStringID(), - header, - timePartitionSlot); - } - if (isAligned) { - pageIndex2ChunkData - .computeIfAbsent(pageIndex, o -> new ArrayList<>()) - .add((AlignedChunkData) chunkData); - } - chunkData.writeEntirePage(pageHeader, reader.readCompressedPage(pageHeader)); - } else { // split page - ByteBuffer pageData = reader.readPage(pageHeader, header.getCompressionType()); - Pair tvArray = - decodePage( - isAligned, pageData, pageHeader, defaultTimeDecoder, valueDecoder, header); - long[] times = tvArray.left; - Object[] values = tvArray.right; - if (isAligned) { - pageIndex2Times.put(pageIndex, times); - } - - int satisfiedLength = 0; - long endTime = - timePartitionSlot.getStartTime() - + TimePartitionUtils.getTimePartitionInterval(); - for (int i = 0; i < times.length; i++) { - if (times[i] >= endTime) { - chunkData.writeDecodePage(times, values, satisfiedLength); - if (isAligned) { - pageIndex2ChunkData - .computeIfAbsent(pageIndex, o -> new ArrayList<>()) - .add((AlignedChunkData) chunkData); - } else { - consumeChunkData(measurementId, chunkOffset, chunkData); - } - - timePartitionSlot = TimePartitionUtils.getTimePartitionSlot(times[i]); - satisfiedLength = 0; - endTime = - timePartitionSlot.getStartTime() - + TimePartitionUtils.getTimePartitionInterval(); - chunkData = - ChunkData.createChunkData( - isAligned, - ((PlainDeviceID) curDevice).toStringID(), - header, - timePartitionSlot); - } - satisfiedLength += 1; - } - chunkData.writeDecodePage(times, values, satisfiedLength); - if (isAligned) { - pageIndex2ChunkData - .computeIfAbsent(pageIndex, o -> new ArrayList<>()) - .add((AlignedChunkData) chunkData); - } - } - - pageIndex += 1; - dataSize -= pageDataSize; - } - - if (!isAligned) { - consumeChunkData(measurementId, chunkOffset, chunkData); - } - break; - case MetaMarker.VALUE_CHUNK_HEADER: - case MetaMarker.ONLY_ONE_PAGE_VALUE_CHUNK_HEADER: - chunkOffset = reader.position(); - chunkMetadata = offset2ChunkMetadata.get(chunkOffset - Byte.BYTES); - header = reader.readChunkHeader(marker); - // When loading TsFile with Chunk in data zone but no matched ChunkMetadata - // at the end of file, this Chunk needs to be skipped. 
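// (Aside on the removed splitter above: a chunk needs page-level decoding only when its start
// and end timestamps fall into different time-partition slots, and a decoded page is cut each
// time a timestamp reaches the end of the current slot. A minimal sketch of the slot
// arithmetic, assuming fixed-width partitions; the floorMod form is an illustration, not code
// taken from this patch:
//   long interval = TimePartitionUtils.getTimePartitionInterval();
//   long slotStart = t - Math.floorMod(t, interval); // start of the slot containing t
//   long slotEnd = slotStart + interval;             // a row with time >= slotEnd opens a new ChunkData)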
- if (chunkMetadata == null) { - reader.readChunk(-1, header.getDataSize()); - break; - } - if (header.getDataSize() == 0) { - handleEmptyValueChunk( - header, pageIndex2ChunkData, chunkMetadata, isTimeChunkNeedDecode); - break; - } - - if (!isTimeChunkNeedDecode) { - AlignedChunkData alignedChunkData = pageIndex2ChunkData.get(1).get(0); - alignedChunkData.addValueChunk(header); - alignedChunkData.writeEntireChunk( - reader.readChunk(-1, header.getDataSize()), chunkMetadata); - break; - } - - Set allChunkData = new HashSet<>(); - dataSize = header.getDataSize(); - pageIndex = 0; - valueDecoder = Decoder.getDecoderByType(header.getEncodingType(), header.getDataType()); - - while (dataSize > 0) { - PageHeader pageHeader = - reader.readPageHeader( - header.getDataType(), - (header.getChunkType() & 0x3F) == MetaMarker.CHUNK_HEADER); - List alignedChunkDataList = pageIndex2ChunkData.get(pageIndex); - for (AlignedChunkData alignedChunkData : alignedChunkDataList) { - if (!allChunkData.contains(alignedChunkData)) { - alignedChunkData.addValueChunk(header); - allChunkData.add(alignedChunkData); - } - } - if (alignedChunkDataList.size() == 1) { // write entire page - // write the entire page if it's not an empty page. - alignedChunkDataList - .get(0) - .writeEntirePage(pageHeader, reader.readCompressedPage(pageHeader)); - } else { // decode page - long[] times = pageIndex2Times.get(pageIndex); - TsPrimitiveType[] values = - decodeValuePage(reader, header, pageHeader, times, valueDecoder); - for (AlignedChunkData alignedChunkData : alignedChunkDataList) { - alignedChunkData.writeDecodeValuePage(times, values, header.getDataType()); - } - } - long pageDataSize = pageHeader.getSerializedPageSize(); - pageIndex += 1; - dataSize -= pageDataSize; - } - break; - case MetaMarker.CHUNK_GROUP_HEADER: - ChunkGroupHeader chunkGroupHeader = reader.readChunkGroupHeader(); - curDevice = chunkGroupHeader.getDeviceID(); - break; - case MetaMarker.OPERATION_INDEX_RANGE: - reader.readPlanIndex(); - break; - default: - MetaMarker.handleUnexpectedMarker(marker); - } - } - - consumeAllAlignedChunkData(reader.position(), pageIndex2ChunkData); - handleModification(offset2Deletions, Long.MAX_VALUE); - } - } - - private void getAllModification(Map> offset2Deletions) throws IOException { - try (ModificationFile modificationFile = - new ModificationFile(tsFile.getAbsolutePath() + ModificationFile.FILE_SUFFIX)) { - for (Modification modification : modificationFile.getModifications()) { - offset2Deletions - .computeIfAbsent(modification.getFileOffset(), o -> new ArrayList<>()) - .add((Deletion) modification); - } - } - } - - private boolean checkMagic(TsFileSequenceReader reader) throws IOException { - String magic = reader.readHeadMagic(); - if (!magic.equals(TSFileConfig.MAGIC_STRING)) { - logger.error("the file's MAGIC STRING is incorrect, file path: {}", reader.getFileName()); - return false; - } - - byte versionNumber = reader.readVersionNumber(); - if (versionNumber != TSFileConfig.VERSION_NUMBER) { - logger.error("the file's Version Number is incorrect, file path: {}", reader.getFileName()); - return false; - } - - if (!reader.readTailMagic().equals(TSFileConfig.MAGIC_STRING)) { - logger.error("the file is not closed correctly, file path: {}", reader.getFileName()); - return false; - } - return true; - } - - private void getChunkMetadata( - TsFileSequenceReader reader, Map offset2ChunkMetadata) - throws IOException { - Map> device2Metadata = - reader.getAllTimeseriesMetadata(true); - for (Map.Entry> entry : 
device2Metadata.entrySet()) { - for (TimeseriesMetadata timeseriesMetadata : entry.getValue()) { - for (IChunkMetadata chunkMetadata : timeseriesMetadata.getChunkMetadataList()) { - offset2ChunkMetadata.put(chunkMetadata.getOffsetOfChunkHeader(), chunkMetadata); - } - } - } - } - - private void handleModification( - TreeMap> offset2Deletions, long chunkOffset) { - while (!offset2Deletions.isEmpty() && offset2Deletions.firstEntry().getKey() <= chunkOffset) { - offset2Deletions - .pollFirstEntry() - .getValue() - .forEach(o -> consumer.apply(new DeletionData(o))); - } - } - - private void consumeAllAlignedChunkData( - long offset, Map> pageIndex2ChunkData) { - if (pageIndex2ChunkData.isEmpty()) { - return; - } - - Set allChunkData = new HashSet<>(); - for (Map.Entry> entry : pageIndex2ChunkData.entrySet()) { - allChunkData.addAll(entry.getValue()); - } - for (ChunkData chunkData : allChunkData) { - if (Boolean.FALSE.equals(consumer.apply(chunkData))) { - throw new IllegalStateException( - String.format( - "Consume aligned chunk data error, next chunk offset: %d, chunkData: %s", - offset, chunkData)); - } - } - pageIndex2ChunkData.clear(); - } - - private void consumeChunkData(String measurement, long offset, ChunkData chunkData) { - if (Boolean.FALSE.equals(consumer.apply(chunkData))) { - throw new IllegalStateException( - String.format( - "Consume chunkData error, chunk offset: %d, measurement: %s, chunkData: %s", - offset, measurement, chunkData)); - } - } - - private boolean needDecodeChunk(IChunkMetadata chunkMetadata) { - return !TimePartitionUtils.getTimePartitionSlot(chunkMetadata.getStartTime()) - .equals(TimePartitionUtils.getTimePartitionSlot(chunkMetadata.getEndTime())); - } - - private boolean needDecodePage(PageHeader pageHeader, IChunkMetadata chunkMetadata) { - if (pageHeader.getStatistics() == null) { - return !TimePartitionUtils.getTimePartitionSlot(chunkMetadata.getStartTime()) - .equals(TimePartitionUtils.getTimePartitionSlot(chunkMetadata.getEndTime())); - } - return !TimePartitionUtils.getTimePartitionSlot(pageHeader.getStartTime()) - .equals(TimePartitionUtils.getTimePartitionSlot(pageHeader.getEndTime())); - } - - private Pair decodePage( - boolean isAligned, - ByteBuffer pageData, - PageHeader pageHeader, - Decoder timeDecoder, - Decoder valueDecoder, - ChunkHeader chunkHeader) - throws IOException { - if (isAligned) { - TimePageReader timePageReader = new TimePageReader(pageHeader, pageData, timeDecoder); - long[] times = timePageReader.getNextTimeBatch(); - return new Pair<>(times, new Object[times.length]); - } - - valueDecoder.reset(); - PageReader pageReader = - new PageReader(pageData, chunkHeader.getDataType(), valueDecoder, timeDecoder); - BatchData batchData = pageReader.getAllSatisfiedPageData(); - long[] times = new long[batchData.length()]; - Object[] values = new Object[batchData.length()]; - int index = 0; - while (batchData.hasCurrent()) { - times[index] = batchData.currentTime(); - values[index++] = batchData.currentValue(); - batchData.next(); - } - return new Pair<>(times, values); - } - - private void handleEmptyValueChunk( - ChunkHeader header, - Map> pageIndex2ChunkData, - IChunkMetadata chunkMetadata, - boolean isTimeChunkNeedDecode) - throws IOException { - Set allChunkData = new HashSet<>(); - for (Map.Entry> entry : pageIndex2ChunkData.entrySet()) { - for (AlignedChunkData alignedChunkData : entry.getValue()) { - if (!allChunkData.contains(alignedChunkData)) { - alignedChunkData.addValueChunk(header); - if (!isTimeChunkNeedDecode) { - 
alignedChunkData.writeEntireChunk(ByteBuffer.allocate(0), chunkMetadata); - } - allChunkData.add(alignedChunkData); - } - } - } - } - - /** - * handle empty page in aligned chunk, if uncompressedSize and compressedSize are both 0, and the - * statistics is null, then the page is empty. - * - * @param pageHeader page header - * @return true if the page is empty - */ - private boolean isEmptyPage(PageHeader pageHeader) { - return pageHeader.getUncompressedSize() == 0 - && pageHeader.getCompressedSize() == 0 - && pageHeader.getStatistics() == null; - } - - private TsPrimitiveType[] decodeValuePage( - TsFileSequenceReader reader, - ChunkHeader chunkHeader, - PageHeader pageHeader, - long[] times, - Decoder valueDecoder) - throws IOException { - if (pageHeader.getSerializedPageSize() == 0) { - return new TsPrimitiveType[times.length]; - } - - valueDecoder.reset(); - ByteBuffer pageData = reader.readPage(pageHeader, chunkHeader.getCompressionType()); - ValuePageReader valuePageReader = - new ValuePageReader(pageHeader, pageData, chunkHeader.getDataType(), valueDecoder); - return valuePageReader.nextValueBatch( - times); // should be origin time, so recording satisfied length is necessary - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/AbstractOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/AbstractOperator.java index 44120ba1d12a2..e5d23d88d8560 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/AbstractOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/AbstractOperator.java @@ -40,9 +40,12 @@ public void initializeMaxTsBlockLength(TsBlock tsBlock) { if (maxTupleSizeOfTsBlock != -1) { return; } + // oneTupleSize should be greater than 0 to avoid division by zero long oneTupleSize = - (tsBlock.getRetainedSizeInBytes() - tsBlock.getTotalInstanceSize()) - / tsBlock.getPositionCount(); + Math.max( + 1, + (tsBlock.getSizeInBytes() - tsBlock.getTotalInstanceSize()) + / tsBlock.getPositionCount()); if (oneTupleSize > maxReturnSize) { // make sure at least one-tuple-at-a-time this.maxTupleSizeOfTsBlock = 1; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/AggregationUtil.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/AggregationUtil.java index 96c36fe8fcd9d..8340a0ca40726 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/AggregationUtil.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/AggregationUtil.java @@ -46,6 +46,7 @@ import org.apache.tsfile.read.common.block.column.TimeColumnBuilder; import org.apache.tsfile.utils.Pair; +import java.time.ZoneId; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; @@ -72,7 +73,8 @@ private AggregationUtil() { public static ITimeRangeIterator initTimeRangeIterator( GroupByTimeParameter groupByTimeParameter, boolean ascending, - boolean outputPartialTimeWindow) { + boolean outputPartialTimeWindow, + ZoneId zoneId) { if (groupByTimeParameter == null) { return new SingleTimeWindowIterator(Long.MIN_VALUE, Long.MAX_VALUE); } else { @@ -83,7 +85,8 @@ public static ITimeRangeIterator initTimeRangeIterator( groupByTimeParameter.getSlidingStep(), ascending, groupByTimeParameter.isLeftCRightO(), - outputPartialTimeWindow); + outputPartialTimeWindow, + 
zoneId); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/AggregationMergeSortOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/AggregationMergeSortOperator.java index cd1d533202836..f67975d84fbd6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/AggregationMergeSortOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/AggregationMergeSortOperator.java @@ -147,7 +147,7 @@ public TsBlock next() throws Exception { outputResultToTsBlock(); } - return tsBlockBuilder.build(); + return tsBlockBuilder.getPositionCount() > 0 ? tsBlockBuilder.build() : null; } private void outputResultToTsBlock() { @@ -160,6 +160,7 @@ private void outputResultToTsBlock() { } tsBlockBuilder.declarePosition(); accumulators.forEach(Accumulator::reset); + lastDevice = null; } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/SlidingWindowAggregationOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/SlidingWindowAggregationOperator.java index 9a1de9522af5e..c7e5769d6752e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/SlidingWindowAggregationOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/SlidingWindowAggregationOperator.java @@ -32,6 +32,7 @@ import org.apache.tsfile.read.common.block.TsBlockBuilder; import org.apache.tsfile.utils.RamUsageEstimator; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -61,7 +62,8 @@ public SlidingWindowAggregationOperator( boolean ascending, boolean outputEndTime, GroupByTimeParameter groupByTimeParameter, - long maxReturnSize) { + long maxReturnSize, + ZoneId zoneId) { super(operatorContext, aggregators, child, ascending, maxReturnSize); checkArgument( groupByTimeParameter != null, @@ -78,7 +80,8 @@ public SlidingWindowAggregationOperator( this.timeRangeIterator = timeRangeIterator; this.outputEndTime = outputEndTime; - this.subTimeRangeIterator = initTimeRangeIterator(groupByTimeParameter, ascending, true); + this.subTimeRangeIterator = + initTimeRangeIterator(groupByTimeParameter, ascending, true, zoneId); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/SortOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/SortOperator.java index 55453353f0dea..834429cd1c3dd 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/SortOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/SortOperator.java @@ -138,7 +138,7 @@ public TsBlock next() throws Exception { if (tsBlock == null) { return null; } - dataSize += tsBlock.getRetainedSizeInBytes(); + dataSize += tsBlock.getSizeInBytes(); cacheTsBlock(tsBlock); } catch (IoTDBException e) { clear(); @@ -184,7 +184,7 @@ private void prepareSortReaders() throws IoTDBException { } private void cacheTsBlock(TsBlock tsBlock) throws IoTDBException { - long bytesSize = tsBlock.getRetainedSizeInBytes(); + long bytesSize = tsBlock.getSizeInBytes(); if (bytesSize + cachedBytes < SORT_BUFFER_SIZE) { 
cachedBytes += bytesSize; for (int i = 0; i < tsBlock.getPositionCount(); i++) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/TopKOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/TopKOperator.java index ad4e04e2aed70..08d03384693c9 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/TopKOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/TopKOperator.java @@ -281,11 +281,13 @@ private void initResultTsBlock() { new boolean[positionCount]); break; case INT32: + case DATE: columns[i] = new IntColumn( positionCount, Optional.of(new boolean[positionCount]), new int[positionCount]); break; case INT64: + case TIMESTAMP: columns[i] = new LongColumn( positionCount, Optional.of(new boolean[positionCount]), new long[positionCount]); @@ -303,6 +305,8 @@ private void initResultTsBlock() { new double[positionCount]); break; case TEXT: + case STRING: + case BLOB: columns[i] = new BinaryColumn( positionCount, @@ -366,14 +370,18 @@ private long getMemoryUsageOfOneMergeSortKey() { break; case INT32: case FLOAT: + case DATE: memory += 4; break; case INT64: case DOUBLE: case VECTOR: + case TIMESTAMP: memory += 8; break; case TEXT: + case STRING: + case BLOB: memory += 16; break; default: diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/ai/InferenceOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/ai/InferenceOperator.java new file mode 100644 index 0000000000000..9bdb57dc5bd0e --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/ai/InferenceOperator.java @@ -0,0 +1,377 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.queryengine.execution.operator.process.ai; + +import org.apache.iotdb.ainode.rpc.thrift.TInferenceResp; +import org.apache.iotdb.ainode.rpc.thrift.TWindowParams; +import org.apache.iotdb.commons.client.ainode.AINodeClient; +import org.apache.iotdb.commons.client.ainode.AINodeClientManager; +import org.apache.iotdb.db.exception.runtime.ModelInferenceProcessException; +import org.apache.iotdb.db.queryengine.execution.MemoryEstimationHelper; +import org.apache.iotdb.db.queryengine.execution.operator.Operator; +import org.apache.iotdb.db.queryengine.execution.operator.OperatorContext; +import org.apache.iotdb.db.queryengine.execution.operator.process.ProcessOperator; +import org.apache.iotdb.db.queryengine.execution.operator.window.ainode.BottomInferenceWindowParameter; +import org.apache.iotdb.db.queryengine.execution.operator.window.ainode.CountInferenceWindowParameter; +import org.apache.iotdb.db.queryengine.execution.operator.window.ainode.InferenceWindowType; +import org.apache.iotdb.db.queryengine.plan.planner.plan.parameter.model.ModelInferenceDescriptor; +import org.apache.iotdb.rpc.TSStatusCode; + +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import org.apache.tsfile.block.column.Column; +import org.apache.tsfile.block.column.ColumnBuilder; +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.read.common.block.TsBlock; +import org.apache.tsfile.read.common.block.TsBlockBuilder; +import org.apache.tsfile.read.common.block.column.TimeColumnBuilder; +import org.apache.tsfile.read.common.block.column.TsBlockSerde; +import org.apache.tsfile.utils.RamUsageEstimator; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.stream.Collectors; + +import static com.google.common.util.concurrent.Futures.successfulAsList; + +public class InferenceOperator implements ProcessOperator { + + private static final long INSTANCE_SIZE = + RamUsageEstimator.shallowSizeOfInstance(InferenceOperator.class); + + private final OperatorContext operatorContext; + private final Operator child; + private final ModelInferenceDescriptor modelInferenceDescriptor; + + private final TsBlockBuilder inputTsBlockBuilder; + + private final ExecutorService modelInferenceExecutor; + private ListenableFuture inferenceExecutionFuture; + + private boolean finished = false; + + private final long maxRetainedSize; + private final long maxReturnSize; + private final List inputColumnNames; + private final List targetColumnNames; + private long totalRow; + private int resultIndex = 0; + private List results; + private final TsBlockSerde serde = new TsBlockSerde(); + private InferenceWindowType windowType = null; + + private final boolean generateTimeColumn; + private long maxTimestamp; + private long minTimestamp; + private long interval; + private long currentRowIndex; + + public InferenceOperator( + OperatorContext operatorContext, + Operator child, + ModelInferenceDescriptor modelInferenceDescriptor, + ExecutorService modelInferenceExecutor, + List targetColumnNames, + List inputColumnNames, + boolean generateTimeColumn, + long maxRetainedSize, + long maxReturnSize) { + this.operatorContext = operatorContext; + this.child = child; + this.modelInferenceDescriptor = modelInferenceDescriptor; + this.inputTsBlockBuilder = + new 
TsBlockBuilder( + Arrays.asList(modelInferenceDescriptor.getModelInformation().getInputDataType())); + this.modelInferenceExecutor = modelInferenceExecutor; + this.targetColumnNames = targetColumnNames; + this.inputColumnNames = inputColumnNames; + this.maxRetainedSize = maxRetainedSize; + this.maxReturnSize = maxReturnSize; + this.totalRow = 0; + + if (modelInferenceDescriptor.getInferenceWindowParameter() != null) { + windowType = modelInferenceDescriptor.getInferenceWindowParameter().getWindowType(); + } + + if (generateTimeColumn) { + this.interval = 0; + this.minTimestamp = Long.MAX_VALUE; + this.maxTimestamp = Long.MIN_VALUE; + this.currentRowIndex = 0; + } + this.generateTimeColumn = generateTimeColumn; + } + + @Override + public OperatorContext getOperatorContext() { + return operatorContext; + } + + @Override + public ListenableFuture isBlocked() { + ListenableFuture childBlocked = child.isBlocked(); + boolean executionDone = forecastExecutionDone(); + if (executionDone && childBlocked.isDone()) { + return NOT_BLOCKED; + } else if (childBlocked.isDone()) { + return inferenceExecutionFuture; + } else if (executionDone) { + return childBlocked; + } else { + return successfulAsList(Arrays.asList(inferenceExecutionFuture, childBlocked)); + } + } + + private boolean forecastExecutionDone() { + if (inferenceExecutionFuture == null) { + return true; + } + return inferenceExecutionFuture.isDone(); + } + + @Override + public boolean hasNext() throws Exception { + return !finished || (results != null && results.size() != resultIndex); + } + + private void fillTimeColumn(TsBlock tsBlock) { + Column timeColumn = tsBlock.getTimeColumn(); + long[] time = timeColumn.getLongs(); + for (int i = 0; i < time.length; i++) { + time[i] = maxTimestamp + interval * currentRowIndex; + currentRowIndex++; + } + } + + @Override + public TsBlock next() throws Exception { + if (inferenceExecutionFuture == null) { + if (child.hasNextWithTimer()) { + TsBlock inputTsBlock = child.nextWithTimer(); + if (inputTsBlock != null) { + appendTsBlockToBuilder(inputTsBlock); + } + } else { + submitInferenceTask(); + } + return null; + } else { + + if (results != null && resultIndex != results.size()) { + TsBlock tsBlock = serde.deserialize(results.get(resultIndex)); + if (generateTimeColumn) { + fillTimeColumn(tsBlock); + } + resultIndex++; + return tsBlock; + } + + try { + if (!inferenceExecutionFuture.isDone()) { + throw new IllegalStateException( + "The operator cannot continue until the forecast execution is done."); + } + + TInferenceResp inferenceResp = inferenceExecutionFuture.get(); + if (inferenceResp.getStatus().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + String message = + String.format( + "Error occurred while executing inference:[%s]", + inferenceResp.getStatus().getMessage()); + throw new ModelInferenceProcessException(message); + } + + finished = true; + TsBlock resultTsBlock = serde.deserialize(inferenceResp.inferenceResult.get(0)); + if (generateTimeColumn) { + fillTimeColumn(resultTsBlock); + } + results = inferenceResp.inferenceResult; + resultIndex++; + return resultTsBlock; + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new ModelInferenceProcessException(e.getMessage()); + } catch (ExecutionException e) { + throw new ModelInferenceProcessException(e.getMessage()); + } + } + } + + private void appendTsBlockToBuilder(TsBlock inputTsBlock) { + TimeColumnBuilder timeColumnBuilder = inputTsBlockBuilder.getTimeColumnBuilder(); + ColumnBuilder[] 
columnBuilders = inputTsBlockBuilder.getValueColumnBuilders(); + totalRow += inputTsBlock.getPositionCount(); + for (int i = 0; i < inputTsBlock.getPositionCount(); i++) { + long timestamp = inputTsBlock.getTimeByIndex(i); + if (generateTimeColumn) { + minTimestamp = Math.min(minTimestamp, timestamp); + maxTimestamp = Math.max(maxTimestamp, timestamp); + } + timeColumnBuilder.writeLong(timestamp); + for (int columnIndex = 0; columnIndex < inputTsBlock.getValueColumnCount(); columnIndex++) { + columnBuilders[columnIndex].write(inputTsBlock.getColumn(columnIndex), i); + } + inputTsBlockBuilder.declarePosition(); + } + } + + private TWindowParams getWindowParams() { + TWindowParams windowParams; + if (windowType == null) { + return null; + } + if (windowType == InferenceWindowType.COUNT) { + CountInferenceWindowParameter countInferenceWindowParameter = + (CountInferenceWindowParameter) modelInferenceDescriptor.getInferenceWindowParameter(); + windowParams = new TWindowParams(); + windowParams.setWindowInterval((int) countInferenceWindowParameter.getInterval()); + windowParams.setWindowStep((int) countInferenceWindowParameter.getStep()); + } else { + windowParams = null; + } + return windowParams; + } + + private TsBlock preProcess(TsBlock inputTsBlock) { + boolean notBuiltIn = !modelInferenceDescriptor.getModelInformation().isBuiltIn(); + if (windowType == null || windowType == InferenceWindowType.HEAD) { + if (notBuiltIn + && totalRow != modelInferenceDescriptor.getModelInformation().getInputShape()[0]) { + throw new ModelInferenceProcessException( + String.format( + "The number of rows %s in the input data does not match the model input %s. Try to use LIMIT in SQL or WINDOW in CALL INFERENCE", + totalRow, modelInferenceDescriptor.getModelInformation().getInputShape()[0])); + } + return inputTsBlock; + } else if (windowType == InferenceWindowType.COUNT) { + if (notBuiltIn + && totalRow < modelInferenceDescriptor.getModelInformation().getInputShape()[0]) { + throw new ModelInferenceProcessException( + String.format( + "The number of rows %s in the input data is less than the model input %s. ", + totalRow, modelInferenceDescriptor.getModelInformation().getInputShape()[0])); + } + } else if (windowType == InferenceWindowType.TAIL) { + if (notBuiltIn + && totalRow < modelInferenceDescriptor.getModelInformation().getInputShape()[0]) { + throw new ModelInferenceProcessException( + String.format( + "The number of rows %s in the input data is less than the model input %s. 
", + totalRow, modelInferenceDescriptor.getModelInformation().getInputShape()[0])); + } + // Tail window logic: get the latest data for inference + long windowSize = + (int) + ((BottomInferenceWindowParameter) + modelInferenceDescriptor.getInferenceWindowParameter()) + .getWindowSize(); + return inputTsBlock.subTsBlock((int) (totalRow - windowSize)); + } + return inputTsBlock; + } + + private void submitInferenceTask() { + + if (generateTimeColumn) { + interval = (maxTimestamp - minTimestamp) / totalRow; + } + + TsBlock inputTsBlock = inputTsBlockBuilder.build(); + + TsBlock finalInputTsBlock = preProcess(inputTsBlock); + TWindowParams windowParams = getWindowParams(); + + Map columnNameIndexMap = new HashMap<>(); + + for (int i = 0; i < inputColumnNames.size(); i++) { + columnNameIndexMap.put(inputColumnNames.get(i), i); + } + + inferenceExecutionFuture = + Futures.submit( + () -> { + try (AINodeClient client = + AINodeClientManager.getInstance() + .borrowClient(modelInferenceDescriptor.getTargetAINode())) { + return client.inference( + modelInferenceDescriptor.getModelName(), + targetColumnNames, + Arrays.stream(modelInferenceDescriptor.getModelInformation().getInputDataType()) + .map(TSDataType::toString) + .collect(Collectors.toList()), + columnNameIndexMap, + finalInputTsBlock, + modelInferenceDescriptor.getInferenceAttributes(), + windowParams); + } catch (Exception e) { + throw new ModelInferenceProcessException(e.getMessage()); + } + }, + modelInferenceExecutor); + } + + @Override + public boolean isFinished() throws Exception { + return finished && !hasNext(); + } + + @Override + public void close() throws Exception { + if (inferenceExecutionFuture != null) { + inferenceExecutionFuture.cancel(true); + } + child.close(); + } + + @Override + public long calculateMaxPeekMemory() { + return maxReturnSize + maxRetainedSize + child.calculateMaxPeekMemory(); + } + + @Override + public long calculateMaxReturnSize() { + return maxReturnSize; + } + + @Override + public long calculateRetainedSizeAfterCallingNext() { + return maxRetainedSize + child.calculateRetainedSizeAfterCallingNext(); + } + + @Override + public long ramBytesUsed() { + return INSTANCE_SIZE + + MemoryEstimationHelper.getEstimatedSizeOfAccountableObject(child) + + MemoryEstimationHelper.getEstimatedSizeOfAccountableObject(operatorContext) + + inputTsBlockBuilder.getRetainedSizeInBytes() + + (inputColumnNames == null + ? 0 + : inputColumnNames.stream().mapToLong(RamUsageEstimator::sizeOf).sum()) + + (targetColumnNames == null + ? 
0 + : targetColumnNames.stream().mapToLong(RamUsageEstimator::sizeOf).sum()); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/join/merge/MergeSortComparator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/join/merge/MergeSortComparator.java index c299a368e732a..4b8afe4e39eba 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/join/merge/MergeSortComparator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/join/merge/MergeSortComparator.java @@ -70,11 +70,13 @@ public static Comparator getComparator(TSDataType dataType, int index, Comparator comparator; switch (dataType) { case INT32: + case DATE: comparator = Comparator.comparingInt( (SortKey sortKey) -> sortKey.tsBlock.getColumn(index).getInt(sortKey.rowIndex)); break; case INT64: + case TIMESTAMP: comparator = Comparator.comparingLong( (SortKey sortKey) -> sortKey.tsBlock.getColumn(index).getLong(sortKey.rowIndex)); @@ -90,6 +92,8 @@ public static Comparator getComparator(TSDataType dataType, int index, (SortKey sortKey) -> sortKey.tsBlock.getColumn(index).getDouble(sortKey.rowIndex)); break; case TEXT: + case BLOB: + case STRING: comparator = Comparator.comparing( (SortKey sortKey) -> sortKey.tsBlock.getColumn(index).getBinary(sortKey.rowIndex)); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/AbstractUpdateLastCacheOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/AbstractUpdateLastCacheOperator.java index b4ce8943ea9a8..30d305bcec7ea 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/AbstractUpdateLastCacheOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/AbstractUpdateLastCacheOperator.java @@ -61,12 +61,15 @@ public abstract class AbstractUpdateLastCacheOperator implements ProcessOperator protected String databaseName; + protected boolean deviceInMultiRegion; + protected AbstractUpdateLastCacheOperator( - OperatorContext operatorContext, - Operator child, - DataNodeSchemaCache dataNodeSchemaCache, - boolean needUpdateCache, - boolean needUpdateNullEntry) { + final OperatorContext operatorContext, + final Operator child, + final DataNodeSchemaCache dataNodeSchemaCache, + final boolean needUpdateCache, + final boolean needUpdateNullEntry, + final boolean deviceInMultiRegion) { this.operatorContext = operatorContext; this.child = child; this.lastCache = dataNodeSchemaCache; @@ -75,6 +78,7 @@ protected AbstractUpdateLastCacheOperator( this.tsBlockBuilder = LastQueryUtil.createTsBlockBuilder(1); this.dataNodeQueryContext = operatorContext.getDriverContext().getFragmentInstanceContext().getDataNodeQueryContext(); + this.deviceInMultiRegion = deviceInMultiRegion; } @Override @@ -103,8 +107,8 @@ protected void mayUpdateLastCache( return; } try { - dataNodeQueryContext.lock(); - Pair seriesScanInfo = + dataNodeQueryContext.lock(deviceInMultiRegion); + final Pair seriesScanInfo = dataNodeQueryContext.getSeriesScanInfo(fullPath); // may enter this case when use TTL @@ -112,6 +116,11 @@ protected void mayUpdateLastCache( return; } + if (!deviceInMultiRegion) { + lastCache.updateLastCache( + getDatabaseName(), fullPath, new TimeValuePair(time, value), false, Long.MIN_VALUE); 
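// (Editorial note on the single-region fast path added above: when deviceInMultiRegion is
// false, only one region can produce a last value for this series, so the operator appears to
// update the last cache directly and skip the merge through DataNodeQueryContext that follows;
// the lock()/unLock() calls likewise now take the flag, apparently so that locking can be
// skipped in the single-region case.)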
+ return; + } // update cache in DataNodeQueryContext if (seriesScanInfo.right == null || time > seriesScanInfo.right.getTimestamp()) { seriesScanInfo.right = new TimeValuePair(time, value); @@ -122,7 +131,7 @@ protected void mayUpdateLastCache( getDatabaseName(), fullPath, seriesScanInfo.right, false, Long.MIN_VALUE); } } finally { - dataNodeQueryContext.unLock(); + dataNodeQueryContext.unLock(deviceInMultiRegion); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/AlignedUpdateLastCacheOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/AlignedUpdateLastCacheOperator.java index c3f5fff6f781a..85c3134c4290a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/AlignedUpdateLastCacheOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/AlignedUpdateLastCacheOperator.java @@ -47,8 +47,15 @@ public AlignedUpdateLastCacheOperator( AlignedPath seriesPath, DataNodeSchemaCache dataNodeSchemaCache, boolean needUpdateCache, - boolean needUpdateNullEntry) { - super(operatorContext, child, dataNodeSchemaCache, needUpdateCache, needUpdateNullEntry); + boolean needUpdateNullEntry, + boolean deviceInMultiRegion) { + super( + operatorContext, + child, + dataNodeSchemaCache, + needUpdateCache, + needUpdateNullEntry, + deviceInMultiRegion); this.seriesPath = seriesPath; this.devicePath = seriesPath.getDevicePath(); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/AlignedUpdateViewPathLastCacheOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/AlignedUpdateViewPathLastCacheOperator.java index 9a8a309b2ec9c..3fc9f0412bc29 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/AlignedUpdateViewPathLastCacheOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/AlignedUpdateViewPathLastCacheOperator.java @@ -41,14 +41,16 @@ public AlignedUpdateViewPathLastCacheOperator( DataNodeSchemaCache dataNodeSchemaCache, boolean needUpdateCache, boolean needUpdateNullEntry, - String outputViewPath) { + String outputViewPath, + boolean deviceInMultiRegion) { super( operatorContext, child, seriesPath, dataNodeSchemaCache, needUpdateCache, - needUpdateNullEntry); + needUpdateNullEntry, + deviceInMultiRegion); checkArgument(seriesPath.getMeasurementList().size() == 1); this.outputViewPath = outputViewPath; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/LastQueryOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/LastQueryOperator.java index 9f50dcb6e7d9f..dd41bbd7afa01 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/LastQueryOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/LastQueryOperator.java @@ -116,6 +116,7 @@ public TsBlock next() throws Exception { return null; } else if (!tsBlock.isEmpty()) { LastQueryUtil.appendLastValue(tsBlockBuilder, tsBlock); + return null; } } else { children.get(currentIndex).close(); diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/LastQuerySortOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/LastQuerySortOperator.java index 0d10207380257..e40cb1ad1316a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/LastQuerySortOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/LastQuerySortOperator.java @@ -153,7 +153,8 @@ private TsBlock buildResult() throws Exception { while (keepGoing(start, maxRuntime, endIndex)) { - if (prepareData()) { + prepareData(); + if (previousTsBlock == null) { return null; } @@ -179,21 +180,18 @@ private boolean keepGoing(long start, long maxRuntime, int endIndex) { && !tsBlockBuilder.isFull(); } - private boolean prepareData() throws Exception { + private void prepareData() throws Exception { if (previousTsBlock == null || previousTsBlock.getPositionCount() <= previousTsBlockIndex) { if (children.get(currentIndex).hasNextWithTimer()) { previousTsBlock = children.get(currentIndex).nextWithTimer(); previousTsBlockIndex = 0; - if (previousTsBlock == null) { - return true; - } + return; } else { children.get(currentIndex).close(); children.set(currentIndex, null); } currentIndex++; } - return false; } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/UpdateLastCacheOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/UpdateLastCacheOperator.java index d49e89c19ef92..d55f1d9fd4e8e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/UpdateLastCacheOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/last/UpdateLastCacheOperator.java @@ -53,7 +53,33 @@ public UpdateLastCacheOperator( DataNodeSchemaCache dataNodeSchemaCache, boolean needUpdateCache, boolean isNeedUpdateNullEntry) { - super(operatorContext, child, dataNodeSchemaCache, needUpdateCache, isNeedUpdateNullEntry); + this( + operatorContext, + child, + fullPath, + dataType, + dataNodeSchemaCache, + needUpdateCache, + isNeedUpdateNullEntry, + true); + } + + public UpdateLastCacheOperator( + OperatorContext operatorContext, + Operator child, + MeasurementPath fullPath, + TSDataType dataType, + DataNodeSchemaCache dataNodeSchemaCache, + boolean needUpdateCache, + boolean isNeedUpdateNullEntry, + boolean deviceInMultiRegion) { + super( + operatorContext, + child, + dataNodeSchemaCache, + needUpdateCache, + isNeedUpdateNullEntry, + deviceInMultiRegion); this.fullPath = fullPath; this.dataType = dataType.name(); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/schema/SchemaFetchScanOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/schema/SchemaFetchScanOperator.java index f4b40c69b79e4..9a8dbf9f21b36 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/schema/SchemaFetchScanOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/schema/SchemaFetchScanOperator.java @@ -23,7 +23,9 @@ import org.apache.iotdb.commons.exception.runtime.SchemaExecutionException; import org.apache.iotdb.commons.path.PathPatternTree; import 
org.apache.iotdb.commons.schema.SchemaConstant; +import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.db.queryengine.common.schematree.ClusterSchemaTree; +import org.apache.iotdb.db.queryengine.common.schematree.node.SchemaNode; import org.apache.iotdb.db.queryengine.execution.MemoryEstimationHelper; import org.apache.iotdb.db.queryengine.execution.operator.OperatorContext; import org.apache.iotdb.db.queryengine.execution.operator.source.SourceOperator; @@ -36,12 +38,12 @@ import org.apache.tsfile.read.common.block.column.BinaryColumn; import org.apache.tsfile.read.common.block.column.TimeColumn; import org.apache.tsfile.utils.Binary; +import org.apache.tsfile.utils.PublicBAOS; import org.apache.tsfile.utils.RamUsageEstimator; import org.apache.tsfile.utils.ReadWriteIOUtils; -import java.io.ByteArrayOutputStream; -import java.io.IOException; import java.util.Collections; +import java.util.Iterator; import java.util.Map; import java.util.NoSuchElementException; import java.util.Optional; @@ -62,7 +64,12 @@ public class SchemaFetchScanOperator implements SourceOperator { private boolean isFinished = false; private final PathPatternTree authorityScope; - private static final int DEFAULT_MAX_TSBLOCK_SIZE_IN_BYTES = + private Iterator schemaNodeIteratorForSerialize = null; + private long schemaTreeMemCost; + private PublicBAOS baos = null; + // Reserve some bytes to avoid capacity growth + private static final int EXTRA_SIZE_TO_AVOID_GROW = 1024; + private static int DEFAULT_MAX_TSBLOCK_SIZE_IN_BYTES = TSFileDescriptor.getInstance().getConfig().getMaxTsBlockSizeInBytes(); private static final long INSTANCE_SIZE = @@ -152,12 +159,33 @@ public TsBlock next() throws Exception { if (!hasNext()) { throw new NoSuchElementException(); } - isFinished = true; - try { - return fetchSchema(); - } catch (MetadataException e) { - throw new SchemaExecutionException(e); + + boolean isFirstBatch = schemaNodeIteratorForSerialize == null; + prepareSchemaNodeIteratorForSerialize(); + // to indicate this binary data is part of the schema tree, and the remaining parts will be + // sent later + ReadWriteIOUtils.write((byte) 2, baos); + // the estimated memory cost to deserialize the whole schema tree is sent with the first batch + if (isFirstBatch) { + ReadWriteIOUtils.write(schemaTreeMemCost, baos); + } + while (schemaNodeIteratorForSerialize.hasNext() + && baos.size() < DEFAULT_MAX_TSBLOCK_SIZE_IN_BYTES) { + SchemaNode node = schemaNodeIteratorForSerialize.next(); + node.serializeNodeOwnContent(baos); } + byte[] currentBatch = baos.toByteArray(); + baos.reset(); + isFinished = !schemaNodeIteratorForSerialize.hasNext(); + if (isFinished) { + // mark this as the final batch of the binary data + currentBatch[0] = 3; + releaseSchemaTree(); + baos = null; + } + return new TsBlock( + new TimeColumn(1, new long[] {0}), + new BinaryColumn(1, Optional.empty(), new Binary[] {new Binary(currentBatch)})); } @Override @@ -172,7 +200,8 @@ public boolean isFinished() throws Exception { @Override public void close() throws Exception { - // do nothing + releaseSchemaTree(); + baos = null; } @Override @@ -180,26 +209,34 @@ public PlanNodeId getSourceId() { return sourceId; } - private TsBlock fetchSchema() throws MetadataException { - ClusterSchemaTree schemaTree = - fetchDevice - ? 
schemaRegion.fetchDeviceSchema(patternTree, authorityScope) - : schemaRegion.fetchSeriesSchema( - patternTree, templateMap, withTags, withAttributes, withTemplate, withAliasForce); - - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + private void prepareSchemaNodeIteratorForSerialize() { + if (schemaNodeIteratorForSerialize != null) { + return; + } try { - // to indicate this binary data is database info - ReadWriteIOUtils.write((byte) 1, outputStream); - - schemaTree.serialize(outputStream); - } catch (IOException e) { - // Totally memory operation. This case won't happen. + ClusterSchemaTree schemaTree = + fetchDevice + ? schemaRegion.fetchDeviceSchema(patternTree, authorityScope) + : schemaRegion.fetchSeriesSchema( + patternTree, templateMap, withTags, withAttributes, withTemplate, withAliasForce); + schemaNodeIteratorForSerialize = schemaTree.getIteratorForSerialize(); + baos = new PublicBAOS(DEFAULT_MAX_TSBLOCK_SIZE_IN_BYTES + EXTRA_SIZE_TO_AVOID_GROW); + if (operatorContext != null) { + long ramBytesUsed = schemaTree.ramBytesUsed(); + operatorContext + .getInstanceContext() + .getMemoryReservationContext() + .reserveMemoryCumulatively(ramBytesUsed); + // For temporary and independently counted memory, we need to process it immediately + operatorContext + .getInstanceContext() + .getMemoryReservationContext() + .reserveMemoryImmediately(); + this.schemaTreeMemCost = ramBytesUsed; + } + } catch (MetadataException e) { + throw new SchemaExecutionException(e); } - return new TsBlock( - new TimeColumn(1, new long[] {0}), - new BinaryColumn( - 1, Optional.empty(), new Binary[] {new Binary(outputStream.toByteArray())})); } @Override @@ -221,6 +258,25 @@ public long calculateRetainedSizeAfterCallingNext() { public long ramBytesUsed() { return INSTANCE_SIZE + MemoryEstimationHelper.getEstimatedSizeOfAccountableObject(operatorContext) + + DEFAULT_MAX_TSBLOCK_SIZE_IN_BYTES + + EXTRA_SIZE_TO_AVOID_GROW + MemoryEstimationHelper.getEstimatedSizeOfAccountableObject(sourceId); } + + private void releaseSchemaTree() { + if (schemaTreeMemCost <= 0 || operatorContext == null) { + return; + } + operatorContext + .getInstanceContext() + .getMemoryReservationContext() + .releaseMemoryCumulatively(schemaTreeMemCost); + schemaTreeMemCost = 0; + schemaNodeIteratorForSerialize = null; + } + + @TestOnly + public static void setDefaultMaxTsBlockSizeInBytes(int newSize) { + DEFAULT_MAX_TSBLOCK_SIZE_IN_BYTES = newSize; + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AbstractSeriesAggregationScanOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AbstractSeriesAggregationScanOperator.java index d8ca582940398..fe24513ae436e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AbstractSeriesAggregationScanOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AbstractSeriesAggregationScanOperator.java @@ -25,7 +25,6 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId; import org.apache.iotdb.db.queryengine.plan.planner.plan.parameter.GroupByTimeParameter; -import org.apache.tsfile.common.conf.TSFileDescriptor; import org.apache.tsfile.enums.TSDataType; import org.apache.tsfile.file.metadata.statistics.Statistics; import org.apache.tsfile.read.common.TimeRange; @@ -36,6 +35,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; 
+import java.util.Optional; import java.util.concurrent.TimeUnit; import static org.apache.iotdb.db.queryengine.execution.operator.AggregationUtil.appendAggregationResult; @@ -83,6 +83,7 @@ protected AbstractSeriesAggregationScanOperator( boolean outputEndTime, GroupByTimeParameter groupByTimeParameter, long maxReturnSize, + long cachedRawDataSize, boolean canUseStatistics) { this.sourceId = sourceId; this.operatorContext = context; @@ -93,8 +94,7 @@ protected AbstractSeriesAggregationScanOperator( this.aggregators = aggregators; this.timeRangeIterator = timeRangeIterator; - this.cachedRawDataSize = - (1L + subSensorSize) * TSFileDescriptor.getInstance().getConfig().getPageSizeInByte(); + this.cachedRawDataSize = cachedRawDataSize; this.maxReturnSize = maxReturnSize; this.outputEndTime = outputEndTime; this.canUseStatistics = canUseStatistics; @@ -141,7 +141,11 @@ public TsBlock next() throws Exception { // calculate aggregation result on current time window // Keep curTimeRange if the calculation of this timeRange is not done - if (calculateAggregationResultForCurrentTimeRange()) { + Optional b = calculateAggregationResultForCurrentTimeRange(); + if (!b.isPresent()) { + continue; + } + if (b.get()) { curTimeRange = null; } } @@ -165,41 +169,58 @@ public boolean isFinished() throws Exception { @SuppressWarnings("squid:S112") - /** Return true if we have the result of this timeRange. */ - protected boolean calculateAggregationResultForCurrentTimeRange() { + /** + * Return Optional.of(true) if we have the result of this timeRange, Optional.of(false) if we + * do not yet, and Optional.empty() if that cannot be determined at the moment. + */ + protected Optional calculateAggregationResultForCurrentTimeRange() { try { if (calcFromCachedData()) { updateResultTsBlock(); - return true; + return Optional.of(true); } if (readAndCalcFromPage()) { updateResultTsBlock(); - return true; + return Optional.of(true); } // only when all the page data has been consumed, we need to read the chunk data if (!seriesScanUtil.hasNextPage() && readAndCalcFromChunk()) { updateResultTsBlock(); - return true; + return Optional.of(true); } // only when all the page and chunk data has been consumed, we need to read the file data - if (!seriesScanUtil.hasNextPage() - && !seriesScanUtil.hasNextChunk() - && readAndCalcFromFile()) { - updateResultTsBlock(); - return true; + Optional b; + if (!seriesScanUtil.hasNextPage()) { + b = seriesScanUtil.hasNextChunk(); + if (!b.isPresent()) { + return b; + } + if (!b.get() && readAndCalcFromFile()) { + updateResultTsBlock(); + return Optional.of(true); + } } // If the TimeRange is (Long.MIN_VALUE, Long.MAX_VALUE), for Aggregators like countAggregator, // we have to consume all the data before we finish the aggregation calculation. 
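// (Editorial note: throughout this refactor, seriesScanUtil.hasNextChunk() and hasNextFile()
// return Optional instead of boolean. Judging from the call sites, the convention is that a
// present value is the definite answer, while an empty Optional means the answer is not
// available yet, so callers propagate the empty Optional or continue the loop and retry
// rather than block. A minimal caller sketch under that reading, using the names from this
// file; the retry semantics are an inference, not stated by the patch:
//   Optional<Boolean> b = seriesScanUtil.hasNextChunk();
//   if (!b.isPresent()) { return b; }               // undetermined: bubble up, re-invoke later
//   if (!b.get() && readAndCalcFromFile()) { ... }  // definite answer: proceed normally)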
- if (seriesScanUtil.hasNextPage() - || seriesScanUtil.hasNextChunk() - || seriesScanUtil.hasNextFile()) { - return false; + if (seriesScanUtil.hasNextPage()) { + return Optional.of(false); + } + b = seriesScanUtil.hasNextChunk(); + if (!b.isPresent()) { + return b; + } + if (b.get()) { + return Optional.of(false); + } + b = seriesScanUtil.hasNextFile(); + if (!b.isPresent()) { + return b; + } + if (b.get()) { + return Optional.of(false); } updateResultTsBlock(); - return true; + return Optional.of(true); } catch (IOException e) { throw new RuntimeException("Error while scanning the file", e); } @@ -242,7 +263,14 @@ protected void calcFromStatistics(Statistics timeStatistics, Statistics[] valueS protected boolean readAndCalcFromFile() throws IOException { // start stopwatch long start = System.nanoTime(); - while (System.nanoTime() - start < leftRuntimeOfOneNextCall && seriesScanUtil.hasNextFile()) { + while (System.nanoTime() - start < leftRuntimeOfOneNextCall) { + Optional b = seriesScanUtil.hasNextFile(); + if (!b.isPresent()) { + continue; + } + if (!b.get()) { + break; + } if (canUseStatistics && seriesScanUtil.canUseCurrentFileStatistics()) { Statistics fileTimeStatistics = seriesScanUtil.currentFileTimeStatistics(); if (fileTimeStatistics.getStartTime() > curTimeRange.getMax()) { @@ -283,7 +311,14 @@ protected boolean readAndCalcFromFile() throws IOException { protected boolean readAndCalcFromChunk() throws IOException { // start stopwatch long start = System.nanoTime(); - while (System.nanoTime() - start < leftRuntimeOfOneNextCall && seriesScanUtil.hasNextChunk()) { + while (System.nanoTime() - start < leftRuntimeOfOneNextCall) { + Optional b = seriesScanUtil.hasNextChunk(); + if (!b.isPresent()) { + continue; + } + if (!b.get()) { + break; + } if (canUseStatistics && seriesScanUtil.canUseCurrentChunkStatistics()) { Statistics chunkTimeStatistics = seriesScanUtil.currentChunkTimeStatistics(); if (chunkTimeStatistics.getStartTime() > curTimeRange.getMax()) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AbstractSeriesScanOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AbstractSeriesScanOperator.java index d8c07d75cafe3..d53b792ebfb8b 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AbstractSeriesScanOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AbstractSeriesScanOperator.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.util.List; +import java.util.Optional; import java.util.concurrent.TimeUnit; public abstract class AbstractSeriesScanOperator extends AbstractDataSourceOperator { @@ -70,10 +71,19 @@ public boolean hasNext() throws Exception { * 2. consume chunk data secondly * 3. 
consume next file finally */ - if (!readPageData() && !readChunkData() && !readFileData()) { - noMoreData = true; - break; + if (readPageData()) { + continue; } + Optional b = readChunkData(); + if (!b.isPresent() || b.get()) { + continue; + } + b = readFileData(); + if (!b.isPresent() || b.get()) { + continue; + } + noMoreData = true; + break; } while (System.nanoTime() - start < maxRuntime && !resultTsBlockBuilder.isFull() @@ -87,22 +97,28 @@ public boolean hasNext() throws Exception { } } - private boolean readFileData() throws IOException { - while (seriesScanUtil.hasNextFile()) { - if (readChunkData()) { - return true; - } + protected Optional readFileData() throws IOException { + Optional b = seriesScanUtil.hasNextFile(); + if (!b.isPresent() || !b.get()) { + return b; } - return false; + b = readChunkData(); + if (!b.isPresent() || b.get()) { + return b; + } + return Optional.empty(); } - private boolean readChunkData() throws IOException { - while (seriesScanUtil.hasNextChunk()) { - if (readPageData()) { - return true; - } + protected Optional readChunkData() throws IOException { + Optional b = seriesScanUtil.hasNextChunk(); + if (!b.isPresent() || !b.get()) { + return b; } - return false; + + if (readPageData()) { + return Optional.of(true); + } + return Optional.empty(); } private boolean readPageData() throws IOException { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AlignedSeriesAggregationScanOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AlignedSeriesAggregationScanOperator.java index 01bd90ad9aba6..3da37bdb9cac2 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AlignedSeriesAggregationScanOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AlignedSeriesAggregationScanOperator.java @@ -29,6 +29,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.parameter.SeriesScanOptions; import org.apache.iotdb.db.queryengine.plan.statement.component.Ordering; +import org.apache.tsfile.common.conf.TSFileDescriptor; import org.apache.tsfile.utils.RamUsageEstimator; import java.util.List; @@ -63,6 +64,8 @@ public AlignedSeriesAggregationScanOperator( false, groupByTimeParameter, maxReturnSize, + (1L + seriesPath.getMeasurementList().size()) + * TSFileDescriptor.getInstance().getConfig().getPageSizeInByte(), canUseStatistics); } @@ -89,6 +92,8 @@ public AlignedSeriesAggregationScanOperator( outputEndTime, groupByTimeParameter, maxReturnSize, + (1L + seriesPath.getMeasurementList().size()) + * TSFileDescriptor.getInstance().getConfig().getPageSizeInByte(), canUseStatistics); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/FileLoaderUtils.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/FileLoaderUtils.java index 3d7ba745ebff5..c472b9ca607c4 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/FileLoaderUtils.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/FileLoaderUtils.java @@ -21,16 +21,20 @@ import org.apache.iotdb.commons.path.AlignedPath; import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.db.queryengine.execution.fragment.FragmentInstanceContext; import 
org.apache.iotdb.db.queryengine.execution.fragment.QueryContext; import org.apache.iotdb.db.queryengine.metric.SeriesScanCostMetricSet; import org.apache.iotdb.db.storageengine.buffer.TimeSeriesMetadataCache; import org.apache.iotdb.db.storageengine.buffer.TimeSeriesMetadataCache.TimeSeriesMetadataCacheKey; import org.apache.iotdb.db.storageengine.dataregion.modification.Modification; +import org.apache.iotdb.db.storageengine.dataregion.read.reader.chunk.DiskAlignedChunkLoader; +import org.apache.iotdb.db.storageengine.dataregion.read.reader.chunk.DiskChunkLoader; import org.apache.iotdb.db.storageengine.dataregion.read.reader.chunk.metadata.DiskAlignedChunkMetadataLoader; import org.apache.iotdb.db.storageengine.dataregion.read.reader.chunk.metadata.DiskChunkMetadataLoader; import org.apache.iotdb.db.storageengine.dataregion.read.reader.chunk.metadata.MemAlignedChunkMetadataLoader; import org.apache.iotdb.db.storageengine.dataregion.read.reader.chunk.metadata.MemChunkMetadataLoader; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.ITimeIndex; import org.apache.tsfile.file.metadata.AlignedTimeSeriesMetadata; import org.apache.tsfile.file.metadata.IChunkMetadata; @@ -74,7 +78,7 @@ private FileLoaderUtils() { public static TimeseriesMetadata loadTimeSeriesMetadata( TsFileResource resource, PartialPath seriesPath, - QueryContext context, + FragmentInstanceContext context, Filter globalTimeFilter, Set allSensors, boolean isSeq) @@ -97,8 +101,10 @@ public static TimeseriesMetadata loadTimeSeriesMetadata( new PlainDeviceID(seriesPath.getDevice()), seriesPath.getMeasurement()), allSensors, - resource.getTimeIndexType() != 1, - context.isDebug()); + context.ignoreNotExistsDevice() + || resource.getTimeIndexType() == ITimeIndex.FILE_TIME_INDEX_TYPE, + context.isDebug(), + context); if (timeSeriesMetadata != null) { long t2 = System.nanoTime(); List pathModifications = context.getPathModifications(resource, seriesPath); @@ -171,7 +177,7 @@ public static TimeseriesMetadata loadTimeSeriesMetadata( public static AlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadata( TsFileResource resource, AlignedPath alignedPath, - QueryContext context, + FragmentInstanceContext context, Filter globalTimeFilter, boolean isSeq) throws IOException { @@ -244,7 +250,7 @@ public static AlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadata( private static AlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadataFromDisk( TsFileResource resource, AlignedPath alignedPath, - QueryContext context, + FragmentInstanceContext context, Filter globalTimeFilter) throws IOException { AlignedTimeSeriesMetadata alignedTimeSeriesMetadata = null; @@ -266,8 +272,10 @@ private static AlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadataFromDisk( filePath, new TimeSeriesMetadataCacheKey(resource.getTsFileID(), deviceId, ""), allSensors, - resource.getTimeIndexType() != 1, - isDebug); + context.ignoreNotExistsDevice() + || resource.getTimeIndexType() == ITimeIndex.FILE_TIME_INDEX_TYPE, + isDebug, + context); if (timeColumn != null) { // only need time column, like count_time aggregation if (valueMeasurementList.isEmpty()) { @@ -288,8 +296,10 @@ private static AlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadataFromDisk( new TimeSeriesMetadataCacheKey( resource.getTsFileID(), deviceId, valueMeasurement), allSensors, - resource.getTimeIndexType() != 1, - isDebug); + context.ignoreNotExistsDevice() + || resource.getTimeIndexType() == 
ITimeIndex.FILE_TIME_INDEX_TYPE, + isDebug, + context); exist = (exist || (valueColumn != null)); valueTimeSeriesMetadataList.add(valueColumn); } @@ -365,4 +375,20 @@ public static List loadPageReaderList( IChunkReader chunkReader = chunkLoader.getChunkReader(chunkMetaData, globalTimeFilter); return chunkReader.loadPageReaderList(); } + + /** + * get the timestamp in file name of the chunk metadata. + * + * @param chunkMetaData the corresponding ChunkMetadata in that file. + */ + public static long getTimestampInFileName(IChunkMetadata chunkMetaData) { + IChunkLoader chunkLoader = chunkMetaData.getChunkLoader(); + if (chunkLoader instanceof DiskChunkLoader) { + return ((DiskChunkLoader) chunkLoader).getTsFileID().getTimestamp(); + } else if (chunkLoader instanceof DiskAlignedChunkLoader) { + return ((DiskAlignedChunkLoader) chunkLoader).getTsFileID().getTimestamp(); + } else { + return Long.MAX_VALUE; + } + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesAggregationScanOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesAggregationScanOperator.java index 2cf2e85107171..3f4da4c878846 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesAggregationScanOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesAggregationScanOperator.java @@ -29,6 +29,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.parameter.SeriesScanOptions; import org.apache.iotdb.db.queryengine.plan.statement.component.Ordering; +import org.apache.tsfile.common.conf.TSFileDescriptor; import org.apache.tsfile.utils.RamUsageEstimator; import java.util.List; @@ -70,6 +71,7 @@ public SeriesAggregationScanOperator( false, groupByTimeParameter, maxReturnSize, + TSFileDescriptor.getInstance().getConfig().getPageSizeInByte(), canUseStatistics); } @@ -96,6 +98,7 @@ public SeriesAggregationScanOperator( outputEndTime, groupByTimeParameter, maxReturnSize, + TSFileDescriptor.getInstance().getConfig().getPageSizeInByte(), canUseStatistics); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesScanUtil.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesScanUtil.java index f1a20ef86be34..76a817f18ef50 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesScanUtil.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesScanUtil.java @@ -31,6 +31,7 @@ import org.apache.iotdb.db.storageengine.dataregion.read.reader.chunk.MemAlignedPageReader; import org.apache.iotdb.db.storageengine.dataregion.read.reader.chunk.MemPageReader; import org.apache.iotdb.db.storageengine.dataregion.read.reader.common.DescPriorityMergeReader; +import org.apache.iotdb.db.storageengine.dataregion.read.reader.common.MergeReaderPriority; import org.apache.iotdb.db.storageengine.dataregion.read.reader.common.PriorityMergeReader; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; @@ -61,6 +62,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Objects; +import java.util.Optional; import java.util.PriorityQueue; import java.util.function.ToLongFunction; @@ -196,9 +198,14 @@ protected DescPriorityMergeReader getDescPriorityMergeReader() { // file 
level methods
  /////////////////////////////////////////////////////////////////////////////////////////////////
-  public boolean hasNextFile() throws IOException {
+  // When Optional.empty() is returned, it means that the current hasNextFile has not been fully
+  // executed. To avoid the execution time of this method exceeding the allocated time slice, it
+  // returns early in this way. When the upper-level method encounters Optional.empty(), it needs
+  // to return directly to the checkpoint method that checks the operator execution time slice.
+  public Optional<Boolean> hasNextFile() throws IOException {
     if (!paginationController.hasCurLimit()) {
-      return false;
+      return Optional.of(false);
     }
     if (!unSeqPageReaders.isEmpty()
@@ -218,21 +225,25 @@ public boolean hasNextFile() throws IOException {
     }
     if (firstTimeSeriesMetadata != null) {
-      return true;
+      return Optional.of(true);
     }
-    while (firstTimeSeriesMetadata == null
-        && (orderUtils.hasNextSeqResource()
-            || orderUtils.hasNextUnseqResource()
-            || !seqTimeSeriesMetadata.isEmpty()
-            || !unSeqTimeSeriesMetadata.isEmpty())) {
+    boolean checked = false;
+    if (orderUtils.hasNextSeqResource()
+        || orderUtils.hasNextUnseqResource()
+        || !seqTimeSeriesMetadata.isEmpty()
+        || !unSeqTimeSeriesMetadata.isEmpty()) {
       // init first time series metadata whose startTime is minimum
       tryToUnpackAllOverlappedFilesToTimeSeriesMetadata();
       // filter file based on push-down conditions
       filterFirstTimeSeriesMetadata();
+      checked = true;
     }
-    return firstTimeSeriesMetadata != null;
+    if (checked && firstTimeSeriesMetadata == null) {
+      return Optional.empty();
+    }
+    return Optional.of(firstTimeSeriesMetadata != null);
   }
   private boolean currentFileOverlapped() {
@@ -276,11 +287,16 @@ public void skipCurrentFile() {
    * This method should be called after hasNextFile() until no next chunk, make sure that all
    * overlapped chunks are consumed.
    *
+   * @return Optional<Boolean>. When Optional.empty() is returned, it means that the current
+   *     hasNextFile has not been fully executed. To avoid the execution time of this method
+   *     exceeding the allocated time slice, it returns early in this way. When the upper-level
+   *     method encounters Optional.empty(), it needs to return directly to the checkpoint method
+   *     that checks the operator execution time slice.
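+   * <p>An illustrative caller sketch (not actual IoTDB code) of how an empty result is
+   * propagated upward instead of doing more work in the current time slice:
+   * <pre>{@code
+   * Optional<Boolean> hasChunk = seriesScanUtil.hasNextChunk();
+   * if (!hasChunk.isPresent()) {
+   *   return hasChunk; // yield back to the operator's time-slice checkpoint
+   * }
+   * if (!hasChunk.get()) {
+   *   // no more chunks: fall through to file-level reading
+   * }
+   * }</pre>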
* @throws IllegalStateException illegal state */ - public boolean hasNextChunk() throws IOException { + public Optional hasNextChunk() throws IOException { if (!paginationController.hasCurLimit()) { - return false; + return Optional.of(false); } if (!unSeqPageReaders.isEmpty() @@ -296,18 +312,28 @@ public boolean hasNextChunk() throws IOException { } if (firstChunkMetadata != null) { - return true; + return Optional.of(true); // hasNextFile() has not been invoked } else if (firstTimeSeriesMetadata == null && cachedChunkMetadata.isEmpty()) { - return false; + return Optional.of(false); } - while (firstChunkMetadata == null && (!cachedChunkMetadata.isEmpty() || hasNextFile())) { + Optional hasNextFileReturnValue = null; + while (firstChunkMetadata == null) { + if (cachedChunkMetadata.isEmpty()) { + if (hasNextFileReturnValue != null) { + return Optional.empty(); + } + hasNextFileReturnValue = hasNextFile(); + if (!hasNextFileReturnValue.isPresent() || !hasNextFileReturnValue.get()) { + return hasNextFileReturnValue; + } + } initFirstChunkMetadata(); // filter chunk based on push-down conditions filterFirstChunkMetadata(); } - return firstChunkMetadata != null; + return Optional.of(firstChunkMetadata != null); } private void filterFirstChunkMetadata() { @@ -559,6 +585,7 @@ private void unpackAllOverlappedChunkMetadataToPageReaders(long endpointTime, bo private void unpackOneChunkMetaData(IChunkMetadata chunkMetaData) throws IOException { List pageReaderList = FileLoaderUtils.loadPageReaderList(chunkMetaData, scanOptions.getGlobalTimeFilter()); + long timestampInFileName = FileLoaderUtils.getTimestampInFileName(chunkMetaData); // init TsBlockBuilder for each page reader pageReaderList.forEach(p -> p.initTsBlockBuilder(getTsDataTypeList())); @@ -569,6 +596,7 @@ private void unpackOneChunkMetaData(IChunkMetadata chunkMetaData) throws IOExcep seqPageReaders.add( new VersionPageReader( context, + timestampInFileName, chunkMetaData.getVersion(), chunkMetaData.getOffsetOfChunkHeader(), iPageReader, @@ -579,6 +607,7 @@ private void unpackOneChunkMetaData(IChunkMetadata chunkMetaData) throws IOExcep seqPageReaders.add( new VersionPageReader( context, + timestampInFileName, chunkMetaData.getVersion(), chunkMetaData.getOffsetOfChunkHeader(), pageReaderList.get(i), @@ -591,6 +620,7 @@ private void unpackOneChunkMetaData(IChunkMetadata chunkMetaData) throws IOExcep unSeqPageReaders.add( new VersionPageReader( context, + timestampInFileName, chunkMetaData.getVersion(), chunkMetaData.getOffsetOfChunkHeader(), pageReader, @@ -1045,15 +1075,21 @@ private void tryToUnpackAllOverlappedFilesToTimeSeriesMetadata() throws IOExcept /* * Fill sequence TimeSeriesMetadata List until it is not empty */ - while (seqTimeSeriesMetadata.isEmpty() && orderUtils.hasNextSeqResource()) { - unpackSeqTsFileResource(); + if (seqTimeSeriesMetadata.isEmpty() && orderUtils.hasNextSeqResource()) { + // Avoid exceeding the time slice when a series cannot be found + if (!unpackSeqTsFileResource().isPresent()) { + return; + } } /* * Fill unSequence TimeSeriesMetadata Priority Queue until it is not empty */ - while (unSeqTimeSeriesMetadata.isEmpty() && orderUtils.hasNextUnseqResource()) { - unpackUnseqTsFileResource(); + if (unSeqTimeSeriesMetadata.isEmpty() && orderUtils.hasNextUnseqResource()) { + // Avoid exceeding the time slice when a series cannot be found + if (!unpackUnseqTsFileResource().isPresent()) { + return; + } } /* @@ -1136,27 +1172,40 @@ private void unpackAllOverlappedTsFilesToTimeSeriesMetadata(long endpointTime) 
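     // Note: unlike hasNextFile()/hasNextChunk(), the Optionals returned by the
     // unpack*TsFileResource() methods below are not yield signals; they carry the
     // unpacked ITimeSeriesMetadata, and empty means the resource was skipped
     // (no matching series, or a data type mismatch caused by deletion).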
unpackUnseqTsFileResource(); } while (orderUtils.hasNextSeqResource() && orderUtils.isCurSeqOverlappedWith(endpointTime)) { - unpackSeqTsFileResource(); + Optional timeSeriesMetadata = unpackSeqTsFileResource(); + // asc: if current seq tsfile's endTime >= endpointTime, we don't need to continue + // desc: if current seq tsfile's startTime <= endpointTime, we don't need to continue + if (timeSeriesMetadata.isPresent() + && orderUtils.overlappedSeqResourceSearchingNeedStop( + endpointTime, timeSeriesMetadata.get().getStatistics())) { + break; + } } } - private void unpackSeqTsFileResource() throws IOException { + private Optional unpackSeqTsFileResource() throws IOException { ITimeSeriesMetadata timeseriesMetadata = loadTimeSeriesMetadata(orderUtils.getNextSeqFileResource(true), true); // skip if data type is mismatched which may be caused by delete if (timeseriesMetadata != null && timeseriesMetadata.typeMatch(getTsDataTypeList())) { timeseriesMetadata.setSeq(true); seqTimeSeriesMetadata.add(timeseriesMetadata); + return Optional.of(timeseriesMetadata); + } else { + return Optional.empty(); } } - private void unpackUnseqTsFileResource() throws IOException { + private Optional unpackUnseqTsFileResource() throws IOException { ITimeSeriesMetadata timeseriesMetadata = loadTimeSeriesMetadata(orderUtils.getNextUnseqFileResource(true), false); // skip if data type is mismatched which may be caused by delete if (timeseriesMetadata != null && timeseriesMetadata.typeMatch(getTsDataTypeList())) { timeseriesMetadata.setSeq(false); unSeqTimeSeriesMetadata.add(timeseriesMetadata); + return Optional.of(timeseriesMetadata); + } else { + return Optional.empty(); } } @@ -1189,7 +1238,7 @@ private boolean filterAllSatisfy(Filter filter, IMetadata metadata) { protected static class VersionPageReader { private final QueryContext context; - private final PriorityMergeReader.MergeReaderPriority version; + private final MergeReaderPriority version; private final IPageReader data; private final boolean isSeq; @@ -1197,9 +1246,14 @@ protected static class VersionPageReader { private final boolean isMem; VersionPageReader( - QueryContext context, long version, long offset, IPageReader data, boolean isSeq) { + QueryContext context, + long fileTimestamp, + long version, + long offset, + IPageReader data, + boolean isSeq) { this.context = context; - this.version = new PriorityMergeReader.MergeReaderPriority(version, offset, isSeq); + this.version = new MergeReaderPriority(fileTimestamp, version, offset, isSeq); this.data = data; this.isSeq = isSeq; this.isAligned = data instanceof AlignedPageReader || data instanceof MemAlignedPageReader; @@ -1306,6 +1360,9 @@ boolean isTakeSeqAsFirst( TsFileResource getNextUnseqFileResource(boolean isDelete); void setCurSeqFileIndex(QueryDataSource dataSource); + + boolean overlappedSeqResourceSearchingNeedStop( + long endPointTime, Statistics currentStatistics); } class DescTimeOrderUtils implements TimeOrderUtils { @@ -1424,6 +1481,12 @@ public TsFileResource getNextUnseqFileResource(boolean isDelete) { public void setCurSeqFileIndex(QueryDataSource dataSource) { curSeqFileIndex = dataSource.getSeqResourcesSize() - 1; } + + @Override + public boolean overlappedSeqResourceSearchingNeedStop( + long endPointTime, Statistics currentStatistics) { + return currentStatistics.getStartTime() <= endPointTime; + } } class AscTimeOrderUtils implements TimeOrderUtils { @@ -1542,6 +1605,12 @@ public TsFileResource getNextUnseqFileResource(boolean isDelete) { public void 
setCurSeqFileIndex(QueryDataSource dataSource) { curSeqFileIndex = 0; } + + @Override + public boolean overlappedSeqResourceSearchingNeedStop( + long endPointTime, Statistics currentStatistics) { + return currentStatistics.getEndTime() >= endPointTime; + } } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/WindowManagerFactory.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/WindowManagerFactory.java index d165d721e2280..3c3fc6e616aaa 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/WindowManagerFactory.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/WindowManagerFactory.java @@ -69,6 +69,10 @@ private static VariationWindowManager genEqualEventWindowManager( return new EqualBinaryWindowManager(eventWindowParameter, ascending); case BOOLEAN: return new EqualBooleanWindowManager(eventWindowParameter, ascending); + case BLOB: + case STRING: + case TIMESTAMP: + case DATE: default: throw new UnSupportedDataTypeException( String.format( @@ -88,6 +92,12 @@ private static VariationWindowManager genVariationEventWindowManager( return new VariationFloatWindowManager(eventWindowParameter, ascending); case DOUBLE: return new VariationDoubleWindowManager(eventWindowParameter, ascending); + case TIMESTAMP: + case DATE: + case STRING: + case BOOLEAN: + case BLOB: + case TEXT: default: throw new UnSupportedDataTypeException( String.format( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/BottomInferenceWindowParameter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/BottomInferenceWindowParameter.java new file mode 100644 index 0000000000000..77953b122f4bc --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/BottomInferenceWindowParameter.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.queryengine.execution.operator.window.ainode; + +import org.apache.tsfile.utils.ReadWriteIOUtils; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; + +public class BottomInferenceWindowParameter extends InferenceWindowParameter { + + long windowSize; + + public BottomInferenceWindowParameter(long windowSize) { + this.windowSize = windowSize; + this.windowType = InferenceWindowType.TAIL; + } + + public long getWindowSize() { + return windowSize; + } + + @Override + public void serializeAttributes(ByteBuffer buffer) { + ReadWriteIOUtils.write(windowSize, buffer); + } + + @Override + public void serializeAttributes(DataOutputStream stream) throws IOException { + ReadWriteIOUtils.write(windowSize, stream); + } + + public static BottomInferenceWindowParameter deserialize(ByteBuffer byteBuffer) { + long windowSize = byteBuffer.getLong(); + return new BottomInferenceWindowParameter(windowSize); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof BottomInferenceWindowParameter)) { + return false; + } + BottomInferenceWindowParameter parameter = (BottomInferenceWindowParameter) obj; + return windowSize == parameter.windowSize; + } + + @Override + public int hashCode() { + return Long.hashCode(windowSize); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/CountInferenceWindow.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/CountInferenceWindow.java new file mode 100644 index 0000000000000..723e875934641 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/CountInferenceWindow.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.queryengine.execution.operator.window.ainode; + +public class CountInferenceWindow extends InferenceWindow { + + private final long interval; + private final long step; + + public CountInferenceWindow(long interval, long step) { + super(InferenceWindowType.COUNT); + this.interval = interval; + this.step = step; + } + + public long getInterval() { + return interval; + } + + public long getStep() { + return step; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/CountInferenceWindowParameter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/CountInferenceWindowParameter.java new file mode 100644 index 0000000000000..6a6371c4a70ad --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/CountInferenceWindowParameter.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.queryengine.execution.operator.window.ainode; + +import org.apache.tsfile.utils.ReadWriteIOUtils; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Objects; + +public class CountInferenceWindowParameter extends InferenceWindowParameter { + + private final long interval; + private final long step; + + public CountInferenceWindowParameter(long interval, long step) { + this.windowType = InferenceWindowType.COUNT; + this.interval = interval; + this.step = step; + } + + public long getInterval() { + return interval; + } + + public long getStep() { + return step; + } + + @Override + public void serializeAttributes(ByteBuffer buffer) { + ReadWriteIOUtils.write(interval, buffer); + ReadWriteIOUtils.write(step, buffer); + } + + @Override + public void serializeAttributes(DataOutputStream stream) throws IOException { + ReadWriteIOUtils.write(interval, stream); + ReadWriteIOUtils.write(step, stream); + } + + public static CountInferenceWindowParameter deserialize(ByteBuffer byteBuffer) { + long interval = ReadWriteIOUtils.readLong(byteBuffer); + long step = ReadWriteIOUtils.readLong(byteBuffer); + return new CountInferenceWindowParameter(interval, step); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof CountInferenceWindowParameter)) { + return false; + } + CountInferenceWindowParameter parameter = (CountInferenceWindowParameter) obj; + return interval == parameter.interval && step == parameter.step; + } + + @Override + public int hashCode() { + return Objects.hash(interval, step); + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/HeadInferenceWindow.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/HeadInferenceWindow.java new file mode 100644 index 0000000000000..8e4f2cc65cb8e --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/HeadInferenceWindow.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.queryengine.execution.operator.window.ainode; + +public class HeadInferenceWindow extends InferenceWindow { + private final long windowSize; + + public HeadInferenceWindow(long windowSize) { + super(InferenceWindowType.HEAD); + this.windowSize = windowSize; + } + + public long getWindowSize() { + return windowSize; + } +} diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/plugin/env/PipeTaskConnectorRuntimeEnvironment.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/InferenceWindow.java similarity index 74% rename from iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/plugin/env/PipeTaskConnectorRuntimeEnvironment.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/InferenceWindow.java index 39b09618beb52..e5c00f910d0cd 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/plugin/env/PipeTaskConnectorRuntimeEnvironment.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/InferenceWindow.java @@ -17,11 +17,16 @@ * under the License. */ -package org.apache.iotdb.commons.pipe.config.plugin.env; +package org.apache.iotdb.db.queryengine.execution.operator.window.ainode; -public class PipeTaskConnectorRuntimeEnvironment extends PipeTaskRuntimeEnvironment { +public class InferenceWindow { + private final InferenceWindowType type; - public PipeTaskConnectorRuntimeEnvironment(String pipeName, long creationTime, int regionId) { - super(pipeName, creationTime, regionId); + public InferenceWindow(InferenceWindowType type) { + this.type = type; + } + + public InferenceWindowType getType() { + return type; } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/InferenceWindowParameter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/InferenceWindowParameter.java new file mode 100644 index 0000000000000..b9ab1343c3b83 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/InferenceWindowParameter.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.queryengine.execution.operator.window.ainode; + +import org.apache.iotdb.db.exception.sql.SemanticException; + +import org.apache.tsfile.utils.ReadWriteIOUtils; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; + +public abstract class InferenceWindowParameter { + + protected InferenceWindowType windowType; + + public InferenceWindowType getWindowType() { + return windowType; + } + + public abstract void serializeAttributes(ByteBuffer buffer); + + public abstract void serializeAttributes(DataOutputStream stream) throws IOException; + + public void serialize(ByteBuffer buffer) { + ReadWriteIOUtils.write(windowType.ordinal(), buffer); + serializeAttributes(buffer); + } + + public void serialize(DataOutputStream stream) throws IOException { + ReadWriteIOUtils.write(windowType.ordinal(), stream); + serializeAttributes(stream); + } + + public static InferenceWindowParameter deserialize(ByteBuffer byteBuffer) { + InferenceWindowType windowType = + InferenceWindowType.values()[ReadWriteIOUtils.readInt(byteBuffer)]; + if (windowType == InferenceWindowType.TAIL) { + return BottomInferenceWindowParameter.deserialize(byteBuffer); + } else if (windowType == InferenceWindowType.COUNT) { + return CountInferenceWindowParameter.deserialize(byteBuffer); + } else { + throw new SemanticException("Unsupported inference window type: " + windowType); + } + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/InferenceWindowType.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/InferenceWindowType.java new file mode 100644 index 0000000000000..f792327396f94 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/InferenceWindowType.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.db.queryengine.execution.operator.window.ainode; + +public enum InferenceWindowType { + HEAD, + TAIL, + COUNT +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/TailInferenceWindow.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/TailInferenceWindow.java new file mode 100644 index 0000000000000..3bbc568cac2be --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/window/ainode/TailInferenceWindow.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.queryengine.execution.operator.window.ainode; + +public class TailInferenceWindow extends InferenceWindow { + + private final long windowSize; + + public TailInferenceWindow(long windowSize) { + super(InferenceWindowType.TAIL); + this.windowSize = windowSize; + } + + public long getWindowSize() { + return windowSize; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/schedule/DriverScheduler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/schedule/DriverScheduler.java index 09dab3d7819e4..a629c151fa901 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/schedule/DriverScheduler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/schedule/DriverScheduler.java @@ -43,7 +43,6 @@ import org.apache.iotdb.db.queryengine.execution.schedule.queue.multilevelqueue.MultilevelPriorityQueue; import org.apache.iotdb.db.queryengine.execution.schedule.task.DriverTask; import org.apache.iotdb.db.queryengine.execution.schedule.task.DriverTaskStatus; -import org.apache.iotdb.db.queryengine.metric.DriverSchedulerMetricSet; import org.apache.iotdb.db.storageengine.rescon.quotas.DataNodeThrottleQuotaManager; import org.apache.iotdb.db.utils.SetThreadName; import org.apache.iotdb.mpp.rpc.thrift.TFragmentInstanceId; @@ -66,15 +65,10 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; -import static org.apache.iotdb.db.queryengine.metric.DriverSchedulerMetricSet.BLOCK_QUEUED_TIME; -import static org.apache.iotdb.db.queryengine.metric.DriverSchedulerMetricSet.READY_QUEUED_TIME; - /** The manager of fragment instances scheduling. 
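 * Time a task spends in the blocked and ready queues is accounted to its owning
 * FragmentInstanceContext (see blockedToReady and readyToRunning below).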
*/ public class DriverScheduler implements IDriverScheduler, IService { private static final Logger logger = LoggerFactory.getLogger(DriverScheduler.class); - private static final DriverSchedulerMetricSet DRIVER_SCHEDULER_METRIC_SET = - DriverSchedulerMetricSet.getInstance(); private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig(); private static final double LEVEL_TIME_MULTIPLIER = 2; @@ -480,7 +474,6 @@ public void blockedToReady(DriverTask task) { .getDriverContext() .getFragmentInstanceContext() .addBlockQueuedTime(blockQueuedTime); - DRIVER_SCHEDULER_METRIC_SET.recordTaskQueueTime(BLOCK_QUEUED_TIME, blockQueuedTime); task.setLastEnterReadyQueueTime(currentTime); task.resetLevelScheduledTime(); readyQueue.repush(task); @@ -504,7 +497,6 @@ public boolean readyToRunning(DriverTask task) { .getDriverContext() .getFragmentInstanceContext() .addReadyQueuedTime(readyQueuedTime); - DRIVER_SCHEDULER_METRIC_SET.recordTaskQueueTime(READY_QUEUED_TIME, readyQueuedTime); } finally { task.unlock(); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/schedule/queue/IndexedBlockingQueue.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/schedule/queue/IndexedBlockingQueue.java index 1a63f01ffffb8..632b5b48ed6f2 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/schedule/queue/IndexedBlockingQueue.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/schedule/queue/IndexedBlockingQueue.java @@ -38,6 +38,9 @@ */ public abstract class IndexedBlockingQueue { + public static final String TOO_MANY_CONCURRENT_QUERIES_ERROR_MSG = + "The system can't allow more queries."; + protected final int capacity; protected final E queryHolder; protected int size; @@ -87,7 +90,7 @@ public synchronized void push(E element) { if (element == null) { throw new NullPointerException("pushed element is null"); } - Preconditions.checkState(size < capacity, "The system can't allow more queries."); + Preconditions.checkState(size < capacity, TOO_MANY_CONCURRENT_QUERIES_ERROR_MSG); pushToQueue(element); size++; this.notifyAll(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/schedule/task/DriverTaskId.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/schedule/task/DriverTaskId.java index 865af681b370d..3dff2044a6ee6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/schedule/task/DriverTaskId.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/schedule/task/DriverTaskId.java @@ -36,11 +36,12 @@ public class DriverTaskId implements ID, Comparable { // Currently, we just save pipelineId in driverTask since it's one-to-one relation. private final int pipelineId; private final String fullId; + private static final String EMPTY_FULL_ID = "EmptyFullId"; public DriverTaskId(FragmentInstanceId id, int pipelineId) { this.fragmentInstanceId = id; this.pipelineId = pipelineId; - this.fullId = String.format("%s.%d", id.getFullId(), pipelineId); + this.fullId = String.format("%s.%d", id == null ? 
EMPTY_FULL_ID : id.getFullId(), pipelineId); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/metric/SeriesScanCostMetricSet.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/metric/SeriesScanCostMetricSet.java index a50381a75b343..ab6fb12f59631 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/metric/SeriesScanCostMetricSet.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/metric/SeriesScanCostMetricSet.java @@ -24,6 +24,7 @@ import org.apache.iotdb.metrics.AbstractMetricService; import org.apache.iotdb.metrics.impl.DoNothingMetricManager; import org.apache.iotdb.metrics.metricsets.IMetricSet; +import org.apache.iotdb.metrics.type.Counter; import org.apache.iotdb.metrics.type.Histogram; import org.apache.iotdb.metrics.type.Timer; import org.apache.iotdb.metrics.utils.MetricLevel; @@ -46,9 +47,103 @@ public static SeriesScanCostMetricSet getInstance() { public static final String NON_ALIGNED = "non_aligned"; public static final String MEM = "mem"; public static final String DISK = "disk"; + public static final String MEM_AND_DISK = "mem_and_disk"; + public static final String SEQUENCE = "sequence"; public static final String UNSEQUENCE = "unsequence"; + public static final String SEQ_AND_UNSEQ = "seq_and_unseq"; + + public static final String BLOOM_FILTER = "bloom_filter"; + public static final String TIMESERIES_METADATA = "timeseries_metadata"; + public static final String CHUNK = "chunk"; + + private Histogram loadBloomFilterFromCacheCountHistogram = + DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + private Histogram loadBloomFilterFromDiskCountHistogram = + DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + private Counter loadBloomFilterActualIOSizeCounter = DoNothingMetricManager.DO_NOTHING_COUNTER; + private Timer loadBloomFilterTime = DoNothingMetricManager.DO_NOTHING_TIMER; + + public void recordBloomFilterMetrics( + long loadBloomFilterFromCacheCount, + long loadBloomFilterFromDiskCount, + long loadBloomFilterActualIOSize, + long loadBloomFilterNanoTime) { + loadBloomFilterFromCacheCountHistogram.update(loadBloomFilterFromCacheCount); + loadBloomFilterFromDiskCountHistogram.update(loadBloomFilterFromDiskCount); + loadBloomFilterActualIOSizeCounter.inc(loadBloomFilterActualIOSize); + loadBloomFilterTime.updateNanos(loadBloomFilterNanoTime); + } + + private void bindBloomFilter(AbstractMetricService metricService) { + loadBloomFilterFromCacheCountHistogram = + metricService.getOrCreateHistogram( + Metric.METRIC_QUERY_CACHE.toString(), + MetricLevel.IMPORTANT, + Tag.TYPE.toString(), + BLOOM_FILTER, + Tag.FROM.toString(), + CACHE); + loadBloomFilterFromDiskCountHistogram = + metricService.getOrCreateHistogram( + Metric.METRIC_QUERY_CACHE.toString(), + MetricLevel.IMPORTANT, + Tag.TYPE.toString(), + BLOOM_FILTER, + Tag.FROM.toString(), + DISK); + loadBloomFilterActualIOSizeCounter = + metricService.getOrCreateCounter( + Metric.QUERY_DISK_READ.toString(), + MetricLevel.IMPORTANT, + Tag.TYPE.toString(), + BLOOM_FILTER); + loadBloomFilterTime = + metricService.getOrCreateTimer( + Metric.SERIES_SCAN_COST.toString(), + MetricLevel.IMPORTANT, + Tag.STAGE.toString(), + BLOOM_FILTER, + Tag.TYPE.toString(), + SEQ_AND_UNSEQ, + Tag.FROM.toString(), + MEM_AND_DISK); + } + + private void unbindBloomFilter(AbstractMetricService metricService) { + loadBloomFilterFromCacheCountHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + loadBloomFilterFromDiskCountHistogram = 
DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + loadBloomFilterActualIOSizeCounter = DoNothingMetricManager.DO_NOTHING_COUNTER; + loadBloomFilterTime = DoNothingMetricManager.DO_NOTHING_TIMER; + + metricService.remove( + MetricType.HISTOGRAM, + Metric.METRIC_QUERY_CACHE.toString(), + Tag.TYPE.toString(), + BLOOM_FILTER, + Tag.FROM.toString(), + CACHE); + metricService.remove( + MetricType.HISTOGRAM, + Metric.METRIC_QUERY_CACHE.toString(), + Tag.TYPE.toString(), + BLOOM_FILTER, + Tag.FROM.toString(), + DISK); + metricService.remove( + MetricType.COUNTER, Metric.QUERY_DISK_READ.toString(), Tag.TYPE.toString(), BLOOM_FILTER); + metricService.remove( + MetricType.TIMER, + Metric.SERIES_SCAN_COST.toString(), + Tag.STAGE.toString(), + BLOOM_FILTER, + Tag.TYPE.toString(), + SEQ_AND_UNSEQ, + Tag.FROM.toString(), + MEM_AND_DISK); + } + private Histogram loadTimeSeriesMetadataDiskSeqHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; private Histogram loadTimeSeriesMetadataDiskUnSeqHistogram = @@ -72,10 +167,18 @@ public static SeriesScanCostMetricSet getInstance() { private Timer loadTimeSeriesMetadataMemSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; private Timer loadTimeSeriesMetadataMemUnSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; - public Timer loadTimeSeriesMetadataAlignedDiskSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; - public Timer loadTimeSeriesMetadataAlignedDiskUnSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; - public Timer loadTimeSeriesMetadataAlignedMemSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; - public Timer loadTimeSeriesMetadataAlignedMemUnSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; + private Timer loadTimeSeriesMetadataAlignedDiskSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; + private Timer loadTimeSeriesMetadataAlignedDiskUnSeqTime = + DoNothingMetricManager.DO_NOTHING_TIMER; + private Timer loadTimeSeriesMetadataAlignedMemSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; + private Timer loadTimeSeriesMetadataAlignedMemUnSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; + + private Histogram loadTimeSeriesMetadataFromCacheCountHistogram = + DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + private Histogram loadTimeSeriesMetadataFromDiskCountHistogram = + DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + private Counter loadTimeSeriesMetadataActualIOSizeCounter = + DoNothingMetricManager.DO_NOTHING_COUNTER; public void recordNonAlignedTimeSeriesMetadataCount(long c1, long c2, long c3, long c4) { loadTimeSeriesMetadataDiskSeqHistogram.update(c1); @@ -105,6 +208,15 @@ public void recordAlignedTimeSeriesMetadataTime(long t1, long t2, long t3, long loadTimeSeriesMetadataAlignedMemUnSeqTime.updateNanos(t4); } + public void recordTimeSeriesMetadataMetrics( + long loadTimeSeriesMetadataFromCacheCount, + long loadTimeSeriesMetadataFromDiskCount, + long loadTimeSeriesMetadataActualIOSize) { + loadTimeSeriesMetadataFromCacheCountHistogram.update(loadTimeSeriesMetadataFromCacheCount); + loadTimeSeriesMetadataFromDiskCountHistogram.update(loadTimeSeriesMetadataFromDiskCount); + loadTimeSeriesMetadataActualIOSizeCounter.inc(loadTimeSeriesMetadataActualIOSize); + } + private void bindTimeseriesMetadata(AbstractMetricService metricService) { loadTimeSeriesMetadataDiskSeqHistogram = metricService.getOrCreateHistogram( @@ -273,6 +385,57 @@ private void bindAlignedTimeseriesMetadata(AbstractMetricService metricService) MEM); } + private void bindTimeSeriesMetadataCache(AbstractMetricService metricService) { + loadTimeSeriesMetadataFromCacheCountHistogram = 
+ metricService.getOrCreateHistogram( + Metric.METRIC_QUERY_CACHE.toString(), + MetricLevel.IMPORTANT, + Tag.TYPE.toString(), + TIMESERIES_METADATA, + Tag.FROM.toString(), + CACHE); + loadTimeSeriesMetadataFromDiskCountHistogram = + metricService.getOrCreateHistogram( + Metric.METRIC_QUERY_CACHE.toString(), + MetricLevel.IMPORTANT, + Tag.TYPE.toString(), + TIMESERIES_METADATA, + Tag.FROM.toString(), + DISK); + loadTimeSeriesMetadataActualIOSizeCounter = + metricService.getOrCreateCounter( + Metric.QUERY_DISK_READ.toString(), + MetricLevel.IMPORTANT, + Tag.TYPE.toString(), + TIMESERIES_METADATA); + } + + private void unbindTimeSeriesMetadataCache(AbstractMetricService metricService) { + loadTimeSeriesMetadataFromCacheCountHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + loadTimeSeriesMetadataFromDiskCountHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + loadTimeSeriesMetadataActualIOSizeCounter = DoNothingMetricManager.DO_NOTHING_COUNTER; + + metricService.remove( + MetricType.HISTOGRAM, + Metric.METRIC_QUERY_CACHE.toString(), + Tag.TYPE.toString(), + TIMESERIES_METADATA, + Tag.FROM.toString(), + CACHE); + metricService.remove( + MetricType.HISTOGRAM, + Metric.METRIC_QUERY_CACHE.toString(), + Tag.TYPE.toString(), + TIMESERIES_METADATA, + Tag.FROM.toString(), + DISK); + metricService.remove( + MetricType.COUNTER, + Metric.QUERY_DISK_READ.toString(), + Tag.TYPE.toString(), + TIMESERIES_METADATA); + } + private void unbindTimeseriesMetadata(AbstractMetricService metricService) { loadTimeSeriesMetadataDiskSeqHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; loadTimeSeriesMetadataDiskUnSeqHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; @@ -744,6 +907,10 @@ private void unbindChunkMetadataFilter(AbstractMetricService metricService) { private Timer constructChunkReadersNonAlignedMemTimer = DoNothingMetricManager.DO_NOTHING_TIMER; private Timer constructChunkReadersNonAlignedDiskTimer = DoNothingMetricManager.DO_NOTHING_TIMER; + private Histogram loadChunkFromCacheCountHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + private Histogram loadChunkFromDiskCountHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + private Counter loadChunkActualIOSizeCounter = DoNothingMetricManager.DO_NOTHING_COUNTER; + public void recordConstructChunkReadersCount( long alignedMemCount, long alignedDiskCount, @@ -763,6 +930,58 @@ public void recordConstructChunkReadersTime( constructChunkReadersNonAlignedDiskTimer.updateNanos(nonAlignedDiskTime); } + public void recordChunkMetrics( + long loadChunkFromCacheCount, long loadChunkFromDiskCount, long loadChunkActualIOSize) { + loadChunkFromCacheCountHistogram.update(loadChunkFromCacheCount); + loadChunkFromDiskCountHistogram.update(loadChunkFromDiskCount); + loadChunkActualIOSizeCounter.inc(loadChunkActualIOSize); + } + + private void bindChunk(AbstractMetricService metricService) { + loadChunkFromCacheCountHistogram = + metricService.getOrCreateHistogram( + Metric.METRIC_QUERY_CACHE.toString(), + MetricLevel.IMPORTANT, + Tag.TYPE.toString(), + CHUNK, + Tag.FROM.toString(), + CACHE); + loadChunkFromDiskCountHistogram = + metricService.getOrCreateHistogram( + Metric.METRIC_QUERY_CACHE.toString(), + MetricLevel.IMPORTANT, + Tag.TYPE.toString(), + CHUNK, + Tag.FROM.toString(), + DISK); + loadChunkActualIOSizeCounter = + metricService.getOrCreateCounter( + Metric.QUERY_DISK_READ.toString(), MetricLevel.IMPORTANT, Tag.TYPE.toString(), CHUNK); + } + + private void unbindChunk(AbstractMetricService metricService) { + 
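    // The histogram and counter fields are reset to do-nothing implementations before
    // the metrics are deregistered, so a concurrent recordChunkMetrics() call degrades
    // to a no-op instead of updating a metric that has already been removed.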
loadChunkFromCacheCountHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + loadChunkFromDiskCountHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + loadChunkActualIOSizeCounter = DoNothingMetricManager.DO_NOTHING_COUNTER; + + metricService.remove( + MetricType.HISTOGRAM, + Metric.METRIC_QUERY_CACHE.toString(), + Tag.TYPE.toString(), + CHUNK, + Tag.FROM.toString(), + CACHE); + metricService.remove( + MetricType.HISTOGRAM, + Metric.METRIC_QUERY_CACHE.toString(), + Tag.TYPE.toString(), + CHUNK, + Tag.FROM.toString(), + DISK); + metricService.remove( + MetricType.COUNTER, Metric.QUERY_DISK_READ.toString(), Tag.TYPE.toString(), CHUNK); + } + private void bindConstructChunkReader(AbstractMetricService metricService) { constructChunkReadersAlignedMemHistogram = metricService.getOrCreateHistogram( @@ -1249,8 +1468,10 @@ private void unbindBuildTsBlockFromMergeReader(AbstractMetricService metricServi @Override public void bindTo(AbstractMetricService metricService) { + bindBloomFilter(metricService); bindTimeseriesMetadata(metricService); bindAlignedTimeseriesMetadata(metricService); + bindTimeSeriesMetadataCache(metricService); bindReadTimeseriesMetadata(metricService); bindTimeseriesMetadataModification(metricService); bindLoadChunkMetadataList(metricService); @@ -1258,6 +1479,7 @@ public void bindTo(AbstractMetricService metricService) { bindChunkMetadataFilter(metricService); bindConstructChunkReader(metricService); bindReadChunk(metricService); + bindChunk(metricService); bindInitChunkReader(metricService); bindTsBlockFromPageReader(metricService); bindBuildTsBlockFromMergeReader(metricService); @@ -1265,7 +1487,9 @@ public void bindTo(AbstractMetricService metricService) { @Override public void unbindFrom(AbstractMetricService metricService) { + unbindBloomFilter(metricService); unbindTimeseriesMetadata(metricService); + unbindTimeSeriesMetadataCache(metricService); unbindReadTimeseriesMetadata(metricService); unbindTimeseriesMetadataModification(metricService); unbindLoadChunkMetadataList(metricService); @@ -1273,6 +1497,7 @@ public void unbindFrom(AbstractMetricService metricService) { unbindChunkMetadataFilter(metricService); unbindConstructChunkReader(metricService); unbindReadChunk(metricService); + unbindChunk(metricService); unbindInitChunkReader(metricService); unbindTsBlockFromPageReader(metricService); unbindBuildTsBlockFromMergeReader(metricService); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/Coordinator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/Coordinator.java index 5d8c7830e3f47..ed76b837e801d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/Coordinator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/Coordinator.java @@ -26,6 +26,8 @@ import org.apache.iotdb.commons.client.sync.SyncDataNodeInternalServiceClient; import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; import org.apache.iotdb.commons.concurrent.ThreadName; +import org.apache.iotdb.commons.conf.CommonConfig; +import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.conf.IoTDBConstant; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; @@ -36,7 +38,6 @@ import org.apache.iotdb.db.queryengine.execution.QueryIdGenerator; import org.apache.iotdb.db.queryengine.plan.analyze.IPartitionFetcher; import org.apache.iotdb.db.queryengine.plan.analyze.lock.DataNodeSchemaLockManager; 
-import org.apache.iotdb.db.queryengine.plan.analyze.lock.SchemaLockType; import org.apache.iotdb.db.queryengine.plan.analyze.schema.ISchemaFetcher; import org.apache.iotdb.db.queryengine.plan.execution.ExecutionResult; import org.apache.iotdb.db.queryengine.plan.execution.IQueryExecution; @@ -53,10 +54,10 @@ import java.util.ArrayList; import java.util.List; -import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ThreadPoolExecutor; import java.util.function.BiFunction; import static org.apache.iotdb.commons.utils.StatusUtils.needRetry; @@ -72,10 +73,14 @@ public class Coordinator { private static final Logger LOGGER = LoggerFactory.getLogger(Coordinator.class); private static final int COORDINATOR_SCHEDULED_EXECUTOR_SIZE = 10; private static final IoTDBConfig CONFIG = IoTDBDescriptor.getInstance().getConfig(); + private static final CommonConfig COMMON_CONFIG = CommonDescriptor.getInstance().getConfig(); private static final Logger SLOW_SQL_LOGGER = LoggerFactory.getLogger(IoTDBConstant.SLOW_SQL_LOGGER_NAME); + private static final Logger SAMPLED_QUERIES_LOGGER = + LoggerFactory.getLogger(IoTDBConstant.SAMPLED_QUERIES_LOGGER_NAME); + private static final IClientManager SYNC_INTERNAL_SERVICE_CLIENT_MANAGER = new IClientManager.Factory() @@ -91,6 +96,7 @@ public class Coordinator { private final ExecutorService executor; private final ExecutorService writeOperationExecutor; private final ScheduledExecutorService scheduledExecutor; + private final ExecutorService dispatchExecutor; private final QueryIdGenerator queryIdGenerator = new QueryIdGenerator(IoTDBDescriptor.getInstance().getConfig().getDataNodeId()); @@ -104,12 +110,20 @@ private Coordinator() { this.executor = getQueryExecutor(); this.writeOperationExecutor = getWriteExecutor(); this.scheduledExecutor = getScheduledExecutor(); + int dispatchThreadNum = Math.max(20, Runtime.getRuntime().availableProcessors() * 2); + this.dispatchExecutor = + IoTDBThreadPoolFactory.newCachedThreadPool( + ThreadName.FRAGMENT_INSTANCE_DISPATCH.getName(), + dispatchThreadNum, + dispatchThreadNum, + new ThreadPoolExecutor.CallerRunsPolicy()); } private ExecutionResult execution( long queryId, SessionInfo session, String sql, + boolean userQuery, BiFunction iQueryExecutionFactory) { long startTime = System.currentTimeMillis(); QueryId globalQueryId = queryIdGenerator.createNextQueryId(); @@ -126,6 +140,7 @@ private ExecutionResult execution( session, DataNodeEndPoints.LOCAL_HOST_DATA_BLOCK_ENDPOINT, DataNodeEndPoints.LOCAL_HOST_INTERNAL_ENDPOINT); + queryContext.setUserQuery(userQuery); IQueryExecution execution = iQueryExecutionFactory.apply(queryContext, startTime); if (execution.isQuery()) { queryExecutionMap.put(queryId, execution); @@ -144,14 +159,7 @@ private ExecutionResult execution( if (queryContext != null) { queryContext.releaseAllMemoryReservedForFrontEnd(); } - if (queryContext != null && !queryContext.getAcquiredLockNumMap().isEmpty()) { - Map lockMap = queryContext.getAcquiredLockNumMap(); - for (Map.Entry entry : lockMap.entrySet()) { - for (int i = 0; i < entry.getValue(); i++) { - DataNodeSchemaLockManager.getInstance().releaseReadLock(entry.getKey()); - } - } - } + DataNodeSchemaLockManager.getInstance().releaseReadLock(queryContext); } } @@ -164,7 +172,7 @@ public ExecutionResult executeForTreeModel( IPartitionFetcher partitionFetcher, ISchemaFetcher schemaFetcher) { return executeForTreeModel( 
-        statement, queryId, session, sql, partitionFetcher, schemaFetcher, Long.MAX_VALUE);
+        statement, queryId, session, sql, partitionFetcher, schemaFetcher, Long.MAX_VALUE, false);
   }
 
   public ExecutionResult executeForTreeModel(
@@ -174,11 +182,13 @@ public ExecutionResult executeForTreeModel(
       String sql,
       IPartitionFetcher partitionFetcher,
       ISchemaFetcher schemaFetcher,
-      long timeOut) {
+      long timeOut,
+      boolean userQuery) {
     return execution(
         queryId,
         session,
         sql,
+        userQuery,
         ((queryContext, startTime) ->
             createQueryExecutionForTreeModel(
                 statement,
@@ -263,14 +273,28 @@ public void cleanupQueryExecution(
     LOGGER.debug("[CleanUpQuery]]");
     queryExecution.stopAndCleanup(t);
     queryExecutionMap.remove(queryId);
-    if (queryExecution.isQuery()) {
+    if (queryExecution.isQuery() && queryExecution.isUserQuery()) {
       long costTime = queryExecution.getTotalExecutionTime();
+      // print slow query
       if (costTime / 1_000_000 >= CONFIG.getSlowQueryThreshold()) {
         SLOW_SQL_LOGGER.info(
             "Cost: {} ms, {}",
             costTime / 1_000_000,
             getContentOfRequest(nativeApiRequest, queryExecution));
       }
+
+      // only sample successful queries
+      if (t == null && COMMON_CONFIG.isEnableQuerySampling()) { // sampling is enabled
+        String queryRequest = getContentOfRequest(nativeApiRequest, queryExecution);
+        if (COMMON_CONFIG.isQuerySamplingHasRateLimit()) {
+          if (COMMON_CONFIG.getQuerySamplingRateLimiter().tryAcquire(queryRequest.length())) {
+            SAMPLED_QUERIES_LOGGER.info(queryRequest);
+          }
+        } else {
+          // no limit, always sample
+          SAMPLED_QUERIES_LOGGER.info(queryRequest);
+        }
+      }
     }
   }
 }
@@ -303,4 +327,8 @@ public long getTotalExecutionTime(long queryId) {
     }
     return -1L;
   }
+
+  public ExecutorService getDispatchExecutor() {
+    return dispatchExecutor;
+  }
 }
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/Analysis.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/Analysis.java
index 94a1e518b74ba..c3f31fa8a3b45 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/Analysis.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/Analysis.java
@@ -25,6 +25,7 @@
 import org.apache.iotdb.common.rpc.thrift.TSStatus;
 import org.apache.iotdb.common.rpc.thrift.TSchemaNode;
 import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot;
+import org.apache.iotdb.commons.model.ModelInformation;
 import org.apache.iotdb.commons.partition.DataPartition;
 import org.apache.iotdb.commons.partition.SchemaPartition;
 import org.apache.iotdb.commons.path.PartialPath;
@@ -49,6 +50,7 @@
 import org.apache.iotdb.db.queryengine.plan.planner.plan.parameter.GroupByTimeParameter;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.parameter.IntoPathDescriptor;
 import org.apache.iotdb.db.queryengine.plan.planner.plan.parameter.OrderByParameter;
+import org.apache.iotdb.db.queryengine.plan.planner.plan.parameter.model.ModelInferenceDescriptor;
 import org.apache.iotdb.db.queryengine.plan.statement.Statement;
 import org.apache.iotdb.db.queryengine.plan.statement.StatementType;
 import org.apache.iotdb.db.queryengine.plan.statement.component.Ordering;
@@ -109,6 +111,10 @@ public class Analysis implements IAnalysis {
   // map from device name to series/aggregation under this device
   private Set sourceExpressions;
 
+  // As an optimization, nothing will be placed in this structure when the
+  // source expressions are not used later.
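For reference, the sampling branch added to cleanupQueryExecution above reduces to the following decision. This is a minimal, self-contained sketch rather than part of the patch: it assumes the limiter behind CommonConfig#getQuerySamplingRateLimiter() is (or behaves like) Guava's RateLimiter with permits charged per character of logged SQL, and the 64 KB/s budget is an invented placeholder.

import com.google.common.util.concurrent.RateLimiter;

final class QuerySamplingDecision {

  // hypothetical budget: roughly 64 KB of sampled SQL text per second
  private final RateLimiter limiter = RateLimiter.create(64 * 1024);

  /** Returns true if a finished, successful query should go to the sampled-queries log. */
  boolean shouldSample(String queryRequest, boolean samplingEnabled, boolean hasRateLimit) {
    if (!samplingEnabled) {
      return false; // sampling switched off entirely
    }
    if (!hasRateLimit) {
      return true; // no limit configured: always sample
    }
    // tryAcquire never blocks; when the budget is exhausted the query is simply skipped
    return limiter.tryAcquire(queryRequest.length());
  }
}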
+ private boolean shouldHaveSourceExpression; + // input expressions of aggregations to be calculated private Set sourceTransformExpressions = new HashSet<>(); @@ -229,7 +235,9 @@ aggregation results last_value(temperature) and last_value(status), whereas buck // Key: non-writable view expression, Value: corresponding source expressions private Map> lastQueryNonWritableViewSourceExpressionMap; - private Set lastQueryBaseExpressions; + private Map> lastQueryOutputPathToSourceExpressionMap; + + private Set deviceExistViewSet; // header of result dataset private DatasetHeader respDatasetHeader; @@ -237,6 +245,8 @@ aggregation results last_value(temperature) and last_value(status), whereas buck // indicate whether the Nodes produce source data are VirtualSourceNodes private boolean isVirtualSource = false; + private ModelInferenceDescriptor modelInferenceDescriptor; + ///////////////////////////////////////////////////////////////////////////////////////////////// // SELECT INTO Analysis ///////////////////////////////////////////////////////////////////////////////////////////////// @@ -606,6 +616,14 @@ public void setSourceExpressions(Set sourceExpressions) { this.sourceExpressions = sourceExpressions; } + public void setShouldHaveSourceExpression(boolean shouldHaveSourceExpression) { + this.shouldHaveSourceExpression = shouldHaveSourceExpression; + } + + public boolean shouldHaveSourceExpression() { + return shouldHaveSourceExpression; + } + public Set getSourceTransformExpressions() { return sourceTransformExpressions; } @@ -871,12 +889,21 @@ public void setTimeseriesOrderingForLastQuery(Ordering timeseriesOrderingForLast this.timeseriesOrderingForLastQuery = timeseriesOrderingForLastQuery; } - public Set getLastQueryBaseExpressions() { - return this.lastQueryBaseExpressions; + public Map> getLastQueryOutputPathToSourceExpressionMap() { + return lastQueryOutputPathToSourceExpressionMap; } - public void setLastQueryBaseExpressions(Set lastQueryBaseExpressions) { - this.lastQueryBaseExpressions = lastQueryBaseExpressions; + public void setLastQueryOutputPathToSourceExpressionMap( + Map> lastQueryOutputPathToSourceExpressionMap) { + this.lastQueryOutputPathToSourceExpressionMap = lastQueryOutputPathToSourceExpressionMap; + } + + public Set getDeviceExistViewSet() { + return deviceExistViewSet; + } + + public void setDeviceExistViewSet(Set deviceExistViewSet) { + this.deviceExistViewSet = deviceExistViewSet; } public Map> getLastQueryNonWritableViewSourceExpressionMap() { @@ -892,6 +919,21 @@ public Map getOutputDeviceToQueriedDevicesMap() { return outputDeviceToQueriedDevicesMap; } + public ModelInferenceDescriptor getModelInferenceDescriptor() { + return modelInferenceDescriptor; + } + + public ModelInformation getModelInformation() { + if (modelInferenceDescriptor == null) { + return null; + } + return modelInferenceDescriptor.getModelInformation(); + } + + public void setModelInferenceDescriptor(ModelInferenceDescriptor modelInferenceDescriptor) { + this.modelInferenceDescriptor = modelInferenceDescriptor; + } + public void setOutputDeviceToQueriedDevicesMap( Map outputDeviceToQueriedDevicesMap) { this.outputDeviceToQueriedDevicesMap = outputDeviceToQueriedDevicesMap; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java index 6071f29edea6f..dab3163920588 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java @@ -20,12 +20,14 @@ package org.apache.iotdb.db.queryengine.plan.analyze; import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; import org.apache.iotdb.commons.client.exception.ClientManagerException; import org.apache.iotdb.commons.conf.IoTDBConstant; import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.exception.IoTDBException; import org.apache.iotdb.commons.exception.MetadataException; +import org.apache.iotdb.commons.model.ModelInformation; import org.apache.iotdb.commons.partition.DataPartition; import org.apache.iotdb.commons.partition.DataPartitionQueryParam; import org.apache.iotdb.commons.partition.SchemaNodeManagementPartition; @@ -41,6 +43,7 @@ import org.apache.iotdb.confignode.rpc.thrift.TGetDataNodeLocationsResp; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; +import org.apache.iotdb.db.exception.ainode.GetModelInfoException; import org.apache.iotdb.db.exception.metadata.template.TemplateIncompatibleException; import org.apache.iotdb.db.exception.metadata.view.UnsupportedViewException; import org.apache.iotdb.db.exception.sql.SemanticException; @@ -59,8 +62,16 @@ import org.apache.iotdb.db.queryengine.common.schematree.IMeasurementSchemaInfo; import org.apache.iotdb.db.queryengine.common.schematree.ISchemaTree; import org.apache.iotdb.db.queryengine.execution.operator.window.WindowType; +import org.apache.iotdb.db.queryengine.execution.operator.window.ainode.BottomInferenceWindowParameter; +import org.apache.iotdb.db.queryengine.execution.operator.window.ainode.CountInferenceWindow; +import org.apache.iotdb.db.queryengine.execution.operator.window.ainode.CountInferenceWindowParameter; +import org.apache.iotdb.db.queryengine.execution.operator.window.ainode.HeadInferenceWindow; +import org.apache.iotdb.db.queryengine.execution.operator.window.ainode.InferenceWindow; +import org.apache.iotdb.db.queryengine.execution.operator.window.ainode.InferenceWindowParameter; +import org.apache.iotdb.db.queryengine.execution.operator.window.ainode.InferenceWindowType; +import org.apache.iotdb.db.queryengine.execution.operator.window.ainode.TailInferenceWindow; import org.apache.iotdb.db.queryengine.metric.QueryPlanCostMetricSet; -import org.apache.iotdb.db.queryengine.metric.load.LoadTsFileCostMetricsSet; +import org.apache.iotdb.db.queryengine.plan.analyze.load.LoadTsFileAnalyzer; import org.apache.iotdb.db.queryengine.plan.analyze.lock.DataNodeSchemaLockManager; import org.apache.iotdb.db.queryengine.plan.analyze.lock.SchemaLockType; import org.apache.iotdb.db.queryengine.plan.analyze.schema.ISchemaFetcher; @@ -72,6 +83,7 @@ import org.apache.iotdb.db.queryengine.plan.expression.leaf.ConstantOperand; import org.apache.iotdb.db.queryengine.plan.expression.leaf.TimeSeriesOperand; import org.apache.iotdb.db.queryengine.plan.expression.multi.FunctionExpression; +import org.apache.iotdb.db.queryengine.plan.expression.visitor.ExistUnknownTypeInExpression; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.metedata.write.MeasurementGroup; import org.apache.iotdb.db.queryengine.plan.planner.plan.parameter.DeviceViewIntoPathDescriptor; import 
org.apache.iotdb.db.queryengine.plan.planner.plan.parameter.FillDescriptor; @@ -174,6 +186,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.TreeMap; import java.util.function.UnaryOperator; import java.util.stream.Collectors; @@ -187,9 +200,9 @@ import static org.apache.iotdb.db.queryengine.common.header.ColumnHeaderConstant.ENDTIME; import static org.apache.iotdb.db.queryengine.metric.QueryPlanCostMetricSet.PARTITION_FETCHER; import static org.apache.iotdb.db.queryengine.metric.QueryPlanCostMetricSet.SCHEMA_FETCHER; -import static org.apache.iotdb.db.queryengine.metric.load.LoadTsFileCostMetricsSet.ANALYSIS; import static org.apache.iotdb.db.queryengine.plan.analyze.ExpressionAnalyzer.bindSchemaForExpression; import static org.apache.iotdb.db.queryengine.plan.analyze.ExpressionAnalyzer.concatDeviceAndBindSchemaForExpression; +import static org.apache.iotdb.db.queryengine.plan.analyze.ExpressionAnalyzer.concatDeviceAndBindSchemaForHaving; import static org.apache.iotdb.db.queryengine.plan.analyze.ExpressionAnalyzer.getMeasurementExpression; import static org.apache.iotdb.db.queryengine.plan.analyze.ExpressionAnalyzer.normalizeExpression; import static org.apache.iotdb.db.queryengine.plan.analyze.ExpressionAnalyzer.searchAggregationExpressions; @@ -199,8 +212,10 @@ import static org.apache.iotdb.db.queryengine.plan.analyze.SelectIntoUtils.constructTargetDevice; import static org.apache.iotdb.db.queryengine.plan.analyze.SelectIntoUtils.constructTargetMeasurement; import static org.apache.iotdb.db.queryengine.plan.analyze.SelectIntoUtils.constructTargetPath; +import static org.apache.iotdb.db.queryengine.plan.analyze.SelectIntoUtils.constructTargetPathWithoutPlaceHolder; import static org.apache.iotdb.db.queryengine.plan.optimization.LimitOffsetPushDown.canPushDownLimitOffsetInGroupByTimeForDevice; import static org.apache.iotdb.db.queryengine.plan.optimization.LimitOffsetPushDown.pushDownLimitOffsetInGroupByTimeForDevice; +import static org.apache.iotdb.db.queryengine.plan.parser.ASTVisitor.parseNodeString; import static org.apache.iotdb.db.schemaengine.schemaregion.view.visitor.GetSourcePathsVisitor.getSourcePaths; import static org.apache.iotdb.db.utils.constant.SqlConstant.COUNT_TIME_HEADER; @@ -217,11 +232,14 @@ public class AnalyzeVisitor extends StatementVisitor public static final Expression END_TIME_EXPRESSION = TimeSeriesOperand.constructColumnHeaderExpression(ENDTIME, TSDataType.INT64); + private static final String INFERENCE_COLUMN_NAME = "output"; + private final List lastQueryColumnNames = new ArrayList<>(Arrays.asList("TIME", "TIMESERIES", "VALUE", "DATATYPE")); private final IPartitionFetcher partitionFetcher; private final ISchemaFetcher schemaFetcher; + private final IModelFetcher modelFetcher; private static final PerformanceOverviewMetrics PERFORMANCE_OVERVIEW_METRICS = PerformanceOverviewMetrics.getInstance(); @@ -229,6 +247,7 @@ public class AnalyzeVisitor extends StatementVisitor public AnalyzeVisitor(IPartitionFetcher partitionFetcher, ISchemaFetcher schemaFetcher) { this.partitionFetcher = partitionFetcher; this.schemaFetcher = schemaFetcher; + this.modelFetcher = ModelFetcher.getInstance(); } @Override @@ -268,6 +287,9 @@ public Analysis visitQuery(QueryStatement queryStatement, MPPQueryContext contex // check for semantic errors queryStatement.semanticCheck(); + // fetch model inference information and check + analyzeModelInference(analysis, queryStatement); + ISchemaTree schemaTree = analyzeSchema(queryStatement, 
                analysis, context);
 
     // If there is no leaf node in the schema tree, the query should be completed immediately
@@ -279,21 +301,27 @@
     analyzeGlobalTimeFilter(analysis, queryStatement);
 
     if (queryStatement.isLastQuery()) {
+      context.generateGlobalTimeFilter(analysis);
       return analyzeLastQuery(queryStatement, analysis, schemaTree, context);
     }
 
     List> outputExpressions;
     if (queryStatement.isAlignByDevice()) {
-      if (TemplatedAnalyze.canBuildPlanUseTemplate(
-          analysis, queryStatement, partitionFetcher, schemaTree, context)) {
+      List deviceList = analyzeFrom(queryStatement, schemaTree);
+
+      if (deviceList.size() > 1
+          && TemplatedAnalyze.canBuildPlanUseTemplate(
+              analysis, queryStatement, partitionFetcher, schemaTree, context, deviceList)) {
+        // when there is no more than one device, there is no need to use the template
+        // optimization, i.e. no need to extract common variables
         return analysis;
       }
-      List deviceList = analyzeFrom(queryStatement, schemaTree);
-
       if (canPushDownLimitOffsetInGroupByTimeForDevice(queryStatement)) {
         // remove the device which won't appear in resultSet after limit/offset
-        deviceList = pushDownLimitOffsetInGroupByTimeForDevice(deviceList, queryStatement);
+        deviceList =
+            pushDownLimitOffsetInGroupByTimeForDevice(
+                deviceList, queryStatement, context.getZoneId());
       }
 
       outputExpressions =
@@ -397,6 +425,77 @@ public Analysis visitQuery(QueryStatement queryStatement, MPPQueryContext contex
     return analysis;
   }
 
+  // Check whether there is a proper model for inference for MODEL_NAME; if there isn't,
+  // the following analysis is unnecessary.
+  private void analyzeModelInference(Analysis analysis, QueryStatement queryStatement) {
+    if (!queryStatement.hasModelInference()) {
+      return;
+    }
+
+    // Get model metadata from the ConfigNode and perform basic checks
+    String modelId = queryStatement.getModelName();
+    TSStatus status = modelFetcher.fetchModel(modelId, analysis);
+    if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+      throw new GetModelInfoException(status.getMessage());
+    }
+    ModelInformation modelInformation = analysis.getModelInformation();
+    if (modelInformation == null || !modelInformation.available()) {
+      throw new SemanticException("Model " + modelId + " is not active");
+    }
+
+    // set the inference window if one is specified
+    if (queryStatement.isSetInferenceWindow()) {
+      InferenceWindow window = queryStatement.getInferenceWindow();
+      if (InferenceWindowType.HEAD == window.getType()) {
+        long windowSize = ((HeadInferenceWindow) window).getWindowSize();
+        checkWindowSize(windowSize, modelInformation);
+        if (queryStatement.hasLimit() && queryStatement.getRowLimit() < windowSize) {
+          throw new SemanticException(
+              "LIMIT in SQL should be no smaller than the window size used for inference");
+        }
+        // optimize the head window by a LIMIT node
+        queryStatement.setRowLimit(windowSize);
+      } else if (InferenceWindowType.TAIL == window.getType()) {
+        long windowSize = ((TailInferenceWindow) window).getWindowSize();
+        checkWindowSize(windowSize, modelInformation);
+        InferenceWindowParameter inferenceWindowParameter =
+            new BottomInferenceWindowParameter(windowSize);
+        analysis
+            .getModelInferenceDescriptor()
+            .setInferenceWindowParameter(inferenceWindowParameter);
+      } else if (InferenceWindowType.COUNT == window.getType()) {
+        CountInferenceWindow countInferenceWindow = (CountInferenceWindow) window;
+        checkWindowSize(countInferenceWindow.getInterval(), modelInformation);
+        InferenceWindowParameter inferenceWindowParameter =
+            new
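+            // a COUNT window presumably groups every `interval` rows of the result set
+            // into one inference input, advancing by `step` rows between windows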
                CountInferenceWindowParameter(
+                    countInferenceWindow.getInterval(), countInferenceWindow.getStep());
+        analysis
+            .getModelInferenceDescriptor()
+            .setInferenceWindowParameter(inferenceWindowParameter);
+      }
+    }
+
+    // set inference attributes if they are specified
+    if (queryStatement.hasInferenceAttributes()) {
+      analysis
+          .getModelInferenceDescriptor()
+          .setInferenceAttributes(queryStatement.getInferenceAttributes());
+    }
+  }
+
+  private void checkWindowSize(long windowSize, ModelInformation modelInformation) {
+    if (modelInformation.isBuiltIn()) {
+      return;
+    }
+
+    if (modelInformation.getInputShape()[0] != windowSize) {
+      throw new SemanticException(
+          String.format(
+              "Window size %d is not equal to the input size %d of the model",
+              windowSize, modelInformation.getInputShape()[0]));
+    }
+  }
+
   private ISchemaTree analyzeSchema(
       QueryStatement queryStatement, Analysis analysis, MPPQueryContext context) {
     // concat path and construct path pattern tree
@@ -436,7 +535,6 @@ private ISchemaTree analyzeSchema(
       context.setFetchSchemaCost(schemaFetchCost);
       QueryPlanCostMetricSet.getInstance().recordPlanCost(SCHEMA_FETCHER, schemaFetchCost);
     }
-    analysis.setSchemaTree(schemaTree);
     return schemaTree;
   }
@@ -504,48 +602,93 @@ private Analysis analyzeLastQuery(
     for (ResultColumn resultColumn : queryStatement.getSelectComponent().getResultColumns()) {
       selectExpressions.add(resultColumn.getExpression());
     }
-    analyzeLastSource(analysis, selectExpressions, schemaTree, context);
-    analysis.setRespDatasetHeader(DatasetHeaderFactory.getLastQueryHeader());
-    // fetch partition information
-    analyzeDataPartition(analysis, queryStatement, schemaTree, context);
-
-    return analysis;
+    return analyzeLastSourceAndDataPartition(analysis, selectExpressions, schemaTree, context);
   }
 
-  private void analyzeLastSource(
+  private Analysis analyzeLastSourceAndDataPartition(
       Analysis analysis,
       List selectExpressions,
       ISchemaTree schemaTree,
       MPPQueryContext context) {
-    Set sourceExpressions = new LinkedHashSet<>();
-    Set lastQueryBaseExpressions = new LinkedHashSet<>();
+
+    // For fetching data partitions
+    Set allDeviceSet = new HashSet<>();
+
+    // For LogicalPlan
+    Set deviceExistViewSet = new HashSet<>();
+    Map> outputPathToSourceExpressionMap = new LinkedHashMap<>();
     Map> lastQueryNonWritableViewSourceExpressionMap = null;
+    Ordering timeseriesOrdering = analysis.getTimeseriesOrderingForLastQuery();
+
+    boolean hasAliasView = false;
     for (Expression selectExpression : selectExpressions) {
       for (Expression lastQuerySourceExpression :
           bindSchemaForExpression(selectExpression, schemaTree, context)) {
         if (lastQuerySourceExpression instanceof TimeSeriesOperand) {
-          lastQueryBaseExpressions.add(lastQuerySourceExpression);
-          sourceExpressions.add(lastQuerySourceExpression);
-        } else {
-          if (lastQueryNonWritableViewSourceExpressionMap == null) {
-            lastQueryNonWritableViewSourceExpressionMap = new HashMap<>();
+          TimeSeriesOperand timeSeriesOperand = (TimeSeriesOperand) lastQuerySourceExpression;
+          MeasurementPath outputPath =
+              (MeasurementPath)
+                  (timeSeriesOperand.isViewExpression()
+                      ? timeSeriesOperand.getViewPath()
+                      : timeSeriesOperand.getPath());
+          String actualDeviceID =
+              ExpressionAnalyzer.getDeviceNameInSourceExpression(timeSeriesOperand);
+          String outputDeviceID =
+              timeSeriesOperand.isViewExpression() ?
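+              // a view column is keyed by its logical (view) device so that the output
+              // header keeps the view path, while the physical device is still collected
+              // separately for partition fetching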
outputPath.getDevice() : actualDeviceID; + if (timeSeriesOperand.isViewExpression()) { + deviceExistViewSet.add(outputDeviceID); + if (!hasAliasView) { + allDeviceSet.addAll(outputPathToSourceExpressionMap.keySet()); + hasAliasView = true; + } + allDeviceSet.add(actualDeviceID); + } else if (hasAliasView) { + allDeviceSet.add(actualDeviceID); } + // If we use actual deviceId, it may overwrite other expression of same measurement in + // Map. + outputPathToSourceExpressionMap + .computeIfAbsent( + outputDeviceID, + k -> + timeseriesOrdering != null + ? new TreeMap<>(timeseriesOrdering.getStringComparator()) + : new LinkedHashMap<>()) + .put(outputPath.getMeasurement(), timeSeriesOperand); + } else { + lastQueryNonWritableViewSourceExpressionMap = + lastQueryNonWritableViewSourceExpressionMap == null + ? new HashMap<>() + : lastQueryNonWritableViewSourceExpressionMap; List sourceExpressionsOfNonWritableView = searchSourceExpressions(lastQuerySourceExpression); lastQueryNonWritableViewSourceExpressionMap.putIfAbsent( lastQuerySourceExpression, sourceExpressionsOfNonWritableView); - sourceExpressions.addAll(sourceExpressionsOfNonWritableView); + for (Expression expression : sourceExpressionsOfNonWritableView) { + allDeviceSet.add(ExpressionAnalyzer.getDeviceNameInSourceExpression(expression)); + } } } } + if (allDeviceSet.isEmpty()) { + allDeviceSet = outputPathToSourceExpressionMap.keySet(); + } else if (!hasAliasView) { + allDeviceSet.addAll(outputPathToSourceExpressionMap.keySet()); + } - analysis.setSourceExpressions(sourceExpressions); - analysis.setLastQueryBaseExpressions(lastQueryBaseExpressions); + analysis.setShouldHaveSourceExpression(!allDeviceSet.isEmpty()); + analysis.setLastQueryOutputPathToSourceExpressionMap(outputPathToSourceExpressionMap); + analysis.setDeviceExistViewSet( + deviceExistViewSet.isEmpty() ? 
            Collections.emptySet() : deviceExistViewSet);
     analysis.setLastQueryNonWritableViewSourceExpressionMap(
         lastQueryNonWritableViewSourceExpressionMap);
+
+    DataPartition dataPartition = fetchDataPartitionByDevices(allDeviceSet, schemaTree, context);
+    analysis.setDataPartitionInfo(dataPartition);
+    return analysis;
   }
 
   private void updateSchemaTreeByViews(
@@ -967,8 +1110,7 @@ private void analyzeHaving(
     for (PartialPath device : deviceSet) {
       List expressionsInHaving =
-          concatDeviceAndBindSchemaForExpression(
-              havingExpression, device, schemaTree, queryContext);
+          concatDeviceAndBindSchemaForHaving(havingExpression, device, schemaTree, queryContext);
 
       conJunctions.addAll(
           expressionsInHaving.stream()
@@ -981,6 +1123,10 @@
     for (Expression aggregationExpression : searchAggregationExpressions(expression)) {
       Expression normalizedAggregationExpression = normalizeExpression(aggregationExpression);
 
+      if (!new ExistUnknownTypeInExpression().process(aggregationExpression, null).isEmpty()) {
+        continue;
+      }
+
       analyzeExpressionType(analysis, aggregationExpression);
       analyzeExpressionType(analysis, normalizedAggregationExpression);
 
@@ -1574,6 +1720,28 @@ static void analyzeOutput(
       return;
     }
 
+    if (queryStatement.hasModelInference()) {
+      ModelInformation modelInformation = analysis.getModelInformation();
+      // check input
+      checkInputShape(modelInformation, outputExpressions);
+      checkInputType(analysis, modelInformation, outputExpressions);
+
+      // set output
+      List columnHeaders = new ArrayList<>();
+      int[] outputShape = modelInformation.getOutputShape();
+      TSDataType[] outputDataType = modelInformation.getOutputDataType();
+      for (int i = 0; i < outputShape[1]; i++) {
+        columnHeaders.add(new ColumnHeader(INFERENCE_COLUMN_NAME + i, outputDataType[i]));
+      }
+      analysis
+          .getModelInferenceDescriptor()
+          .setOutputColumnNames(
+              columnHeaders.stream().map(ColumnHeader::getColumnName).collect(Collectors.toList()));
+      boolean isIgnoreTimestamp = !queryStatement.isGenerateTime();
+      analysis.setRespDatasetHeader(new DatasetHeader(columnHeaders, isIgnoreTimestamp));
+      return;
+    }
+
     boolean isIgnoreTimestamp = queryStatement.isAggregationQuery() && !queryStatement.isGroupBy();
     List columnHeaders = new ArrayList<>();
     if (queryStatement.isAlignByDevice()) {
@@ -1592,6 +1760,72 @@
     analysis.setRespDatasetHeader(new DatasetHeader(columnHeaders, isIgnoreTimestamp));
   }
 
+  // check whether the SQL result matches the model input
+  private static void checkInputShape(
+      ModelInformation modelInformation, List> outputExpressions) {
+    if (modelInformation.isBuiltIn()) {
+      modelInformation.setInputColumnSize(outputExpressions.size());
+      return;
+    }
+
+    // check inputShape
+    int[] inputShape = modelInformation.getInputShape();
+    if (inputShape.length != 2) {
+      throw new SemanticException(
+          String.format(
+              "The input shape of the model is not correct: it should have 2 dimensions, but the actual dimension is %d",
+              inputShape.length));
+    }
+    int columnNumber = inputShape[1];
+    if (columnNumber != outputExpressions.size()) {
+      throw new SemanticException(
+          String.format(
+              "The column number of the SQL result does not match the column number of the model input [%d] for inference",
+              columnNumber));
+    }
+  }
+
+  private static void checkInputType(
+      Analysis analysis,
+      ModelInformation modelInformation,
+      List> outputExpressions) {
+
+    if (modelInformation.isBuiltIn()) {
+      TSDataType[] inputType = new TSDataType[outputExpressions.size()];
+      for (int i = 0; i < outputExpressions.size(); i++) {
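+        // built-in models accept any number of numeric columns: each column only
+        // has to be numeric, and the observed types are recorded on the
+        // ModelInformation (setInputDataType below) for later use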
+        Expression inputExpression = outputExpressions.get(i).left;
+        TSDataType inputDataType = analysis.getType(inputExpression);
+        if (!inputDataType.isNumeric()) {
+          throw new SemanticException(
+              String.format(
+                  "The type of SQL result column [%s in %d] should be numeric for inference",
+                  inputDataType, i));
+        }
+        inputType[i] = inputDataType;
+      }
+      modelInformation.setInputDataType(inputType);
+      return;
+    }
+
+    TSDataType[] inputType = modelInformation.getInputDataType();
+    if (inputType.length != modelInformation.getInputShape()[1]) {
+      throw new SemanticException(
+          String.format(
+              "The number of input types does not match the input shape [%d] for inference",
+              modelInformation.getInputShape()[1]));
+    }
+    for (int i = 0; i < inputType.length; i++) {
+      Expression inputExpression = outputExpressions.get(i).left;
+      TSDataType inputDataType = analysis.getType(inputExpression);
+      if (inputDataType != inputType[i]) {
+        throw new SemanticException(
+            String.format(
+                "The type of SQL result column [%s in %d] does not match the type of the model input [%s] for inference",
+                inputDataType, i, inputType[i]));
+      }
+    }
+  }
+
   // For last query
   private void analyzeLastOrderBy(Analysis analysis, QueryStatement queryStatement) {
     if (!queryStatement.hasOrderBy()) {
@@ -2091,7 +2325,11 @@ public static Pair, Pair> getTimePart
         result.add(timePartitionSlot);
         // next init
         timePartitionSlot = new TTimePartitionSlot(endTime);
-        endTime = endTime + TimePartitionUtils.getTimePartitionInterval();
+        // beware of overflow
+        endTime =
+            endTime + TimePartitionUtils.getTimePartitionInterval() > endTime
+                ? endTime + TimePartitionUtils.getTimePartitionInterval()
+                : Long.MAX_VALUE;
       } else {
         index++;
         if (index < size) {
@@ -2162,7 +2400,7 @@ private void analyzeInto(
             constructTargetMeasurement(
                 sourceDevice.concatNode(sourceColumn.getExpressionString()), measurementTemplate);
       } else {
-        targetMeasurement = measurementTemplate;
+        targetMeasurement = parseNodeString(measurementTemplate);
       }
       deviceViewIntoPathDescriptor.specifyTargetDeviceMeasurement(
           sourceDevice, targetDevice, sourceColumn.getExpressionString(), targetMeasurement);
@@ -2233,7 +2471,7 @@ private void analyzeInto(
       }
       targetPath = constructTargetPath(sourcePath, deviceTemplate, measurementTemplate);
     } else {
-      targetPath = deviceTemplate.concatNode(measurementTemplate);
+      targetPath = constructTargetPathWithoutPlaceHolder(deviceTemplate, measurementTemplate);
     }
     intoPathDescriptor.specifyTargetPath(sourceColumn, viewPath, targetPath);
     intoPathDescriptor.specifyDeviceAlignment(
@@ -2382,7 +2620,7 @@ public Analysis visitCreateTimeseries(
     analysis.setStatement(createTimeSeriesStatement);
 
     checkIsTemplateCompatible(
-        createTimeSeriesStatement.getPath(), createTimeSeriesStatement.getAlias(), context, true);
+        createTimeSeriesStatement.getPath(), createTimeSeriesStatement.getAlias(), context);
 
     PathPatternTree patternTree = new PathPatternTree();
     patternTree.appendFullPath(createTimeSeriesStatement.getPath());
@@ -2394,32 +2632,27 @@
   }
 
   private void checkIsTemplateCompatible(
-      PartialPath timeseriesPath, String alias, MPPQueryContext context, boolean takeLock) {
-    if (takeLock) {
-      DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.TIMESERIES_VS_TEMPLATE);
-      context.addAcquiredLockNum(SchemaLockType.TIMESERIES_VS_TEMPLATE);
-    }
-    Pair templateInfo =
-        schemaFetcher.checkTemplateSetAndPreSetInfo(timeseriesPath, alias);
+      final PartialPath timeSeriesPath, final String alias, final MPPQueryContext context) {
+
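+    // the takeLock flag is gone: takeReadLock(context, ...) records the acquisition in the
+    // query context itself, and the Coordinator later releases all schema read locks in one
+    // call via DataNodeSchemaLockManager.releaseReadLock(queryContext)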
DataNodeSchemaLockManager.getInstance() + .takeReadLock(context, SchemaLockType.TIMESERIES_VS_TEMPLATE); + final Pair templateInfo = + schemaFetcher.checkTemplateSetAndPreSetInfo(timeSeriesPath, alias); if (templateInfo != null) { throw new SemanticException( new TemplateIncompatibleException( - timeseriesPath.getFullPath(), templateInfo.left.getName(), templateInfo.right)); + timeSeriesPath.getFullPath(), templateInfo.left.getName(), templateInfo.right)); } } private void checkIsTemplateCompatible( - PartialPath devicePath, - List measurements, - List aliasList, - MPPQueryContext context, - boolean takeLock) { - if (takeLock) { - DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.TIMESERIES_VS_TEMPLATE); - context.addAcquiredLockNum(SchemaLockType.TIMESERIES_VS_TEMPLATE); - } + final PartialPath devicePath, + final List measurements, + final List aliasList, + final MPPQueryContext context) { + DataNodeSchemaLockManager.getInstance() + .takeReadLock(context, SchemaLockType.TIMESERIES_VS_TEMPLATE); for (int i = 0; i < measurements.size(); i++) { - Pair templateInfo = + final Pair templateInfo = schemaFetcher.checkTemplateSetAndPreSetInfo( devicePath.concatNode(measurements.get(i)), aliasList == null ? null : aliasList.get(i)); @@ -2437,12 +2670,12 @@ private void analyzeSchemaProps(Map props) { if (props == null || props.isEmpty()) { return; } - Map caseChangeMap = new HashMap<>(); - for (String key : props.keySet()) { + final Map caseChangeMap = new HashMap<>(); + for (final String key : props.keySet()) { caseChangeMap.put(key.toLowerCase(Locale.ROOT), key); } - for (Map.Entry caseChangeEntry : caseChangeMap.entrySet()) { - String lowerCaseKey = caseChangeEntry.getKey(); + for (final Map.Entry caseChangeEntry : caseChangeMap.entrySet()) { + final String lowerCaseKey = caseChangeEntry.getKey(); if (!ALLOWED_SCHEMA_PROPS.contains(lowerCaseKey)) { throw new SemanticException( new MetadataException( @@ -2455,11 +2688,11 @@ private void analyzeSchemaProps(Map props) { } } - private void analyzeSchemaProps(List> propsList) { + private void analyzeSchemaProps(final List> propsList) { if (propsList == null) { return; } - for (Map props : propsList) { + for (final Map props : propsList) { analyzeSchemaProps(props); } } @@ -2486,8 +2719,7 @@ public Analysis visitCreateAlignedTimeseries( createAlignedTimeSeriesStatement.getDevicePath(), createAlignedTimeSeriesStatement.getMeasurements(), createAlignedTimeSeriesStatement.getAliasList(), - context, - true); + context); PathPatternTree pathPatternTree = new PathPatternTree(); for (String measurement : createAlignedTimeSeriesStatement.getMeasurements()) { @@ -2514,8 +2746,7 @@ public Analysis visitInternalCreateTimeseries( internalCreateTimeSeriesStatement.getDevicePath(), internalCreateTimeSeriesStatement.getMeasurements(), null, - context, - true); + context); PathPatternTree pathPatternTree = new PathPatternTree(); for (String measurement : internalCreateTimeSeriesStatement.getMeasurements()) { @@ -2533,24 +2764,22 @@ public Analysis visitInternalCreateTimeseries( @Override public Analysis visitInternalCreateMultiTimeSeries( - InternalCreateMultiTimeSeriesStatement internalCreateMultiTimeSeriesStatement, - MPPQueryContext context) { + final InternalCreateMultiTimeSeriesStatement internalCreateMultiTimeSeriesStatement, + final MPPQueryContext context) { context.setQueryType(QueryType.WRITE); - Analysis analysis = new Analysis(); + final Analysis analysis = new Analysis(); analysis.setStatement(internalCreateMultiTimeSeriesStatement); - 
PathPatternTree pathPatternTree = new PathPatternTree(); - DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.TIMESERIES_VS_TEMPLATE); - context.addAcquiredLockNum(SchemaLockType.TIMESERIES_VS_TEMPLATE); - for (Map.Entry> entry : + final PathPatternTree pathPatternTree = new PathPatternTree(); + for (final Map.Entry> entry : internalCreateMultiTimeSeriesStatement.getDeviceMap().entrySet()) { checkIsTemplateCompatible( - entry.getKey(), entry.getValue().right.getMeasurements(), null, context, false); + entry.getKey(), entry.getValue().right.getMeasurements(), null, context); pathPatternTree.appendFullPath(entry.getKey().concatNode(ONE_LEVEL_PATH_WILDCARD)); } - SchemaPartition schemaPartitionInfo; + final SchemaPartition schemaPartitionInfo; schemaPartitionInfo = partitionFetcher.getOrCreateSchemaPartition( pathPatternTree, context.getSession().getUserName()); @@ -2559,29 +2788,28 @@ public Analysis visitInternalCreateMultiTimeSeries( } @Override - public Analysis visitCreateMultiTimeseries( - CreateMultiTimeSeriesStatement createMultiTimeSeriesStatement, MPPQueryContext context) { + public Analysis visitCreateMultiTimeSeries( + final CreateMultiTimeSeriesStatement createMultiTimeSeriesStatement, + final MPPQueryContext context) { context.setQueryType(QueryType.WRITE); - Analysis analysis = new Analysis(); + final Analysis analysis = new Analysis(); analysis.setStatement(createMultiTimeSeriesStatement); analyzeSchemaProps(createMultiTimeSeriesStatement.getPropsList()); - List timeseriesPathList = createMultiTimeSeriesStatement.getPaths(); - List aliasList = createMultiTimeSeriesStatement.getAliasList(); + final List timeseriesPathList = createMultiTimeSeriesStatement.getPaths(); + final List aliasList = createMultiTimeSeriesStatement.getAliasList(); - DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.TIMESERIES_VS_TEMPLATE); - context.addAcquiredLockNum(SchemaLockType.TIMESERIES_VS_TEMPLATE); for (int i = 0; i < timeseriesPathList.size(); i++) { checkIsTemplateCompatible( - timeseriesPathList.get(i), aliasList == null ? null : aliasList.get(i), context, false); + timeseriesPathList.get(i), aliasList == null ? null : aliasList.get(i), context); } - PathPatternTree patternTree = new PathPatternTree(); - for (PartialPath path : createMultiTimeSeriesStatement.getPaths()) { + final PathPatternTree patternTree = new PathPatternTree(); + for (final PartialPath path : createMultiTimeSeriesStatement.getPaths()) { patternTree.appendFullPath(path); } - SchemaPartition schemaPartitionInfo = + final SchemaPartition schemaPartitionInfo = partitionFetcher.getOrCreateSchemaPartition( patternTree, context.getSession().getUserName()); analysis.setSchemaPartitionInfo(schemaPartitionInfo); @@ -2589,7 +2817,7 @@ public Analysis visitCreateMultiTimeseries( } @Override - public Analysis visitAlterTimeseries( + public Analysis visitAlterTimeSeries( AlterTimeSeriesStatement alterTimeSeriesStatement, MPPQueryContext context) { context.setQueryType(QueryType.WRITE); Analysis analysis = new Analysis(); @@ -2791,6 +3019,12 @@ public Analysis visitInsertRowsOfOneDevice( @Override public Analysis visitPipeEnrichedStatement( PipeEnrichedStatement pipeEnrichedStatement, MPPQueryContext context) { + // The LoadTsFileStatement is a special case, it needs isGeneratedByPipe information + // in the analyzer to execute the tsfile-tablet conversion in some cases. 
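+  // Marking happens before delegating to the inner statement, so the flag is already
+  // visible when visitLoadFile (later in this file) builds its LoadTsFileAnalyzer.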
+ if (pipeEnrichedStatement.getInnerStatement() instanceof LoadTsFileStatement) { + ((LoadTsFileStatement) pipeEnrichedStatement.getInnerStatement()).markIsGeneratedByPipe(); + } + Analysis analysis = pipeEnrichedStatement.getInnerStatement().accept(this, context); // statement may be changed because of logical view @@ -2850,10 +3084,9 @@ private InsertBaseStatement removeLogicalView( public Analysis visitLoadFile(LoadTsFileStatement loadTsFileStatement, MPPQueryContext context) { context.setQueryType(QueryType.WRITE); - final long startTime = System.nanoTime(); - try (final LoadTsfileAnalyzer loadTsfileAnalyzer = - new LoadTsfileAnalyzer(loadTsFileStatement, context, partitionFetcher, schemaFetcher)) { - return loadTsfileAnalyzer.analyzeFileByFile(); + try (final LoadTsFileAnalyzer loadTsfileAnalyzer = + new LoadTsFileAnalyzer(loadTsFileStatement, context, partitionFetcher, schemaFetcher)) { + return loadTsfileAnalyzer.analyzeFileByFile(new Analysis()); } catch (final Exception e) { final String exceptionMessage = String.format( @@ -2865,9 +3098,6 @@ public Analysis visitLoadFile(LoadTsFileStatement loadTsFileStatement, MPPQueryC analysis.setFinishQueryAfterAnalyze(true); analysis.setFailStatus(RpcUtils.getStatus(TSStatusCode.LOAD_FILE_ERROR, exceptionMessage)); return analysis; - } finally { - LoadTsFileCostMetricsSet.getInstance() - .recordPhaseTimeCost(ANALYSIS, System.nanoTime() - startTime); } } @@ -3001,13 +3231,12 @@ public Analysis visitShowTimeSeries( updateSchemaTreeByViews(analysis, schemaTree, context); logger.debug("[EndFetchSchema]]"); - analyzeLastSource( + analyzeLastSourceAndDataPartition( analysis, Collections.singletonList( new TimeSeriesOperand(showTimeSeriesStatement.getPathPattern())), schemaTree, context); - analyzeDataPartition(analysis, new QueryStatement(), schemaTree, context); } analysis.setRespDatasetHeader(DatasetHeaderFactory.getShowTimeSeriesHeader()); @@ -3380,6 +3609,7 @@ public Analysis visitDeleteData( } } analysis.setSchemaTree(schemaTree); + context.setReleaseSchemaTreeAfterAnalyzing(false); Map> sgNameToQueryParamsMap = new HashMap<>(); @@ -3876,10 +4106,10 @@ private Pair, PartialPath> findAllViewsInPaths( } private void checkTargetPathsInCreateLogicalView( - Analysis analysis, - CreateLogicalViewStatement createLogicalViewStatement, - MPPQueryContext context) { - Pair checkResult = createLogicalViewStatement.checkTargetPaths(); + final Analysis analysis, + final CreateLogicalViewStatement createLogicalViewStatement, + final MPPQueryContext context) { + final Pair checkResult = createLogicalViewStatement.checkTargetPaths(); if (Boolean.FALSE.equals(checkResult.left)) { analysis.setFinishQueryAfterAnalyze(true); analysis.setFailStatus( @@ -3890,10 +4120,10 @@ private void checkTargetPathsInCreateLogicalView( } // Make sure there are no redundant paths in targets. Note that redundant paths in source // are legal. 
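    // (several view targets may legally read the same source series, but no two target
    // paths may collide, since each target path is created as a distinct view)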
-    List targetPathList = createLogicalViewStatement.getTargetPathList();
-    Set targetStringSet = new HashSet<>();
-    for (PartialPath path : targetPathList) {
-      boolean repeatPathNotExist = targetStringSet.add(path.toString());
+    final List targetPathList = createLogicalViewStatement.getTargetPathList();
+    final Set targetStringSet = new HashSet<>();
+    for (final PartialPath path : targetPathList) {
+      final boolean repeatPathNotExist = targetStringSet.add(path.toString());
       if (!repeatPathNotExist) {
         analysis.setFinishQueryAfterAnalyze(true);
         analysis.setFailStatus(
@@ -3905,12 +4135,10 @@ private void checkTargetPathsInCreateLogicalView(
     }
     // Make sure all paths are not under any templates
     try {
-      DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.TIMESERIES_VS_TEMPLATE);
-      context.addAcquiredLockNum(SchemaLockType.TIMESERIES_VS_TEMPLATE);
-      for (PartialPath path : createLogicalViewStatement.getTargetPathList()) {
-        checkIsTemplateCompatible(path, null, context, false);
+      for (final PartialPath path : createLogicalViewStatement.getTargetPathList()) {
+        checkIsTemplateCompatible(path, null, context);
       }
-    } catch (Exception e) {
+    } catch (final Exception e) {
       analysis.setFinishQueryAfterAnalyze(true);
       analysis.setFailStatus(
           RpcUtils.getStatus(
@@ -3921,14 +4149,14 @@
   @Override
   public Analysis visitShowLogicalView(
-      ShowLogicalViewStatement showLogicalViewStatement, MPPQueryContext context) {
+      final ShowLogicalViewStatement showLogicalViewStatement, final MPPQueryContext context) {
     context.setQueryType(QueryType.READ);
-    Analysis analysis = new Analysis();
+    final Analysis analysis = new Analysis();
     analysis.setStatement(showLogicalViewStatement);
 
-    PathPatternTree patternTree = new PathPatternTree();
+    final PathPatternTree patternTree = new PathPatternTree();
     patternTree.appendPathPattern(showLogicalViewStatement.getPathPattern());
-    SchemaPartition schemaPartitionInfo = partitionFetcher.getSchemaPartition(patternTree);
+    final SchemaPartition schemaPartitionInfo = partitionFetcher.getSchemaPartition(patternTree);
     analysis.setSchemaPartitionInfo(schemaPartitionInfo);
     analysis.setRespDatasetHeader(DatasetHeaderFactory.getShowLogicalViewHeader());
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/Analyzer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/Analyzer.java
index f7ca04f7d0535..3dfaa0a1137b2 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/Analyzer.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/Analyzer.java
@@ -43,8 +43,23 @@ public Analyzer(
   public Analysis analyze(Statement statement) {
     long startTime = System.nanoTime();
-    Analysis analysis =
-        new AnalyzeVisitor(partitionFetcher, schemaFetcher).process(statement, context);
+    AnalyzeVisitor visitor = new AnalyzeVisitor(partitionFetcher, schemaFetcher);
+    Analysis analysis = null;
+    context.setReserveMemoryForSchemaTreeFunc(
+        mem -> {
+          context.reserveMemoryForFrontEnd(mem);
+          // For temporary and independently counted memory, we need to process it immediately
+          context.reserveMemoryForFrontEndImmediately();
+        });
+    try {
+      analysis = visitor.process(statement, context);
+    } finally {
+      if (analysis != null && context.releaseSchemaTreeAfterAnalyzing()) {
+        analysis.setSchemaTree(null);
+        context.releaseMemoryForSchemaTree();
+      }
+      context.setReserveMemoryForSchemaTreeFunc(null);
+    }
     if
(statement.isQuery()) { QueryPlanCostMetricSet.getInstance().recordPlanCost(ANALYZER, System.nanoTime() - startTime); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java index e76973c516b3d..ff19dd79cf58e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java @@ -56,9 +56,10 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedList; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -415,15 +416,21 @@ private TDataPartitionReq constructDataPartitionReqForQuery( private SchemaPartition parseSchemaPartitionTableResp( TSchemaPartitionTableResp schemaPartitionTableResp) { Map> regionReplicaMap = new HashMap<>(); - for (Map.Entry> entry1 : + for (final Map.Entry> entry1 : schemaPartitionTableResp.getSchemaPartitionTable().entrySet()) { - Map result1 = - regionReplicaMap.computeIfAbsent(entry1.getKey(), k -> new HashMap<>()); - for (Map.Entry entry2 : - entry1.getValue().entrySet()) { - TSeriesPartitionSlot seriesPartitionSlot = entry2.getKey(); - TConsensusGroupId consensusGroupId = entry2.getValue(); - result1.put(seriesPartitionSlot, partitionCache.getRegionReplicaSet(consensusGroupId)); + String database = entry1.getKey(); + final Map result1 = + regionReplicaMap.computeIfAbsent(database, k -> new HashMap<>()); + + Map orderedMap = + new LinkedHashMap<>(entry1.getValue()); + List orderedGroupIds = new ArrayList<>(orderedMap.values()); + List regionReplicaSets = + partitionCache.getRegionReplicaSet(orderedGroupIds); + + int index = 0; + for (Map.Entry entry2 : orderedMap.entrySet()) { + result1.put(entry2.getKey(), regionReplicaSets.get(index++)); } } @@ -443,6 +450,29 @@ private SchemaNodeManagementPartition parseSchemaNodeManagementPartitionResp( } private DataPartition parseDataPartitionResp(TDataPartitionTableResp dataPartitionTableResp) { + final Set uniqueConsensusGroupIds = new HashSet<>(); + for (final Map< + String, Map>>> + partitionTable : Collections.singleton(dataPartitionTableResp.getDataPartitionTable())) { + for (final Map>> + seriesPartitionMap : partitionTable.values()) { + for (final Map> timePartitionMap : + seriesPartitionMap.values()) { + for (final List consensusGroupIds : timePartitionMap.values()) { + uniqueConsensusGroupIds.addAll(consensusGroupIds); + } + } + } + } + + final List allRegionReplicaSets = + partitionCache.getRegionReplicaSet(new ArrayList<>(uniqueConsensusGroupIds)); + final List consensusGroupIds = new ArrayList<>(uniqueConsensusGroupIds); + final Map regionReplicaSetMap = new HashMap<>(); + for (int i = 0; i < allRegionReplicaSets.size(); i++) { + regionReplicaSetMap.put(consensusGroupIds.get(i), allRegionReplicaSets.get(i)); + } + Map>>> regionReplicaSet = new HashMap<>(); for (Map.Entry< @@ -456,9 +486,9 @@ private DataPartition parseDataPartitionResp(TDataPartitionTableResp dataPartiti result1.computeIfAbsent(entry2.getKey(), k -> new HashMap<>()); for (Map.Entry> entry3 : entry2.getValue().entrySet()) { - List regionReplicaSets = new LinkedList<>(); - for (TConsensusGroupId consensusGroupId : entry3.getValue()) { - 
regionReplicaSets.add(partitionCache.getRegionReplicaSet(consensusGroupId)); + final List regionReplicaSets = new ArrayList<>(); + for (TConsensusGroupId groupId : entry3.getValue()) { + regionReplicaSets.add(regionReplicaSetMap.get(groupId)); } result2.put(entry3.getKey(), regionReplicaSets); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ExpressionAnalyzer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ExpressionAnalyzer.java index fa29e4a20f87a..969497946114f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ExpressionAnalyzer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ExpressionAnalyzer.java @@ -52,6 +52,7 @@ import org.apache.iotdb.db.queryengine.plan.expression.visitor.cartesian.BindSchemaForExpressionVisitor; import org.apache.iotdb.db.queryengine.plan.expression.visitor.cartesian.BindSchemaForPredicateVisitor; import org.apache.iotdb.db.queryengine.plan.expression.visitor.cartesian.ConcatDeviceAndBindSchemaForExpressionVisitor; +import org.apache.iotdb.db.queryengine.plan.expression.visitor.cartesian.ConcatDeviceAndBindSchemaForHavingVisitor; import org.apache.iotdb.db.queryengine.plan.expression.visitor.cartesian.ConcatDeviceAndBindSchemaForPredicateVisitor; import org.apache.iotdb.db.queryengine.plan.expression.visitor.cartesian.ConcatExpressionWithSuffixPathsVisitor; import org.apache.iotdb.db.queryengine.plan.statement.component.ResultColumn; @@ -489,6 +490,18 @@ public static List concatDeviceAndBindSchemaForPredicate( devicePath, schemaTree, isWhere, queryContext)); } + public static List concatDeviceAndBindSchemaForHaving( + final Expression predicate, + final PartialPath devicePath, + final ISchemaTree schemaTree, + final MPPQueryContext queryContext) { + return new ConcatDeviceAndBindSchemaForHavingVisitor() + .process( + predicate, + new ConcatDeviceAndBindSchemaForHavingVisitor.Context( + devicePath, schemaTree, queryContext)); + } + /** * Search for subexpressions that can be queried natively, including all time series. 
* diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ExpressionUtils.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ExpressionUtils.java index fc9d3a0aa5180..a27e323e1e62b 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ExpressionUtils.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ExpressionUtils.java @@ -69,9 +69,17 @@ public static List reconstructTimeSeriesOperandsWithMemoryCheck( final MPPQueryContext queryContext) { List resultExpressions = new ArrayList<>(); for (PartialPath actualPath : actualPaths) { - resultExpressions.add( - reserveMemoryForExpression( - queryContext, reconstructTimeSeriesOperand(rawExpression, actualPath))); + Expression expression = reconstructTimeSeriesOperand(rawExpression, actualPath); + long memCost; + if (queryContext.useSampledAvgTimeseriesOperandMemCost()) { + memCost = queryContext.getAvgTimeseriesOperandMemCost(); + } else { + memCost = expression.ramBytesUsed(); + queryContext.calculateAvgTimeseriesOperandMemCost(memCost); + } + queryContext.reserveMemoryForFrontEnd(memCost); + + resultExpressions.add(expression); } return resultExpressions; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/IModelFetcher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/IModelFetcher.java new file mode 100644 index 0000000000000..1feecaefde9c5 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/IModelFetcher.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.queryengine.plan.analyze; + +import org.apache.iotdb.common.rpc.thrift.TSStatus; + +public interface IModelFetcher { + /** Get model information by model id from configNode. */ + TSStatus fetchModel(String modelId, Analysis analysis); +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ModelFetcher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ModelFetcher.java new file mode 100644 index 0000000000000..8cefb5e0cf3c4 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ModelFetcher.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.queryengine.plan.analyze; + +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.client.IClientManager; +import org.apache.iotdb.commons.client.exception.ClientManagerException; +import org.apache.iotdb.commons.consensus.ConfigRegionId; +import org.apache.iotdb.commons.model.ModelInformation; +import org.apache.iotdb.confignode.rpc.thrift.TGetModelInfoReq; +import org.apache.iotdb.confignode.rpc.thrift.TGetModelInfoResp; +import org.apache.iotdb.db.exception.ainode.ModelNotFoundException; +import org.apache.iotdb.db.exception.sql.StatementAnalyzeException; +import org.apache.iotdb.db.protocol.client.ConfigNodeClient; +import org.apache.iotdb.db.protocol.client.ConfigNodeClientManager; +import org.apache.iotdb.db.protocol.client.ConfigNodeInfo; +import org.apache.iotdb.db.queryengine.plan.planner.plan.parameter.model.ModelInferenceDescriptor; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.apache.thrift.TException; + +public class ModelFetcher implements IModelFetcher { + + private final IClientManager configNodeClientManager = + ConfigNodeClientManager.getInstance(); + + private static final class ModelFetcherHolder { + + private static final ModelFetcher INSTANCE = new ModelFetcher(); + + private ModelFetcherHolder() {} + } + + public static ModelFetcher getInstance() { + return ModelFetcherHolder.INSTANCE; + } + + private ModelFetcher() {} + + @Override + public TSStatus fetchModel(String modelName, Analysis analysis) { + try (ConfigNodeClient client = + configNodeClientManager.borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) { + TGetModelInfoResp getModelInfoResp = client.getModelInfo(new TGetModelInfoReq(modelName)); + if (getModelInfoResp.getStatus().getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + if (getModelInfoResp.modelInfo != null && getModelInfoResp.isSetAiNodeAddress()) { + analysis.setModelInferenceDescriptor( + new ModelInferenceDescriptor( + getModelInfoResp.aiNodeAddress, + ModelInformation.deserialize(getModelInfoResp.modelInfo))); + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } else { + TSStatus status = new TSStatus(TSStatusCode.GET_MODEL_INFO_ERROR.getStatusCode()); + status.setMessage(String.format("model [%s] is not available", modelName)); + return status; + } + } else { + throw new ModelNotFoundException(getModelInfoResp.getStatus().getMessage()); + } + } catch (ClientManagerException | TException e) { + throw new StatementAnalyzeException(e.getMessage()); + } + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/SelectIntoUtils.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/SelectIntoUtils.java index d0c3617d530f9..2a64f06d18684 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/SelectIntoUtils.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/SelectIntoUtils.java @@ -26,7 +26,6 @@ import org.apache.iotdb.db.queryengine.common.schematree.ISchemaTree; import 
org.apache.iotdb.db.queryengine.plan.expression.Expression; import org.apache.iotdb.db.queryengine.plan.expression.leaf.TimeSeriesOperand; -import org.apache.iotdb.db.queryengine.plan.parser.ASTVisitor; import org.apache.iotdb.db.utils.TypeInferenceUtils; import org.apache.tsfile.enums.TSDataType; @@ -41,6 +40,7 @@ import static com.google.common.base.Preconditions.checkState; import static org.apache.iotdb.commons.conf.IoTDBConstant.DOUBLE_COLONS; import static org.apache.iotdb.commons.conf.IoTDBConstant.LEVELED_PATH_TEMPLATE_PATTERN; +import static org.apache.iotdb.db.queryengine.plan.parser.ASTVisitor.parseNodeString; public class SelectIntoUtils { @@ -48,6 +48,18 @@ private SelectIntoUtils() { // forbidding instantiation } + public static PartialPath constructTargetPathWithoutPlaceHolder( + PartialPath devicePath, String measurement) { + String[] originalDeviceNodes = devicePath.getNodes(); + String[] resNodes = new String[originalDeviceNodes.length + 1]; + + for (int i = 0; i < originalDeviceNodes.length; i++) { + resNodes[i] = parseNodeString(originalDeviceNodes[i]); + } + resNodes[resNodes.length - 1] = parseNodeString(measurement); + return new MeasurementPath(resNodes); + } + public static PartialPath constructTargetPath( PartialPath sourcePath, PartialPath deviceTemplate, String measurementTemplate) { PartialPath targetDevice = constructTargetDevice(sourcePath.getDevicePath(), deviceTemplate); @@ -108,7 +120,7 @@ private static String applyLevelPlaceholder(String templateNode, String[] source resNode = matcher.replaceFirst(sourceNodes[index]); matcher = LEVELED_PATH_TEMPLATE_PATTERN.matcher(resNode); } - return ASTVisitor.parseNodeString(resNode); + return parseNodeString(resNode); } public static boolean checkIsAllRawSeriesQuery(List expressions) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/TemplatedAggregationAnalyze.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/TemplatedAggregationAnalyze.java index 79f8ba0a8c11a..67837a6b78cd9 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/TemplatedAggregationAnalyze.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/TemplatedAggregationAnalyze.java @@ -53,7 +53,6 @@ import static org.apache.iotdb.db.queryengine.plan.analyze.TemplatedAnalyze.analyzeDataPartition; import static org.apache.iotdb.db.queryengine.plan.analyze.TemplatedAnalyze.analyzeDeviceToWhere; import static org.apache.iotdb.db.queryengine.plan.analyze.TemplatedAnalyze.analyzeDeviceViewOutput; -import static org.apache.iotdb.db.queryengine.plan.analyze.TemplatedAnalyze.analyzeFrom; import static org.apache.iotdb.db.queryengine.plan.optimization.LimitOffsetPushDown.canPushDownLimitOffsetInGroupByTimeForDevice; import static org.apache.iotdb.db.queryengine.plan.optimization.LimitOffsetPushDown.pushDownLimitOffsetInGroupByTimeForDevice; import static org.apache.iotdb.db.utils.constant.SqlConstant.COUNT_TIME; @@ -67,7 +66,8 @@ static boolean canBuildAggregationPlanUseTemplate( IPartitionFetcher partitionFetcher, ISchemaTree schemaTree, MPPQueryContext context, - Template template) { + Template template, + List deviceList) { // not support order by expression and non-aligned template if (queryStatement.hasOrderByExpression() || !template.isDirectAligned()) { @@ -76,11 +76,11 @@ static boolean canBuildAggregationPlanUseTemplate( analysis.setNoWhereAndAggregation(false); - List deviceList = 
analyzeFrom(queryStatement, schemaTree); - if (canPushDownLimitOffsetInGroupByTimeForDevice(queryStatement)) { // remove the device which won't appear in resultSet after limit/offset - deviceList = pushDownLimitOffsetInGroupByTimeForDevice(deviceList, queryStatement); + deviceList = + pushDownLimitOffsetInGroupByTimeForDevice( + deviceList, queryStatement, context.getZoneId()); } List> outputExpressions = new ArrayList<>(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/TemplatedAnalyze.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/TemplatedAnalyze.java index 93defcafac707..157ddd723cd1a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/TemplatedAnalyze.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/TemplatedAnalyze.java @@ -96,7 +96,8 @@ public static boolean canBuildPlanUseTemplate( QueryStatement queryStatement, IPartitionFetcher partitionFetcher, ISchemaTree schemaTree, - MPPQueryContext context) { + MPPQueryContext context, + List deviceList) { if (queryStatement.getGroupByComponent() != null || queryStatement.isSelectInto() || queryStatement.hasFill() @@ -113,7 +114,7 @@ public static boolean canBuildPlanUseTemplate( if (queryStatement.isAggregationQuery()) { return canBuildAggregationPlanUseTemplate( - analysis, queryStatement, partitionFetcher, schemaTree, context, template); + analysis, queryStatement, partitionFetcher, schemaTree, context, template, deviceList); } List> outputExpressions = new ArrayList<>(); @@ -174,8 +175,6 @@ public static boolean canBuildPlanUseTemplate( analyzeSelect(queryStatement, analysis, outputExpressions, template); - List deviceList = analyzeFrom(queryStatement, schemaTree); - analyzeDeviceToWhere(analysis, queryStatement); if (analysis.getWhereExpression() != null && analysis.getWhereExpression().equals(ConstantOperand.FALSE)) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/TypeProvider.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/TypeProvider.java index 7c38bbdc4e9c4..ef26583119eb6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/TypeProvider.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/TypeProvider.java @@ -21,6 +21,7 @@ import org.apache.tsfile.enums.TSDataType; import org.apache.tsfile.utils.ReadWriteIOUtils; +import org.apache.tsfile.write.schema.IMeasurementSchema; import java.io.DataOutputStream; import java.io.IOException; @@ -49,7 +50,12 @@ public TypeProvider(Map typeMap, TemplatedInfo templatedInfo } public TSDataType getType(String symbol) { - return typeMap.get(symbol); + TSDataType type = typeMap.get(symbol); + if (templatedInfo == null || type != null) { + return type; + } + IMeasurementSchema schema = templatedInfo.getSchemaMap().get(symbol); + return schema == null ? 
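// ---------------------------------------------------------------------------
// Both canBuildPlanUseTemplate and canBuildAggregationPlanUseTemplate now
// accept deviceList as a parameter, so the device list is resolved once by
// the caller instead of each variant re-running analyzeFrom(...). The shape
// of that refactor in miniature (all names hypothetical):
// ---------------------------------------------------------------------------
import java.util.Arrays;
import java.util.List;

public final class HoistedComputationDemo {

  // Stand-in for the expensive schema-tree walk done by analyzeFrom(...).
  static List<String> resolveDevices(String pathPattern) {
    System.out.println("resolving " + pathPattern); // printed once after the hoist
    return Arrays.asList("root.sg.d1", "root.sg.d2");
  }

  // Before the refactor each check resolved the devices itself;
  // after it, both simply take the already-resolved list.
  static boolean canUseTemplateForAggregation(List<String> devices) {
    return !devices.isEmpty();
  }

  static boolean canUseTemplateForRawQuery(List<String> devices) {
    return devices.size() < 10_000;
  }

  public static void main(String[] args) {
    List<String> devices = resolveDevices("root.sg.*"); // hoisted: computed once
    System.out.println(
        canUseTemplateForAggregation(devices) && canUseTemplateForRawQuery(devices));
  }
}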
null : schema.getType(); } public void setType(String symbol, TSDataType dataType) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/StorageGroupCacheResult.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/DatabaseCacheResult.java similarity index 97% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/StorageGroupCacheResult.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/DatabaseCacheResult.java index 53771fa18710a..52b6fd258a904 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/StorageGroupCacheResult.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/DatabaseCacheResult.java @@ -24,7 +24,7 @@ import java.util.List; import java.util.Map; -public abstract class StorageGroupCacheResult { +public abstract class DatabaseCacheResult { /** the result */ private boolean success = true; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java index 56934931b8cc0..2c62d8a67ffea 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java @@ -29,6 +29,7 @@ import org.apache.iotdb.commons.client.exception.ClientManagerException; import org.apache.iotdb.commons.consensus.ConfigRegionId; import org.apache.iotdb.commons.exception.IoTDBException; +import org.apache.iotdb.commons.exception.IoTDBRuntimeException; import org.apache.iotdb.commons.exception.MetadataException; import org.apache.iotdb.commons.partition.DataPartition; import org.apache.iotdb.commons.partition.DataPartitionQueryParam; @@ -67,9 +68,9 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -137,8 +138,8 @@ public PartitionCache() { */ public Map> getStorageGroupToDevice( List devicePaths, boolean tryToFetch, boolean isAutoCreate, String userName) { - StorageGroupCacheResult> result = - new StorageGroupCacheResult>() { + DatabaseCacheResult> result = + new DatabaseCacheResult>() { @Override public void put(String device, String storageGroupName) { map.computeIfAbsent(storageGroupName, k -> new ArrayList<>()); @@ -159,8 +160,8 @@ public void put(String device, String storageGroupName) { */ public Map getDeviceToStorageGroup( List devicePaths, boolean tryToFetch, boolean isAutoCreate, String userName) { - StorageGroupCacheResult result = - new StorageGroupCacheResult() { + DatabaseCacheResult result = + new DatabaseCacheResult() { @Override public void put(String device, String storageGroupName) { map.put(device, storageGroupName); @@ -194,13 +195,13 @@ private String getStorageGroupName(String devicePath) { * @param devicePaths the devices that need to hit */ private void fetchStorageGroupAndUpdateCache( - StorageGroupCacheResult result, List devicePaths) + 
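// ---------------------------------------------------------------------------
// The new TypeProvider.getType falls back to the template's measurement
// schemas when a symbol is absent from the per-query type map, so queries on
// template-backed devices resolve types without materializing every symbol.
// A compact model of that two-level lookup; the types and names here are
// illustrative, not the real TypeProvider:
// ---------------------------------------------------------------------------
import java.util.HashMap;
import java.util.Map;

public final class TwoLevelTypeLookupDemo {

  enum DataType { INT32, DOUBLE, TEXT }

  private final Map<String, DataType> typeMap = new HashMap<>();
  private final Map<String, DataType> templateSchemaMap; // may be null

  TwoLevelTypeLookupDemo(Map<String, DataType> templateSchemaMap) {
    this.templateSchemaMap = templateSchemaMap;
  }

  DataType getType(String symbol) {
    DataType type = typeMap.get(symbol);
    // Fast path: an explicit entry wins; fall back only when a template exists.
    if (templateSchemaMap == null || type != null) {
      return type;
    }
    return templateSchemaMap.get(symbol); // null if the template lacks it too
  }

  public static void main(String[] args) {
    Map<String, DataType> template = new HashMap<>();
    template.put("s1", DataType.DOUBLE);
    TwoLevelTypeLookupDemo provider = new TwoLevelTypeLookupDemo(template);
    provider.typeMap.put("time", DataType.INT32);
    System.out.println(provider.getType("time")); // INT32 from the type map
    System.out.println(provider.getType("s1"));   // DOUBLE via template fallback
  }
}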
DatabaseCacheResult result, List devicePaths) throws ClientManagerException, TException { storageGroupCacheLock.writeLock().lock(); try (ConfigNodeClient client = configNodeClientManager.borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) { result.reset(); - getStorageGroupMap(result, devicePaths, true); + getDatabaseMap(result, devicePaths, true); if (!result.isSuccess()) { TGetDatabaseReq req = new TGetDatabaseReq(ROOT_PATH, SchemaConstant.ALL_MATCH_SCOPE_BINARY); TDatabaseSchemaResp storageGroupSchemaResp = client.getMatchedDatabaseSchemas(req); @@ -209,7 +210,7 @@ private void fetchStorageGroupAndUpdateCache( Set storageGroupNames = storageGroupSchemaResp.getDatabaseSchemaMap().keySet(); // update all database into cache updateStorageCache(storageGroupNames); - getStorageGroupMap(result, devicePaths, true); + getDatabaseMap(result, devicePaths, true); } } } finally { @@ -226,7 +227,7 @@ private void fetchStorageGroupAndUpdateCache( * @throws RuntimeException if failed to create database */ private void createStorageGroupAndUpdateCache( - StorageGroupCacheResult result, List devicePaths, String userName) + DatabaseCacheResult result, List devicePaths, String userName) throws ClientManagerException, MetadataException, TException { storageGroupCacheLock.writeLock().lock(); try (ConfigNodeClient client = @@ -234,25 +235,25 @@ private void createStorageGroupAndUpdateCache( // Try to check whether database need to be created result.reset(); // Try to hit database with all missed devices - getStorageGroupMap(result, devicePaths, false); + getDatabaseMap(result, devicePaths, false); if (!result.isSuccess()) { // Try to get database needed to be created from missed device - Set storageGroupNamesNeedCreated = new HashSet<>(); + Set databaseNamesNeedCreated = new HashSet<>(); for (String devicePath : result.getMissedDevices()) { if (devicePath.equals(SchemaConstant.SYSTEM_DATABASE) || devicePath.startsWith(SchemaConstant.SYSTEM_DATABASE + ".")) { - storageGroupNamesNeedCreated.add(SchemaConstant.SYSTEM_DATABASE); + databaseNamesNeedCreated.add(SchemaConstant.SYSTEM_DATABASE); } else { - PartialPath storageGroupNameNeedCreated = + PartialPath databaseNameNeedCreated = MetaUtils.getStorageGroupPathByLevel( new PartialPath(devicePath), config.getDefaultStorageGroupLevel()); - storageGroupNamesNeedCreated.add(storageGroupNameNeedCreated.getFullPath()); + databaseNamesNeedCreated.add(databaseNameNeedCreated.getFullPath()); } } // Try to create databases one by one until done or one database fail - Set successFullyCreatedStorageGroup = new HashSet<>(); - for (String storageGroupName : storageGroupNamesNeedCreated) { + Set successFullyCreatedDatabases = new HashSet<>(); + for (String databaseName : databaseNamesNeedCreated) { long startTime = System.nanoTime(); try { if (!AuthorityChecker.SUPER_USER.equals(userName)) { @@ -270,31 +271,30 @@ private void createStorageGroupAndUpdateCache( PerformanceOverviewMetrics.getInstance().recordAuthCost(System.nanoTime() - startTime); } TDatabaseSchema storageGroupSchema = new TDatabaseSchema(); - storageGroupSchema.setName(storageGroupName); - if (SchemaConstant.SYSTEM_DATABASE.equals(storageGroupName)) { - storageGroupSchema.setSchemaReplicationFactor(1); - storageGroupSchema.setDataReplicationFactor(1); + storageGroupSchema.setName(databaseName); + if (SchemaConstant.SYSTEM_DATABASE.equals(databaseName)) { storageGroupSchema.setMinSchemaRegionGroupNum(1); storageGroupSchema.setMaxSchemaRegionGroupNum(1); storageGroupSchema.setMaxDataRegionGroupNum(1); 
storageGroupSchema.setMaxDataRegionGroupNum(1); } TSStatus tsStatus = client.setDatabase(storageGroupSchema); - if (TSStatusCode.SUCCESS_STATUS.getStatusCode() == tsStatus.getCode()) { - successFullyCreatedStorageGroup.add(storageGroupName); - } else { + if (TSStatusCode.SUCCESS_STATUS.getStatusCode() == tsStatus.getCode() + || TSStatusCode.DATABASE_ALREADY_EXISTS.getStatusCode() == tsStatus.getCode()) { + successFullyCreatedDatabases.add(databaseName); + } else if (TSStatusCode.DATABASE_CONFLICT.getStatusCode() != tsStatus.getCode()) { // Try to update cache by databases successfully created - updateStorageCache(successFullyCreatedStorageGroup); + updateStorageCache(successFullyCreatedDatabases); logger.warn( "[{} Cache] failed to create database {}", CacheMetrics.STORAGE_GROUP_CACHE_NAME, - storageGroupName); + databaseName); throw new RuntimeException(new IoTDBException(tsStatus.message, tsStatus.code)); } } // Try to update database cache when all databases has already been created - updateStorageCache(storageGroupNamesNeedCreated); - getStorageGroupMap(result, devicePaths, false); + updateStorageCache(databaseNamesNeedCreated); + getDatabaseMap(result, devicePaths, false); } } finally { storageGroupCacheLock.writeLock().unlock(); @@ -308,8 +308,8 @@ private void createStorageGroupAndUpdateCache( * @param devicePaths the devices that need to hit * @param failFast if true, return when failed. if false, return when all devices hit */ - private void getStorageGroupMap( - StorageGroupCacheResult result, List devicePaths, boolean failFast) { + private void getDatabaseMap( + DatabaseCacheResult result, List devicePaths, boolean failFast) { storageGroupCacheLock.readLock().lock(); try { // reset result before try @@ -318,10 +318,12 @@ private void getStorageGroupMap( for (String devicePath : devicePaths) { String storageGroupName = getStorageGroupName(devicePath); if (null == storageGroupName) { - logger.debug( - "[{} Cache] miss when search device {}", - CacheMetrics.STORAGE_GROUP_CACHE_NAME, - devicePath); + if (logger.isDebugEnabled()) { + logger.debug( + "[{} Cache] miss when search device {}", + CacheMetrics.STORAGE_GROUP_CACHE_NAME, + devicePath); + } status = false; if (failFast) { break; @@ -336,10 +338,12 @@ private void getStorageGroupMap( if (!status) { result.setFailed(); } - logger.debug( - "[{} Cache] hit when search device {}", - CacheMetrics.STORAGE_GROUP_CACHE_NAME, - devicePaths); + if (logger.isDebugEnabled()) { + logger.debug( + "[{} Cache] hit when search device {}", + CacheMetrics.STORAGE_GROUP_CACHE_NAME, + devicePaths); + } cacheMetrics.record(status, CacheMetrics.STORAGE_GROUP_CACHE_NAME); } finally { storageGroupCacheLock.readLock().unlock(); @@ -356,7 +360,7 @@ private void getStorageGroupMap( * @param userName */ private void getStorageGroupCacheResult( - StorageGroupCacheResult result, + DatabaseCacheResult result, List devicePaths, boolean tryToFetch, boolean isAutoCreate, @@ -370,7 +374,7 @@ private void getStorageGroupCacheResult( } } // first try to hit database in fast-fail way - getStorageGroupMap(result, devicePaths, true); + getDatabaseMap(result, devicePaths, true); if (!result.isSuccess() && tryToFetch) { try { // try to fetch database from config node when miss @@ -382,9 +386,13 @@ private void getStorageGroupCacheResult( throw new StatementAnalyzeException("Failed to get database Map"); } } - } catch (TException | MetadataException | ClientManagerException e) { + } catch (MetadataException e) { + throw new IoTDBRuntimeException( + "An error occurred 
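// ---------------------------------------------------------------------------
// The status handling above makes auto-creation idempotent: DATABASE_ALREADY_
// EXISTS now counts as success (another writer simply won the race), and
// DATABASE_CONFLICT is tolerated so the later partition fetch can decide. A
// condensed model of that three-way branch, with made-up status codes:
// ---------------------------------------------------------------------------
import java.util.HashSet;
import java.util.Set;

public final class IdempotentCreateDemo {

  static final int SUCCESS = 200;
  static final int ALREADY_EXISTS = 300;
  static final int CONFLICT = 301;

  static void handle(int code, String database, Set<String> created) {
    if (code == SUCCESS || code == ALREADY_EXISTS) {
      created.add(database); // idempotent: concurrent creation is fine
    } else if (code != CONFLICT) {
      throw new IllegalStateException("failed to create database " + database);
    }
    // CONFLICT: neither success nor fatal; defer to the partition fetch
  }

  public static void main(String[] args) {
    Set<String> created = new HashSet<>();
    handle(SUCCESS, "root.db1", created);
    handle(ALREADY_EXISTS, "root.db2", created);
    handle(CONFLICT, "root.db3", created);
    System.out.println(created); // contains root.db1 and root.db2 only
  }
}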
when executing getDeviceToDatabase():" + e.getMessage(), + e.getErrorCode()); + } catch (TException | ClientManagerException e) { throw new StatementAnalyzeException( - "An error occurred when executing getDeviceToStorageGroup():" + e.getMessage()); + "An error occurred when executing getDeviceToDatabase():" + e.getMessage(), e); } } } @@ -436,44 +444,53 @@ public void removeFromStorageGroupCache() { /** * get regionReplicaSet from local and confignode * - * @param consensusGroupId the id of consensus group - * @return regionReplicaSet + * @param consensusGroupIds the ids of consensus group + * @return List * @throws RuntimeException if failed to get regionReplicaSet from confignode * @throws StatementAnalyzeException if there are exception when try to get latestRegionRouteMap */ - public TRegionReplicaSet getRegionReplicaSet(TConsensusGroupId consensusGroupId) { - TRegionReplicaSet result; + public List getRegionReplicaSet(List consensusGroupIds) { + if (consensusGroupIds.isEmpty()) { + return Collections.emptyList(); + } + List result; // try to get regionReplicaSet from cache regionReplicaSetLock.readLock().lock(); try { - result = groupIdToReplicaSetMap.get(consensusGroupId); + result = getRegionReplicaSetInternal(consensusGroupIds); } finally { regionReplicaSetLock.readLock().unlock(); } - if (result == null) { + if (result.isEmpty()) { // if not hit then try to get regionReplicaSet from confignode regionReplicaSetLock.writeLock().lock(); try { - // verify that there are not hit in cache - if (!groupIdToReplicaSetMap.containsKey(consensusGroupId)) { + // double check after getting the write lock + result = getRegionReplicaSetInternal(consensusGroupIds); + if (result.isEmpty()) { try (ConfigNodeClient client = configNodeClientManager.borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) { TRegionRouteMapResp resp = client.getLatestRegionRouteMap(); if (TSStatusCode.SUCCESS_STATUS.getStatusCode() == resp.getStatus().getCode()) { updateGroupIdToReplicaSetMap(resp.getTimestamp(), resp.getRegionRouteMap()); + } else { + logger.warn( + "Unexpected error when getRegionReplicaSet: status {}, regionMap: {}", + resp.getStatus(), + resp.getRegionRouteMap()); } + result = getRegionReplicaSetInternal(consensusGroupIds); // if confignode don't have then will throw RuntimeException - if (!groupIdToReplicaSetMap.containsKey(consensusGroupId)) { + if (result.isEmpty()) { // failed to get RegionReplicaSet from confignode throw new RuntimeException( - "Failed to get replicaSet of consensus group[id= " + consensusGroupId + "]"); + "Failed to get replicaSet of consensus groups[ids= " + consensusGroupIds + "]"); } } catch (ClientManagerException | TException e) { throw new StatementAnalyzeException( "An error occurred when executing getRegionReplicaSet():" + e.getMessage()); } } - result = groupIdToReplicaSetMap.get(consensusGroupId); } finally { regionReplicaSetLock.writeLock().unlock(); } @@ -482,6 +499,20 @@ public TRegionReplicaSet getRegionReplicaSet(TConsensusGroupId consensusGroupId) return result; } + private List getRegionReplicaSetInternal( + List consensusGroupIds) { + List result = new ArrayList<>(consensusGroupIds.size()); + for (TConsensusGroupId groupId : consensusGroupIds) { + TRegionReplicaSet replicaSet = groupIdToReplicaSetMap.get(groupId); + if (replicaSet != null) { + result.add(replicaSet); + } else { + return Collections.emptyList(); + } + } + return result; + } + /** * update regionReplicaSetMap according to timestamp * @@ -543,34 +574,45 @@ public SchemaPartition getSchemaPartition(Map> 
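// ---------------------------------------------------------------------------
// The batched getRegionReplicaSet above is a classic read-lock / write-lock
// double check: probe the cache under the shared read lock, and only on a
// miss take the write lock, re-check (another thread may have filled it),
// then fetch and populate. A stripped-down version of the pattern; the loader
// function stands in for the ConfigNode RPC:
// ---------------------------------------------------------------------------
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Function;

public final class DoubleCheckedCache<K, V> {

  private final Map<K, V> cache = new HashMap<>();
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final Function<K, V> loader;

  public DoubleCheckedCache(Function<K, V> loader) {
    this.loader = loader;
  }

  public V get(K key) {
    lock.readLock().lock();
    try {
      V value = cache.get(key);
      if (value != null) {
        return value; // fast path: shared read lock only
      }
    } finally {
      lock.readLock().unlock();
    }

    lock.writeLock().lock();
    try {
      // Double check: another thread may have loaded it while we waited.
      V value = cache.get(key);
      if (value == null) {
        value = loader.apply(key); // e.g. an RPC to fetch the route map
        cache.put(key, value);
      }
      return value;
    } finally {
      lock.writeLock().unlock();
    }
  }

  public static void main(String[] args) {
    DoubleCheckedCache<Integer, String> c = new DoubleCheckedCache<>(id -> "replica-" + id);
    System.out.println(c.get(1)); // loads, then caches
    System.out.println(c.get(1)); // served from cache
  }
}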
storageGroup schemaPartitionCache.getIfPresent(storageGroupName); if (null == schemaPartitionTable) { // if database not find, then return cache miss. - logger.debug( - "[{} Cache] miss when search database {}", - CacheMetrics.SCHEMA_PARTITION_CACHE_NAME, - storageGroupName); + if (logger.isDebugEnabled()) { + logger.debug( + "[{} Cache] miss when search database {}", + CacheMetrics.SCHEMA_PARTITION_CACHE_NAME, + storageGroupName); + } cacheMetrics.record(false, CacheMetrics.SCHEMA_PARTITION_CACHE_NAME); return null; } Map map = schemaPartitionTable.getSchemaPartitionMap(); // check cache for each device + List seriesPartitionSlots = new ArrayList<>(entry.getValue().size()); + List consensusGroupIds = new ArrayList<>(entry.getValue().size()); for (String device : entry.getValue()) { TSeriesPartitionSlot seriesPartitionSlot = partitionExecutor.getSeriesPartitionSlot(device); if (!map.containsKey(seriesPartitionSlot)) { // if one device not find, then return cache miss. - logger.debug( - "[{} Cache] miss when search device {}", - CacheMetrics.SCHEMA_PARTITION_CACHE_NAME, - device); + if (logger.isDebugEnabled()) { + logger.debug( + "[{} Cache] miss when search device {}", + CacheMetrics.SCHEMA_PARTITION_CACHE_NAME, + device); + } cacheMetrics.record(false, CacheMetrics.SCHEMA_PARTITION_CACHE_NAME); return null; } - TConsensusGroupId consensusGroupId = map.get(seriesPartitionSlot); - TRegionReplicaSet regionReplicaSet = getRegionReplicaSet(consensusGroupId); - regionReplicaSetMap.put(seriesPartitionSlot, regionReplicaSet); + seriesPartitionSlots.add(seriesPartitionSlot); + consensusGroupIds.add(map.get(seriesPartitionSlot)); + } + List replicaSets = getRegionReplicaSet(consensusGroupIds); + for (int i = 0; i < replicaSets.size(); i++) { + regionReplicaSetMap.put(seriesPartitionSlots.get(i), replicaSets.get(i)); } } - logger.debug("[{} Cache] hit", CacheMetrics.SCHEMA_PARTITION_CACHE_NAME); + if (logger.isDebugEnabled()) { + logger.debug("[{} Cache] hit", CacheMetrics.SCHEMA_PARTITION_CACHE_NAME); + } // cache hit cacheMetrics.record(true, CacheMetrics.SCHEMA_PARTITION_CACHE_NAME); return new SchemaPartition( @@ -644,24 +686,119 @@ public DataPartition getDataPartition( Map> storageGroupToQueryParamsMap) { dataPartitionCacheLock.readLock().lock(); try { - if (storageGroupToQueryParamsMap.size() == 0) { + if (storageGroupToQueryParamsMap.isEmpty()) { cacheMetrics.record(false, CacheMetrics.DATA_PARTITION_CACHE_NAME); return null; } - Map>>> - dataPartitionMap = new HashMap<>(); + + final Set allConsensusGroupIds = new HashSet<>(); + final Map> consensusGroupToTimeSlotMap = + new HashMap<>(); + // check cache for each database for (Map.Entry> entry : storageGroupToQueryParamsMap.entrySet()) { - if (null == entry.getValue() - || entry.getValue().isEmpty() - || !getStorageGroupDataPartition(dataPartitionMap, entry.getKey(), entry.getValue())) { + String databaseName = entry.getKey(); + List params = entry.getValue(); + + if (null == params || params.isEmpty()) { + cacheMetrics.record(false, CacheMetrics.DATA_PARTITION_CACHE_NAME); + return null; + } + + DataPartitionTable dataPartitionTable = dataPartitionCache.getIfPresent(databaseName); + if (null == dataPartitionTable) { + if (logger.isDebugEnabled()) { + logger.debug( + "[{} Cache] miss when search database {}", + CacheMetrics.DATA_PARTITION_CACHE_NAME, + databaseName); + } cacheMetrics.record(false, CacheMetrics.DATA_PARTITION_CACHE_NAME); return null; } + + Map cachedDatabasePartitionMap = + dataPartitionTable.getDataPartitionMap(); + + for 
(DataPartitionQueryParam param : params) { + TSeriesPartitionSlot seriesPartitionSlot; + if (null != param.getDevicePath()) { + seriesPartitionSlot = partitionExecutor.getSeriesPartitionSlot(param.getDevicePath()); + } else { + return null; + } + + SeriesPartitionTable cachedSeriesPartitionTable = + cachedDatabasePartitionMap.get(seriesPartitionSlot); + if (null == cachedSeriesPartitionTable) { + if (logger.isDebugEnabled()) { + logger.debug( + "[{} Cache] miss when search device {}", + CacheMetrics.DATA_PARTITION_CACHE_NAME, + param.getDevicePath()); + } + cacheMetrics.record(false, CacheMetrics.DATA_PARTITION_CACHE_NAME); + return null; + } + + Map> cachedTimePartitionSlot = + cachedSeriesPartitionTable.getSeriesPartitionMap(); + + if (param.getTimePartitionSlotList().isEmpty()) { + return null; + } + + for (TTimePartitionSlot timePartitionSlot : param.getTimePartitionSlotList()) { + List cacheConsensusGroupIds = + cachedTimePartitionSlot.get(timePartitionSlot); + if (null == cacheConsensusGroupIds + || cacheConsensusGroupIds.isEmpty() + || null == timePartitionSlot) { + if (logger.isDebugEnabled()) { + logger.debug( + "[{} Cache] miss when search time partition {}", + CacheMetrics.DATA_PARTITION_CACHE_NAME, + timePartitionSlot); + } + cacheMetrics.record(false, CacheMetrics.DATA_PARTITION_CACHE_NAME); + return null; + } + + for (TConsensusGroupId groupId : cacheConsensusGroupIds) { + allConsensusGroupIds.add(groupId); + consensusGroupToTimeSlotMap + .computeIfAbsent(groupId, k -> new HashSet<>()) + .add( + new TimeSlotRegionInfo(databaseName, seriesPartitionSlot, timePartitionSlot)); + } + } + } } - logger.debug("[{} Cache] hit", CacheMetrics.DATA_PARTITION_CACHE_NAME); - // cache hit + + final List consensusGroupIds = new ArrayList<>(allConsensusGroupIds); + final List allRegionReplicaSets = getRegionReplicaSet(consensusGroupIds); + + Map>>> + dataPartitionMap = new HashMap<>(); + + for (int i = 0; i < allRegionReplicaSets.size(); i++) { + TConsensusGroupId groupId = consensusGroupIds.get(i); + TRegionReplicaSet replicaSet = allRegionReplicaSets.get(i); + + for (TimeSlotRegionInfo info : consensusGroupToTimeSlotMap.get(groupId)) { + dataPartitionMap + .computeIfAbsent(info.databaseName, k -> new HashMap<>()) + .computeIfAbsent(info.seriesPartitionSlot, k -> new HashMap<>()) + .computeIfAbsent(info.timePartitionSlot, k -> new ArrayList<>()) + .add(replicaSet); + } + } + + if (logger.isDebugEnabled()) { + logger.debug("[{} Cache] hit", CacheMetrics.DATA_PARTITION_CACHE_NAME); + } + cacheMetrics.record(true, CacheMetrics.DATA_PARTITION_CACHE_NAME); return new DataPartition(dataPartitionMap, seriesSlotExecutorName, seriesPartitionSlotNum); } finally { @@ -669,120 +806,39 @@ public DataPartition getDataPartition( } } - /** - * get dataPartition from database - * - * @param dataPartitionMap result - * @param storageGroupName database that need to get - * @param dataPartitionQueryParams specific query params of data partition - * @return whether hit - */ - private boolean getStorageGroupDataPartition( - Map>>> - dataPartitionMap, - String storageGroupName, - List dataPartitionQueryParams) { - DataPartitionTable dataPartitionTable = dataPartitionCache.getIfPresent(storageGroupName); - if (null == dataPartitionTable) { - logger.debug( - "[{} Cache] miss when search database {}", - CacheMetrics.DATA_PARTITION_CACHE_NAME, - storageGroupName); - return false; - } - Map cachedStorageGroupPartitionMap = - dataPartitionTable.getDataPartitionMap(); - Map>> - seriesSlotToTimePartitionMap = - 
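// ---------------------------------------------------------------------------
// The rewritten getDataPartition first groups every (database, seriesSlot,
// timeSlot) triple by its consensus group, resolves all groups with one
// batched getRegionReplicaSet call, and only then rebuilds the nested
// partition map with a computeIfAbsent chain. The nesting idiom in isolation:
// ---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public final class NestedMapDemo {

  public static void main(String[] args) {
    // database -> seriesSlot -> timeSlot -> replicas
    Map<String, Map<Integer, Map<Long, List<String>>>> partitionMap = new HashMap<>();

    // Each computeIfAbsent materializes one missing level exactly once.
    partitionMap
        .computeIfAbsent("root.db1", k -> new HashMap<>())
        .computeIfAbsent(42, k -> new HashMap<>())
        .computeIfAbsent(1700000000L, k -> new ArrayList<>())
        .add("replica-set-7");

    partitionMap
        .computeIfAbsent("root.db1", k -> new HashMap<>()) // reuses the inner map
        .computeIfAbsent(42, k -> new HashMap<>())
        .computeIfAbsent(1700000000L, k -> new ArrayList<>())
        .add("replica-set-8");

    System.out.println(partitionMap);
    // {root.db1={42={1700000000=[replica-set-7, replica-set-8]}}}
  }
}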
dataPartitionMap.computeIfAbsent(storageGroupName, k -> new HashMap<>()); - // check cache for each device - for (DataPartitionQueryParam dataPartitionQueryParam : dataPartitionQueryParams) { - if (!getDeviceDataPartition( - seriesSlotToTimePartitionMap, dataPartitionQueryParam, cachedStorageGroupPartitionMap)) { - return false; - } + private static class TimeSlotRegionInfo { + final String databaseName; + final TSeriesPartitionSlot seriesPartitionSlot; + final TTimePartitionSlot timePartitionSlot; + + TimeSlotRegionInfo( + String databaseName, + TSeriesPartitionSlot seriesPartitionSlot, + TTimePartitionSlot timePartitionSlot) { + this.databaseName = databaseName; + this.seriesPartitionSlot = seriesPartitionSlot; + this.timePartitionSlot = timePartitionSlot; } - return true; - } - /** - * get dataPartition from device - * - * @param seriesSlotToTimePartitionMap result - * @param dataPartitionQueryParam specific query param of data partition - * @param cachedStorageGroupPartitionMap all cached data partition map of related database - * @return whether hit - */ - private boolean getDeviceDataPartition( - Map>> - seriesSlotToTimePartitionMap, - DataPartitionQueryParam dataPartitionQueryParam, - Map cachedStorageGroupPartitionMap) { - TSeriesPartitionSlot seriesPartitionSlot; - if (null != dataPartitionQueryParam.getDevicePath()) { - seriesPartitionSlot = - partitionExecutor.getSeriesPartitionSlot(dataPartitionQueryParam.getDevicePath()); - } else { - return false; - } - SeriesPartitionTable cachedSeriesPartitionTable = - cachedStorageGroupPartitionMap.get(seriesPartitionSlot); - if (null == cachedSeriesPartitionTable) { - if (logger.isDebugEnabled()) { - logger.debug( - "[{} Cache] miss when search device {}", - CacheMetrics.DATA_PARTITION_CACHE_NAME, - dataPartitionQueryParam.getDevicePath()); - } - return false; - } - Map> cachedTimePartitionSlot = - cachedSeriesPartitionTable.getSeriesPartitionMap(); - Map> timePartitionSlotListMap = - seriesSlotToTimePartitionMap.computeIfAbsent(seriesPartitionSlot, k -> new HashMap<>()); - // Notice: when query all time partition, then miss - if (dataPartitionQueryParam.getTimePartitionSlotList().isEmpty()) { - return false; - } - // check cache for each time partition - for (TTimePartitionSlot timePartitionSlot : - dataPartitionQueryParam.getTimePartitionSlotList()) { - if (!getTimeSlotDataPartition( - timePartitionSlotListMap, timePartitionSlot, cachedTimePartitionSlot)) { + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) { return false; } - } - return true; - } - /** - * get dataPartition from time slot - * - * @param timePartitionSlotListMap result - * @param timePartitionSlot the specific time partition slot of data partition - * @param cachedTimePartitionSlot all cached time slot map of related device - * @return whether hit - */ - private boolean getTimeSlotDataPartition( - Map> timePartitionSlotListMap, - TTimePartitionSlot timePartitionSlot, - Map> cachedTimePartitionSlot) { - List cacheConsensusGroupId = cachedTimePartitionSlot.get(timePartitionSlot); - if (null == cacheConsensusGroupId - || cacheConsensusGroupId.isEmpty() - || null == timePartitionSlot) { - logger.debug( - "[{} Cache] miss when search time partition {}", - CacheMetrics.DATA_PARTITION_CACHE_NAME, - timePartitionSlot); - return false; + TimeSlotRegionInfo that = (TimeSlotRegionInfo) o; + return Objects.equals(databaseName, that.databaseName) + && Objects.equals(seriesPartitionSlot, that.seriesPartitionSlot) + && 
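// ---------------------------------------------------------------------------
// TimeSlotRegionInfo is stored in a HashSet keyed by (database, seriesSlot,
// timeSlot), so equals() and hashCode() must agree on exactly those three
// fields; the 31-multiplier chain above is the standard manual expansion of
// Objects.hash(...). Equivalent, shorter spellings, shown here with
// simplified field types (illustrative only; the record form additionally
// needs a Java 16+ toolchain):
// ---------------------------------------------------------------------------
import java.util.Objects;

final class TimeSlotKey {
  final String databaseName;
  final int seriesSlot;
  final long timeSlot;

  TimeSlotKey(String databaseName, int seriesSlot, long timeSlot) {
    this.databaseName = databaseName;
    this.seriesSlot = seriesSlot;
    this.timeSlot = timeSlot;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof TimeSlotKey)) {
      return false;
    }
    TimeSlotKey that = (TimeSlotKey) o;
    return seriesSlot == that.seriesSlot
        && timeSlot == that.timeSlot
        && Objects.equals(databaseName, that.databaseName);
  }

  @Override
  public int hashCode() {
    // One call replaces the manual 31 * result + ... chain.
    return Objects.hash(databaseName, seriesSlot, timeSlot);
  }
}
// On Java 16+ the whole class collapses to:
//   record TimeSlotKey(String databaseName, int seriesSlot, long timeSlot) {}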
Objects.equals(timePartitionSlot, that.timePartitionSlot); } - List regionReplicaSets = new LinkedList<>(); - for (TConsensusGroupId consensusGroupId : cacheConsensusGroupId) { - regionReplicaSets.add(getRegionReplicaSet(consensusGroupId)); + + @Override + public int hashCode() { + int result = Objects.hashCode(databaseName); + result = 31 * result + Objects.hashCode(seriesPartitionSlot); + result = 31 * result + Objects.hashCode(timePartitionSlot); + return result; } - timePartitionSlotListMap.put(timePartitionSlot, regionReplicaSets); - return true; } /** @@ -865,12 +921,16 @@ public void invalidAllDataPartitionCache() { // endregion public void invalidAllCache() { - logger.debug("[Partition Cache] invalid"); + if (logger.isDebugEnabled()) { + logger.debug("[Partition Cache] invalid"); + } removeFromStorageGroupCache(); invalidAllDataPartitionCache(); invalidAllSchemaPartitionCache(); invalidReplicaSetCache(); - logger.debug("[Partition Cache] is invalid:{}", this); + if (logger.isDebugEnabled()) { + logger.debug("[Partition Cache] is invalid:{}", this); + } } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/DataNodeTTLCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/DataNodeTTLCache.java index 5c7623051599a..095a78b9b2c40 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/DataNodeTTLCache.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/DataNodeTTLCache.java @@ -122,6 +122,15 @@ public long getTTLInMS(String[] path) { } } + public boolean dataInDatabaseMayHaveTTL(String db) throws IllegalPathException { + lock.readLock().lock(); + try { + return ttlCache.dataInDatabaseMayHaveTTL(db); + } finally { + lock.readLock().unlock(); + } + } + /** * Get ttl of one specific path node without time precision conversion. If this node does not set * ttl, then return -1. diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/LastCacheLoadStrategy.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/LastCacheLoadStrategy.java new file mode 100644 index 0000000000000..d9738e7d3044d --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/LastCacheLoadStrategy.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iotdb.db.queryengine.plan.relational.metadata.fetcher.cache; + +public enum LastCacheLoadStrategy { + // when a TsFile is loaded, read its data to update LastCache + UPDATE, + // similar to UPDATE, but will invalidate cache of Blob series instead of updating them + UPDATE_NO_BLOB, + // when a TsFile is loaded, clean its included device in LastCache + CLEAN_DEVICE, + // when a TsFile is loaded, clean all LastCache + CLEAN_ALL +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/TimeSeriesSchemaCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/TimeSeriesSchemaCache.java index 99d3d17f66608..fadd38c5ae437 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/TimeSeriesSchemaCache.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/schema/TimeSeriesSchemaCache.java @@ -309,7 +309,7 @@ public int updateValue(int index, SchemaCacheEntry value) { if (entry == null) { synchronized (dualKeyCache) { entry = dualKeyCache.get(devicePath, measurements[index]); - if (null == entry) { + if (null == entry && measurementSchemas != null) { entry = new SchemaCacheEntry(database, measurementSchemas[index], null, isAligned); dualKeyCache.put(devicePath, measurements[index], entry); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/LoadTsfileAnalyzer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileAnalyzer.java similarity index 60% rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/LoadTsfileAnalyzer.java rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileAnalyzer.java index b9b229f8705e5..a02393d390699 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/LoadTsfileAnalyzer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileAnalyzer.java @@ -17,7 +17,7 @@ * under the License. 
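// ---------------------------------------------------------------------------
// LastCacheLoadStrategy enumerates how the last-value cache reacts when a
// TsFile is loaded. A consumer would typically dispatch on it with an
// exhaustive switch; a hypothetical consumer is sketched below (the cache
// actions printed here are stand-ins, not IoTDB APIs):
// ---------------------------------------------------------------------------
public final class LastCacheLoadDemo {

  enum LastCacheLoadStrategy { UPDATE, UPDATE_NO_BLOB, CLEAN_DEVICE, CLEAN_ALL }

  static void onTsFileLoaded(LastCacheLoadStrategy strategy) {
    switch (strategy) {
      case UPDATE:
        System.out.println("read loaded data, refresh cached last values");
        break;
      case UPDATE_NO_BLOB:
        System.out.println("refresh last values, but invalidate BLOB series");
        break;
      case CLEAN_DEVICE:
        System.out.println("evict only devices contained in the loaded file");
        break;
      case CLEAN_ALL:
        System.out.println("drop the whole last cache");
        break;
      default:
        throw new IllegalArgumentException("unknown strategy: " + strategy);
    }
  }

  public static void main(String[] args) {
    onTsFileLoaded(LastCacheLoadStrategy.UPDATE_NO_BLOB);
  }
}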
*/ -package org.apache.iotdb.db.queryengine.plan.analyze; +package org.apache.iotdb.db.queryengine.plan.analyze.load; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.auth.AuthException; @@ -28,6 +28,7 @@ import org.apache.iotdb.commons.consensus.ConfigRegionId; import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.commons.path.PatternTreeMap; import org.apache.iotdb.commons.schema.SchemaConstant; import org.apache.iotdb.commons.service.metric.PerformanceOverviewMetrics; import org.apache.iotdb.confignode.rpc.thrift.TGetDatabaseReq; @@ -35,10 +36,11 @@ import org.apache.iotdb.db.auth.AuthorityChecker; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.LoadFileException; -import org.apache.iotdb.db.exception.LoadReadOnlyException; -import org.apache.iotdb.db.exception.LoadRuntimeOutOfMemoryException; -import org.apache.iotdb.db.exception.VerifyMetadataException; +import org.apache.iotdb.db.exception.LoadAnalyzeException; +import org.apache.iotdb.db.exception.LoadAnalyzeTypeMismatchException; +import org.apache.iotdb.db.exception.load.LoadEmptyFileException; +import org.apache.iotdb.db.exception.load.LoadFileException; +import org.apache.iotdb.db.exception.load.LoadRuntimeOutOfMemoryException; import org.apache.iotdb.db.exception.sql.SemanticException; import org.apache.iotdb.db.protocol.client.ConfigNodeClient; import org.apache.iotdb.db.protocol.client.ConfigNodeClientManager; @@ -47,9 +49,9 @@ import org.apache.iotdb.db.queryengine.common.MPPQueryContext; import org.apache.iotdb.db.queryengine.common.schematree.DeviceSchemaInfo; import org.apache.iotdb.db.queryengine.common.schematree.ISchemaTree; -import org.apache.iotdb.db.queryengine.load.LoadTsFileAnalyzeSchemaMemoryBlock; -import org.apache.iotdb.db.queryengine.load.LoadTsFileMemoryManager; import org.apache.iotdb.db.queryengine.plan.Coordinator; +import org.apache.iotdb.db.queryengine.plan.analyze.Analysis; +import org.apache.iotdb.db.queryengine.plan.analyze.IPartitionFetcher; import org.apache.iotdb.db.queryengine.plan.analyze.schema.ISchemaFetcher; import org.apache.iotdb.db.queryengine.plan.analyze.schema.SchemaValidator; import org.apache.iotdb.db.queryengine.plan.execution.ExecutionResult; @@ -57,14 +59,25 @@ import org.apache.iotdb.db.queryengine.plan.statement.crud.LoadTsFileStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.DatabaseSchemaStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.ShowDatabaseStatement; +import org.apache.iotdb.db.storageengine.dataregion.modification.Modification; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResourceStatus; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.FileTimeIndex; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.ITimeIndex; import org.apache.iotdb.db.storageengine.dataregion.utils.TsFileResourceUtils; +import org.apache.iotdb.db.storageengine.load.active.ActiveLoadUtil; +import org.apache.iotdb.db.storageengine.load.converter.LoadTsFileDataTypeConverter; +import org.apache.iotdb.db.storageengine.load.memory.LoadTsFileMemoryBlock; +import org.apache.iotdb.db.storageengine.load.memory.LoadTsFileMemoryManager; +import org.apache.iotdb.db.storageengine.load.metrics.LoadTsFileCostMetricsSet; +import 
org.apache.iotdb.db.utils.ModificationUtils; import org.apache.iotdb.db.utils.TimestampPrecisionUtils; import org.apache.iotdb.db.utils.constant.SqlConstant; +import org.apache.iotdb.db.utils.datastructure.PatternTreeMapFactory; import org.apache.iotdb.rpc.RpcUtils; import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.commons.io.FileUtils; import org.apache.thrift.TException; import org.apache.tsfile.common.constant.TsFileConstant; import org.apache.tsfile.enums.TSDataType; @@ -81,6 +94,7 @@ import org.slf4j.LoggerFactory; import java.io.File; +import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -93,9 +107,15 @@ import java.util.Set; import java.util.stream.Collectors; -public class LoadTsfileAnalyzer implements AutoCloseable { +import static org.apache.iotdb.db.storageengine.load.metrics.LoadTsFileCostMetricsSet.ANALYSIS; +import static org.apache.iotdb.db.storageengine.load.metrics.LoadTsFileCostMetricsSet.ANALYSIS_ASYNC_MOVE; - private static final Logger LOGGER = LoggerFactory.getLogger(LoadTsfileAnalyzer.class); +public class LoadTsFileAnalyzer implements AutoCloseable { + + private static final Logger LOGGER = LoggerFactory.getLogger(LoadTsFileAnalyzer.class); + + private static final LoadTsFileCostMetricsSet LOAD_TSFILE_COST_METRICS_SET = + LoadTsFileCostMetricsSet.getInstance(); private static final IClientManager CONFIG_NODE_CLIENT_MANAGER = ConfigNodeClientManager.getInstance(); @@ -121,7 +141,22 @@ public class LoadTsfileAnalyzer implements AutoCloseable { private final SchemaAutoCreatorAndVerifier schemaAutoCreatorAndVerifier; - LoadTsfileAnalyzer( + private final boolean isGeneratedByPipe; + + private final List tsFiles; + private final List isMiniTsFile; + private boolean isMiniTsFileConverted = false; + + // User specified configs + private final int databaseLevel; + private final boolean isAsyncLoad; + private final boolean isVerifySchema; + private final boolean isAutoCreateDatabase; + private final boolean isDeleteAfterLoad; + private final boolean isConvertOnTypeMismatch; + private final long tabletConversionThresholdBytes; + + public LoadTsFileAnalyzer( LoadTsFileStatement loadTsFileStatement, MPPQueryContext context, IPartitionFetcher partitionFetcher, @@ -133,19 +168,103 @@ public class LoadTsfileAnalyzer implements AutoCloseable { this.schemaFetcher = schemaFetcher; this.schemaAutoCreatorAndVerifier = new SchemaAutoCreatorAndVerifier(); + + this.isGeneratedByPipe = loadTsFileStatement.isGeneratedByPipe(); + + this.tsFiles = loadTsFileStatement.getTsFiles(); + this.isMiniTsFile = new ArrayList<>(Collections.nCopies(this.tsFiles.size(), false)); + + this.databaseLevel = loadTsFileStatement.getDatabaseLevel(); + this.isAsyncLoad = loadTsFileStatement.isAsyncLoad(); + this.isVerifySchema = loadTsFileStatement.isVerifySchema(); + this.isAutoCreateDatabase = loadTsFileStatement.isAutoCreateDatabase(); + this.isDeleteAfterLoad = loadTsFileStatement.isDeleteAfterLoad(); + this.isConvertOnTypeMismatch = loadTsFileStatement.isConvertOnTypeMismatch(); + this.tabletConversionThresholdBytes = loadTsFileStatement.getTabletConversionThresholdBytes(); } - public Analysis analyzeFileByFile() { - final Analysis analysis = new Analysis(); + public Analysis analyzeFileByFile(Analysis analysis) { + if (!checkBeforeAnalyzeFileByFile(analysis)) { + return analysis; + } - // check if the system is read only - if (CommonDescriptor.getInstance().getConfig().isReadOnly()) { + if (isAsyncLoad && doAsyncLoad(analysis)) 
{ + return analysis; + } + + try { + if (!doAnalyzeFileByFile(analysis)) { + return analysis; + } + final long startTime = System.nanoTime(); + + try { + schemaAutoCreatorAndVerifier.flush(); + } finally { + LOAD_TSFILE_COST_METRICS_SET.recordPhaseTimeCost( + LoadTsFileCostMetricsSet.ANALYSIS, System.nanoTime() - startTime); + } + } catch (AuthException e) { + return setFailAnalysisForAuthException(analysis, e); + } catch (LoadAnalyzeTypeMismatchException e) { + executeTabletConversionOnException(analysis, e); + // just return false to STOP the analysis process, + // the real result on the conversion will be set in the analysis. + return analysis; + } catch (Exception e) { + final String exceptionMessage = + String.format( + "Auto create or verify schema error when executing statement %s. Detail: %s.", + loadTsFileStatement, + e.getMessage() == null ? e.getClass().getName() : e.getMessage()); + LOGGER.warn(exceptionMessage, e); analysis.setFinishQueryAfterAnalyze(true); - analysis.setFailStatus( - RpcUtils.getStatus(TSStatusCode.SYSTEM_READ_ONLY, LoadReadOnlyException.MESSAGE)); + analysis.setFailStatus(RpcUtils.getStatus(TSStatusCode.LOAD_FILE_ERROR, exceptionMessage)); return analysis; } + LOGGER.info("Load - Analysis Stage: all tsfiles have been analyzed."); + + if (reconstructStatementIfMiniFileConverted()) { + // All mini tsfiles are converted to tablets, so the analysis is finished. + analysis.setFinishQueryAfterAnalyze(true); + analysis.setFailStatus(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS)); + return analysis; + } + + // data partition will be queried in the scheduler + analysis.setStatement(loadTsFileStatement); + return analysis; + } + + private boolean doAsyncLoad(final Analysis analysis) { + final long startTime = System.nanoTime(); + try { + if (ActiveLoadUtil.loadTsFileAsyncToActiveDir(tsFiles, null, isDeleteAfterLoad)) { + analysis.setFinishQueryAfterAnalyze(true); + analysis.setFailStatus(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS)); + analysis.setStatement(loadTsFileStatement); + return true; + } + LOGGER.info("Async Load has failed, and is now trying to load sync"); + return false; + } finally { + LoadTsFileCostMetricsSet.getInstance() + .recordPhaseTimeCost(ANALYSIS_ASYNC_MOVE, System.nanoTime() - startTime); + } + } + + private boolean checkBeforeAnalyzeFileByFile(Analysis analysis) { + // check if the system is read only + if (CommonDescriptor.getInstance().getConfig().isReadOnly()) { + LOGGER.info( + "LoadTsFileAnalyzer: Current datanode is read only, will try to convert to tablets and insert later."); + } + + return true; + } + + private boolean doAnalyzeFileByFile(Analysis analysis) { // analyze tsfile metadata file by file for (int i = 0, tsfileNum = loadTsFileStatement.getTsFiles().size(); i < tsfileNum; i++) { final File tsFile = loadTsFileStatement.getTsFiles().get(i); @@ -162,15 +281,22 @@ public Analysis analyzeFileByFile() { continue; } + final long startTime = System.nanoTime(); try { - analyzeSingleTsFile(tsFile); + analyzeSingleTsFile(tsFile, i); if (LOGGER.isInfoEnabled()) { LOGGER.info( "Load - Analysis Stage: {}/{} tsfiles have been analyzed, progress: {}%", i + 1, tsfileNum, String.format("%.3f", (i + 1) * 100.00 / tsfileNum)); } } catch (AuthException e) { - return createFailAnalysisForAuthException(e); + setFailAnalysisForAuthException(analysis, e); + return false; + } catch (LoadAnalyzeTypeMismatchException e) { + executeTabletConversionOnException(analysis, e); + // just return false to STOP the analysis process, + // the real result on 
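// ---------------------------------------------------------------------------
// Each analysis phase above is timed with System.nanoTime() and recorded in a
// finally block, so the metric is captured on success and on every exception
// path alike. The idiom in isolation; the metrics sink is a placeholder:
// ---------------------------------------------------------------------------
public final class PhaseTimingDemo {

  interface MetricsSink {
    void recordPhaseTimeCost(String phase, long nanos);
  }

  static void runTimedPhase(String phase, Runnable body, MetricsSink metrics) {
    final long startTime = System.nanoTime();
    try {
      body.run();
    } finally {
      // Always recorded, even if body.run() threw.
      metrics.recordPhaseTimeCost(phase, System.nanoTime() - startTime);
    }
  }

  public static void main(String[] args) {
    MetricsSink stdout =
        (phase, nanos) -> System.out.println(phase + " took " + nanos / 1_000 + " us");
    runTimedPhase("ANALYSIS", () -> fibonacci(30), stdout);
  }

  private static long fibonacci(int n) {
    return n < 2 ? n : fibonacci(n - 1) + fibonacci(n - 2);
  }
}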
the conversion will be set in the analysis. + return false; } catch (Exception e) { final String exceptionMessage = String.format( @@ -179,96 +305,142 @@ public Analysis analyzeFileByFile() { LOGGER.warn(exceptionMessage, e); analysis.setFinishQueryAfterAnalyze(true); analysis.setFailStatus(RpcUtils.getStatus(TSStatusCode.LOAD_FILE_ERROR, exceptionMessage)); - return analysis; + return false; + } finally { + LoadTsFileCostMetricsSet.getInstance() + .recordPhaseTimeCost(ANALYSIS, System.nanoTime() - startTime); } } - try { - schemaAutoCreatorAndVerifier.flush(); - } catch (AuthException e) { - return createFailAnalysisForAuthException(e); - } catch (Exception e) { - final String exceptionMessage = - String.format( - "Auto create or verify schema error when executing statement %s. Detail: %s.", - loadTsFileStatement, - e.getMessage() == null ? e.getClass().getName() : e.getMessage()); - LOGGER.warn(exceptionMessage, e); - analysis.setFinishQueryAfterAnalyze(true); - analysis.setFailStatus( - RpcUtils.getStatus( - TSStatusCode.LOAD_FILE_ERROR, - String.format( - "Auto create or verify schema error when executing statement %s.", - loadTsFileStatement))); - return analysis; - } - - LOGGER.info("Load - Analysis Stage: all tsfiles have been analyzed."); - - // data partition will be queried in the scheduler - analysis.setStatement(loadTsFileStatement); - return analysis; - } - - @Override - public void close() { - schemaAutoCreatorAndVerifier.close(); + return true; } - private void analyzeSingleTsFile(File tsFile) throws IOException, AuthException { + private void analyzeSingleTsFile(final File tsFile, int index) throws Exception { try (final TsFileSequenceReader reader = new TsFileSequenceReader(tsFile.getAbsolutePath())) { // can be reused when constructing tsfile resource final TsFileSequenceReaderTimeseriesMetadataIterator timeseriesMetadataIterator = - new TsFileSequenceReaderTimeseriesMetadataIterator(reader, true, 1); - - // construct tsfile resource - final TsFileResource tsFileResource = new TsFileResource(tsFile); - if (!tsFileResource.resourceFileExists()) { - // it will be serialized in LoadSingleTsFileNode - tsFileResource.updatePlanIndexes(reader.getMinPlanIndex()); - tsFileResource.updatePlanIndexes(reader.getMaxPlanIndex()); - } else { - tsFileResource.deserialize(); - } + new TsFileSequenceReaderTimeseriesMetadataIterator( + reader, + true, + IoTDBDescriptor.getInstance() + .getConfig() + .getLoadTsFileAnalyzeSchemaBatchReadTimeSeriesMetadataCount()); // check if the tsfile is empty if (!timeseriesMetadataIterator.hasNext()) { - LOGGER.warn("device2TimeseriesMetadata is empty, because maybe the tsfile is empty"); + throw new LoadEmptyFileException(tsFile.getAbsolutePath()); + } + + if (0 <= tabletConversionThresholdBytes + && tsFile.length() <= tabletConversionThresholdBytes + && handleSingleMiniFile(index)) { return; } - long writePointCount = 0; + doAnalyzeSingleFile(tsFile, reader, timeseriesMetadataIterator); + } catch (final LoadEmptyFileException loadEmptyFileException) { + LOGGER.warn("Empty file detected, will skip loading this file: {}", tsFile.getAbsolutePath()); + if (isDeleteAfterLoad) { + FileUtils.deleteQuietly(tsFile); + } + } + } - final boolean isAutoCreateSchemaOrVerifySchemaEnabled = - IoTDBDescriptor.getInstance().getConfig().isAutoCreateSchemaEnabled() - || loadTsFileStatement.isVerifySchema(); - while (timeseriesMetadataIterator.hasNext()) { - final Map> device2TimeseriesMetadata = - timeseriesMetadataIterator.next(); + private boolean 
handleSingleMiniFile(final int i) throws FileNotFoundException { + final long startTime = System.nanoTime(); + try { + final LoadTsFileDataTypeConverter loadTsFileDataTypeConverter = + new LoadTsFileDataTypeConverter(context, isGeneratedByPipe); + + final TSStatus status = + loadTsFileDataTypeConverter + .convertForTreeModel( + new LoadTsFileStatement(tsFiles.get(i).getPath()) + .setDeleteAfterLoad(isDeleteAfterLoad) + .setConvertOnTypeMismatch(isConvertOnTypeMismatch)) + .orElse(null); + + if (status == null || !loadTsFileDataTypeConverter.isSuccessful(status)) { + LOGGER.warn( + "Load: Failed to convert mini tsfile {} to tablets from statement {}. Status: {}.", + tsFiles.get(i).getPath(), + loadTsFileStatement, + status); + return false; + } - if (isAutoCreateSchemaOrVerifySchemaEnabled) { - schemaAutoCreatorAndVerifier.autoCreateAndVerify(reader, device2TimeseriesMetadata); - } + // A mark of successful conversion + isMiniTsFile.set(i, Boolean.TRUE); + isMiniTsFileConverted = true; - if (!tsFileResource.resourceFileExists()) { - TsFileResourceUtils.updateTsFileResource(device2TimeseriesMetadata, tsFileResource); - } + loadTsFileStatement.addTsFileResource(null); + loadTsFileStatement.addWritePointCount(0); + return true; + } finally { + LOAD_TSFILE_COST_METRICS_SET.recordPhaseTimeCost( + LoadTsFileCostMetricsSet.ANALYSIS_CAST_TABLETS, System.nanoTime() - startTime); + } + } + + private void doAnalyzeSingleFile( + final File tsFile, + final TsFileSequenceReader reader, + final TsFileSequenceReaderTimeseriesMetadataIterator timeseriesMetadataIterator) + throws IOException, LoadAnalyzeException, AuthException { + // construct tsfile resource + final TsFileResource tsFileResource = constructTsFileResource(reader, tsFile); + + long writePointCount = 0; + + schemaAutoCreatorAndVerifier.setCurrentModificationsAndTimeIndex(tsFileResource); + + final boolean isAutoCreateSchemaOrVerifySchemaEnabled = + IoTDBDescriptor.getInstance().getConfig().isAutoCreateSchemaEnabled() || isVerifySchema; + + while (timeseriesMetadataIterator.hasNext()) { + final Map> device2TimeseriesMetadata = + timeseriesMetadataIterator.next(); + // Update time index no matter if resource file exists or not, because resource file may be + // untrusted + TsFileResourceUtils.updateTsFileResource( + device2TimeseriesMetadata, + tsFileResource, + IoTDBDescriptor.getInstance().getConfig().isCacheLastValuesForLoad()); + schemaAutoCreatorAndVerifier.setCurrentTimeIndex(tsFileResource.getTimeIndex()); - // TODO: how to get the correct write point count when - // !isAutoCreateSchemaOrVerifySchemaEnabled - writePointCount += getWritePointCount(device2TimeseriesMetadata); - } if (isAutoCreateSchemaOrVerifySchemaEnabled) { - schemaAutoCreatorAndVerifier.flushAndClearDeviceIsAlignedCacheIfNecessary(); + schemaAutoCreatorAndVerifier.autoCreateAndVerify(reader, device2TimeseriesMetadata); } + // TODO: how to get the correct write point count when + // !isAutoCreateSchemaOrVerifySchemaEnabled + writePointCount += getWritePointCount(device2TimeseriesMetadata); + } + if (isAutoCreateSchemaOrVerifySchemaEnabled) { + schemaAutoCreatorAndVerifier.flushAndClearDeviceIsAlignedCacheIfNecessary(); + } - TimestampPrecisionUtils.checkTimestampPrecision(tsFileResource.getFileEndTime()); - tsFileResource.setStatus(TsFileResourceStatus.NORMAL); + TimestampPrecisionUtils.checkTimestampPrecision(tsFileResource.getFileEndTime()); + tsFileResource.setStatus(TsFileResourceStatus.NORMAL); - loadTsFileStatement.addTsFileResource(tsFileResource); - 
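// ---------------------------------------------------------------------------
// A "mini" TsFile is routed straight to tablet conversion when its size is
// at or under the configured threshold, and a negative threshold disables
// the feature entirely. The guard shown standalone; field names here are
// illustrative:
// ---------------------------------------------------------------------------
import java.io.File;

public final class MiniFileGuardDemo {

  // < 0 disables conversion; otherwise files of at most this many bytes
  // are converted to tablets instead of being loaded file-by-file.
  private final long tabletConversionThresholdBytes;

  MiniFileGuardDemo(long tabletConversionThresholdBytes) {
    this.tabletConversionThresholdBytes = tabletConversionThresholdBytes;
  }

  boolean shouldConvertToTablets(File tsFile) {
    return 0 <= tabletConversionThresholdBytes
        && tsFile.length() <= tabletConversionThresholdBytes;
  }

  public static void main(String[] args) {
    MiniFileGuardDemo disabled = new MiniFileGuardDemo(-1);
    MiniFileGuardDemo oneMegabyte = new MiniFileGuardDemo(1024 * 1024);
    File f = new File("tiny.tsfile"); // length() is 0 for a missing file
    System.out.println(disabled.shouldConvertToTablets(f));    // false
    System.out.println(oneMegabyte.shouldConvertToTablets(f)); // true
  }
}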
loadTsFileStatement.addWritePointCount(writePointCount); + loadTsFileStatement.addTsFileResource(tsFileResource); + loadTsFileStatement.addWritePointCount(writePointCount); + } + + private TsFileResource constructTsFileResource( + final TsFileSequenceReader reader, final File tsFile) throws IOException { + final TsFileResource tsFileResource = new TsFileResource(tsFile); + if (!tsFileResource.resourceFileExists()) { + // it will be serialized in LoadSingleTsFileNode + tsFileResource.updatePlanIndexes(reader.getMinPlanIndex()); + tsFileResource.updatePlanIndexes(reader.getMaxPlanIndex()); + } else { + tsFileResource.deserialize(); + // Reset tsfileResource's isGeneratedByPipe mark to prevent deserializing the wrong mark. + // If this tsfile is loaded by a pipe receiver, the correct mark will be added in + // `listenToTsFile` + tsFileResource.setGeneratedByPipe(isGeneratedByPipe); } + return tsFileResource; } private long getWritePointCount( @@ -279,13 +451,65 @@ private long getWritePointCount( .sum(); } - private Analysis createFailAnalysisForAuthException(AuthException e) { - Analysis analysis = new Analysis(); + private boolean reconstructStatementIfMiniFileConverted() { + if (!isMiniTsFileConverted) { + return false; + } + + return loadTsFileStatement.reconstructStatementIfMiniFileConverted(isMiniTsFile); + } + + private Analysis setFailAnalysisForAuthException(Analysis analysis, AuthException e) { analysis.setFinishQueryAfterAnalyze(true); analysis.setFailStatus(RpcUtils.getStatus(e.getCode(), e.getMessage())); return analysis; } + private Analysis executeTabletConversionOnException( + final Analysis analysis, final LoadAnalyzeException e) { + if (shouldSkipConversion(e)) { + analysis.setFailStatus( + new TSStatus(TSStatusCode.LOAD_FILE_ERROR.getStatusCode()).setMessage(e.getMessage())); + analysis.setFinishQueryAfterAnalyze(true); + return analysis; + } + + final LoadTsFileDataTypeConverter loadTsFileDataTypeConverter = + new LoadTsFileDataTypeConverter(context, isGeneratedByPipe); + final TSStatus status = + loadTsFileStatement.isConvertOnTypeMismatch() + ? loadTsFileDataTypeConverter.convertForTreeModel(loadTsFileStatement).orElse(null) + : null; + + if (status == null) { + LOGGER.warn( + "Load: Failed to convert to tablets from statement {}. Status is null.", + loadTsFileStatement); + analysis.setFailStatus( + new TSStatus(TSStatusCode.LOAD_FILE_ERROR.getStatusCode()).setMessage(e.getMessage())); + } else if (!loadTsFileDataTypeConverter.isSuccessful(status)) { + LOGGER.warn( + "Load: Failed to convert to tablets from statement {}. 
Status: {}", + loadTsFileStatement, + status); + analysis.setFailStatus(status); + } + + analysis.setFinishQueryAfterAnalyze(true); + analysis.setStatement(loadTsFileStatement); + return analysis; + } + + private boolean shouldSkipConversion(LoadAnalyzeException e) { + return (e instanceof LoadAnalyzeTypeMismatchException) + && !loadTsFileStatement.isConvertOnTypeMismatch(); + } + + @Override + public void close() { + schemaAutoCreatorAndVerifier.close(); + } + private final class SchemaAutoCreatorAndVerifier { private final LoadTsFileAnalyzeSchemaCache schemaCache; @@ -293,15 +517,50 @@ private SchemaAutoCreatorAndVerifier() throws LoadRuntimeOutOfMemoryException { this.schemaCache = new LoadTsFileAnalyzeSchemaCache(); } + public void setCurrentModificationsAndTimeIndex(TsFileResource resource) throws IOException { + schemaCache.setCurrentModificationsAndTimeIndex(resource); + } + + public void setCurrentTimeIndex(final ITimeIndex timeIndex) { + schemaCache.setCurrentTimeIndex(timeIndex); + } + public void autoCreateAndVerify( TsFileSequenceReader reader, - Map> device2TimeseriesMetadataList) - throws IOException, AuthException { + Map> device2TimeSeriesMetadataList) + throws IOException, AuthException, LoadAnalyzeTypeMismatchException { for (final Map.Entry> entry : - device2TimeseriesMetadataList.entrySet()) { + device2TimeSeriesMetadataList.entrySet()) { final IDeviceID device = entry.getKey(); + try { + if (schemaCache.isDeviceDeletedByMods(device)) { + continue; + } + } catch (IllegalPathException e) { + LOGGER.warn( + "Failed to check if device {} is deleted by mods. Will see it as not deleted.", + device, + e); + } + for (final TimeseriesMetadata timeseriesMetadata : entry.getValue()) { + try { + if (schemaCache.isTimeSeriesDeletedByMods(device, timeseriesMetadata)) { + continue; + } + } catch (IllegalPathException e) { + // In aligned devices, there may be empty measurements which will cause + // IllegalPathException. + if (!timeseriesMetadata.getMeasurementId().isEmpty()) { + LOGGER.warn( + "Failed to check if device {}, timeSeries {} is deleted by mods. Will see it as not deleted.", + device, + timeseriesMetadata.getMeasurementId(), + e); + } + } + final TSDataType dataType = timeseriesMetadata.getTsDataType(); if (TSDataType.VECTOR.equals(dataType)) { schemaCache @@ -371,23 +630,24 @@ public void flushAndClearDeviceIsAlignedCacheIfNecessary() throws SemanticExcept schemaCache.clearDeviceIsAlignedCacheIfNecessary(); } - public void flush() throws AuthException { + public void flush() throws AuthException, LoadAnalyzeTypeMismatchException { doAutoCreateAndVerify(); schemaCache.clearTimeSeries(); } - private void doAutoCreateAndVerify() throws SemanticException, AuthException { + private void doAutoCreateAndVerify() + throws SemanticException, AuthException, LoadAnalyzeTypeMismatchException { if (schemaCache.getDevice2TimeSeries().isEmpty()) { return; } try { - if (loadTsFileStatement.isVerifySchema()) { + if (isVerifySchema) { makeSureNoDuplicatedMeasurementsInDevices(); } - if (loadTsFileStatement.isAutoCreateDatabase()) { + if (isAutoCreateDatabase) { autoCreateDatabase(); } @@ -395,21 +655,21 @@ private void doAutoCreateAndVerify() throws SemanticException, AuthException { // isAutoCreateSchemaEnabled is false. 
final ISchemaTree schemaTree = autoCreateSchema(); - if (loadTsFileStatement.isVerifySchema()) { + if (isVerifySchema) { verifySchema(schemaTree); } - } catch (AuthException e) { + } catch (AuthException | LoadAnalyzeTypeMismatchException e) { throw e; } catch (Exception e) { LOGGER.warn("Auto create or verify schema error.", e); throw new SemanticException( String.format( - "Auto create or verify schema error when executing statement %s.", - loadTsFileStatement)); + "Auto create or verify schema error when executing statement %s. Detail: %s.", + loadTsFileStatement, e.getMessage())); } } - private void makeSureNoDuplicatedMeasurementsInDevices() throws VerifyMetadataException { + private void makeSureNoDuplicatedMeasurementsInDevices() throws LoadAnalyzeException { for (final Map.Entry> entry : schemaCache.getDevice2TimeSeries().entrySet()) { final IDeviceID device = entry.getKey(); @@ -417,7 +677,7 @@ for (final MeasurementSchema timeseriesSchema : entry.getValue()) { final String measurement = timeseriesSchema.getMeasurementId(); if (measurement2Schema.containsKey(measurement)) { - throw new VerifyMetadataException( + throw new LoadAnalyzeException( String.format("Duplicated measurements %s in device %s.", measurement, device)); } measurement2Schema.put(measurement, timeseriesSchema); @@ -426,8 +686,8 @@ } private void autoCreateDatabase() - throws VerifyMetadataException, LoadFileException, IllegalPathException, AuthException { - final int databasePrefixNodesLength = loadTsFileStatement.getDatabaseLevel() + 1; + throws LoadAnalyzeException, LoadFileException, IllegalPathException, AuthException { + final int databasePrefixNodesLength = databaseLevel + 1; final Set databasesNeededToBeSet = new HashSet<>(); for (final IDeviceID device : schemaCache.getDevice2TimeSeries().keySet()) { @@ -435,7 +695,7 @@ private void autoCreateDatabase() final String[] devicePrefixNodes = devicePath.getNodes(); if (devicePrefixNodes.length < databasePrefixNodesLength) { - throw new VerifyMetadataException( + throw new LoadAnalyzeException( String.format( "Database level %d is longer than device %s.", databasePrefixNodesLength, device)); @@ -502,9 +762,17 @@ private void executeSetDatabaseStatement(Statement statement) "", partitionFetcher, schemaFetcher, - IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold()); + IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold(), + false); if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode() - && result.status.code != TSStatusCode.DATABASE_ALREADY_EXISTS.getStatusCode()) { + && result.status.code + != TSStatusCode.DATABASE_ALREADY_EXISTS + .getStatusCode() // In the tree model, the user may concurrently create a conflicting + // database: for instance, the user creates root.db.ss.a while the + // database that failed auto-creation is root.db. We wait till "getOrCreatePartition" to + // judge whether the time series (like root.db.ss.a.e / root.db.ss.a) conflicts with the + // created database, so just do not throw an exception here. 
+ && result.status.code != TSStatusCode.DATABASE_CONFLICT.getStatusCode()) { LOGGER.warn( "Create database error, statement: {}, result status is: {}", statement, result.status); throw new LoadFileException( @@ -558,7 +826,7 @@ private ISchemaTree autoCreateSchema() throws IllegalPathException { } private void verifySchema(ISchemaTree schemaTree) - throws VerifyMetadataException, IllegalPathException { + throws LoadAnalyzeException, IllegalPathException, LoadAnalyzeTypeMismatchException { for (final Map.Entry> entry : schemaCache.getDevice2TimeSeries().entrySet()) { final IDeviceID device = entry.getKey(); @@ -571,7 +839,7 @@ private void verifySchema(ISchemaTree schemaTree) .collect(Collectors.toList())); if (iotdbDeviceSchemaInfo == null) { - throw new VerifyMetadataException( + throw new LoadAnalyzeException( String.format( "Device %s does not exist in IoTDB and cannot be created. " + "Please check whether auto-create-schema is enabled.", @@ -581,13 +849,12 @@ private void verifySchema(ISchemaTree schemaTree) // check device schema: is aligned or not final boolean isAlignedInTsFile = schemaCache.getDeviceIsAligned(device); final boolean isAlignedInIoTDB = iotdbDeviceSchemaInfo.isAligned(); - if (isAlignedInTsFile != isAlignedInIoTDB) { - throw new VerifyMetadataException( - String.format( - "Device %s in TsFile is %s, but in IoTDB is %s.", - device, - isAlignedInTsFile ? "aligned" : "not aligned", - isAlignedInIoTDB ? "aligned" : "not aligned")); + if (LOGGER.isInfoEnabled() && isAlignedInTsFile != isAlignedInIoTDB) { + LOGGER.info( + "Device {} in TsFile is {}, but in IoTDB is {}.", + device, + isAlignedInTsFile ? "aligned" : "not aligned", + isAlignedInIoTDB ? "aligned" : "not aligned"); } // check timeseries schema @@ -597,7 +864,7 @@ private void verifySchema(ISchemaTree schemaTree) final MeasurementSchema tsFileSchema = tsfileTimeseriesSchemas.get(i); final MeasurementSchema iotdbSchema = iotdbTimeseriesSchemas.get(i); if (iotdbSchema == null) { - throw new VerifyMetadataException( + throw new LoadAnalyzeException( String.format( "Measurement %s does not exist in IoTDB and cannot be created. 
" + "Please check weather auto-create-schema is enabled.", @@ -605,15 +872,14 @@ private void verifySchema(ISchemaTree schemaTree) } // check datatype - if (!tsFileSchema.getType().equals(iotdbSchema.getType())) { - throw new VerifyMetadataException( - String.format( - "Measurement %s%s%s datatype not match, TsFile: %s, IoTDB: %s", - device, - TsFileConstant.PATH_SEPARATOR, - iotdbSchema.getMeasurementId(), - tsFileSchema.getType(), - iotdbSchema.getType())); + if (LOGGER.isInfoEnabled() && !tsFileSchema.getType().equals(iotdbSchema.getType())) { + LOGGER.info( + "Measurement {}{}{} datatype not match, TsFile: {}, IoTDB: {}", + device, + TsFileConstant.PATH_SEPARATOR, + iotdbSchema.getMeasurementId(), + tsFileSchema.getType(), + iotdbSchema.getType()); } // check encoding @@ -654,25 +920,31 @@ public void close() { private static class LoadTsFileAnalyzeSchemaCache { - private final LoadTsFileAnalyzeSchemaMemoryBlock block; + private final LoadTsFileMemoryBlock block; private Map> currentBatchDevice2TimeSeriesSchemas; private Map tsFileDevice2IsAligned; private Set alreadySetDatabases; + private PatternTreeMap currentModifications; + private ITimeIndex currentTimeIndex; + private long batchDevice2TimeSeriesSchemasMemoryUsageSizeInBytes = 0; private long tsFileDevice2IsAlignedMemoryUsageSizeInBytes = 0; private long alreadySetDatabasesMemoryUsageSizeInBytes = 0; + private long currentModificationsMemoryUsageSizeInBytes = 0; + private long currentTimeIndexMemoryUsageSizeInBytes = 0; private int currentBatchTimeSeriesCount = 0; public LoadTsFileAnalyzeSchemaCache() throws LoadRuntimeOutOfMemoryException { this.block = LoadTsFileMemoryManager.getInstance() - .allocateAnalyzeSchemaMemoryBlock(ANALYZE_SCHEMA_MEMORY_SIZE_IN_BYTES); + .allocateMemoryBlock(ANALYZE_SCHEMA_MEMORY_SIZE_IN_BYTES); this.currentBatchDevice2TimeSeriesSchemas = new HashMap<>(); this.tsFileDevice2IsAligned = new HashMap<>(); this.alreadySetDatabases = new HashSet<>(); + this.currentModifications = PatternTreeMapFactory.getModsPatternTreeMap(); } public Map> getDevice2TimeSeries() { @@ -728,6 +1000,47 @@ public void addIsAlignedCache(IDeviceID device, boolean isAligned, boolean addIf } } + public void setCurrentModificationsAndTimeIndex(TsFileResource resource) throws IOException { + clearModificationsAndTimeIndex(); + + resource + .getModFile() + .getModifications() + .forEach( + modification -> currentModifications.append(modification.getPath(), modification)); + + currentModificationsMemoryUsageSizeInBytes = currentModifications.ramBytesUsed(); + block.addMemoryUsage(currentModificationsMemoryUsageSizeInBytes); + + if (resource.resourceFileExists()) { + currentTimeIndex = resource.getTimeIndex(); + if (currentTimeIndex instanceof FileTimeIndex) { + currentTimeIndex = resource.buildDeviceTimeIndex(); + } + currentTimeIndexMemoryUsageSizeInBytes = currentTimeIndex.calculateRamSize(); + block.addMemoryUsage(currentTimeIndexMemoryUsageSizeInBytes); + } + } + + public void setCurrentTimeIndex(final ITimeIndex timeIndex) { + currentTimeIndex = timeIndex; + } + + public boolean isDeviceDeletedByMods(IDeviceID device) throws IllegalPathException { + return ModificationUtils.isDeviceDeletedByMods( + currentModifications, currentTimeIndex, device); + } + + public boolean isTimeSeriesDeletedByMods( + IDeviceID device, TimeseriesMetadata timeseriesMetadata) throws IllegalPathException { + return ModificationUtils.isTimeSeriesDeletedByMods( + currentModifications, + device, + timeseriesMetadata.getMeasurementId(), + 
timeseriesMetadata.getStatistics().getStartTime(), + timeseriesMetadata.getStatistics().getEndTime()); + } + public void addAlreadySetDatabase(PartialPath database) { long memoryUsageSizeInBytes = 0; if (alreadySetDatabases.add(database)) { @@ -757,6 +1070,15 @@ public void clearTimeSeries() { currentBatchTimeSeriesCount = 0; } + public void clearModificationsAndTimeIndex() { + currentModifications = PatternTreeMapFactory.getModsPatternTreeMap(); + currentTimeIndex = null; + block.reduceMemoryUsage(currentModificationsMemoryUsageSizeInBytes); + block.reduceMemoryUsage(currentTimeIndexMemoryUsageSizeInBytes); + currentModificationsMemoryUsageSizeInBytes = 0; + currentTimeIndexMemoryUsageSizeInBytes = 0; + } + public void clearAlignedCache() { tsFileDevice2IsAligned.clear(); block.reduceMemoryUsage(tsFileDevice2IsAlignedMemoryUsageSizeInBytes); @@ -795,6 +1117,7 @@ private void clearDatabasesCache() { public void close() { clearTimeSeries(); + clearModificationsAndTimeIndex(); clearAlignedCache(); clearDatabasesCache(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/lock/DataNodeSchemaLockManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/lock/DataNodeSchemaLockManager.java index b57986ccdec2d..c7bda59fd85f3 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/lock/DataNodeSchemaLockManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/lock/DataNodeSchemaLockManager.java @@ -19,6 +19,8 @@ package org.apache.iotdb.db.queryengine.plan.analyze.lock; +import org.apache.iotdb.db.queryengine.common.MPPQueryContext; + import java.util.concurrent.locks.ReentrantReadWriteLock; public class DataNodeSchemaLockManager { @@ -34,26 +36,33 @@ public static DataNodeSchemaLockManager getInstance() { } private DataNodeSchemaLockManager() { - int lockNum = SchemaLockType.values().length; + final int lockNum = SchemaLockType.values().length; this.locks = new ReentrantReadWriteLock[lockNum]; for (int i = 0; i < lockNum; i++) { locks[i] = new ReentrantReadWriteLock(false); } } - public void takeReadLock(SchemaLockType lockType) { - locks[lockType.ordinal()].readLock().lock(); + public void takeReadLock(final MPPQueryContext context, final SchemaLockType lockType) { + if (context.addAcquiredLock(lockType)) { + locks[lockType.ordinal()].readLock().lock(); + } } - public void releaseReadLock(SchemaLockType lockType) { - locks[lockType.ordinal()].readLock().unlock(); + public void releaseReadLock(final MPPQueryContext queryContext) { + if (queryContext != null && !queryContext.getAcquiredLocks().isEmpty()) { + queryContext + .getAcquiredLocks() + .forEach(lockType -> locks[lockType.ordinal()].readLock().unlock()); + queryContext.getAcquiredLocks().clear(); + } } - public void takeWriteLock(SchemaLockType lockType) { + public void takeWriteLock(final SchemaLockType lockType) { locks[lockType.ordinal()].writeLock().lock(); } - public void releaseWriteLock(SchemaLockType lockType) { + public void releaseWriteLock(final SchemaLockType lockType) { locks[lockType.ordinal()].writeLock().unlock(); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/AutoCreateSchemaExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/AutoCreateSchemaExecutor.java index 3d2cae267fc23..0cfa03dbda418 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/AutoCreateSchemaExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/AutoCreateSchemaExecutor.java @@ -93,7 +93,8 @@ private ExecutionResult executeStatement(Statement statement, MPPQueryContext co schemaFetcher, context == null || context.getQueryType().equals(QueryType.WRITE) ? config.getQueryTimeoutThreshold() - : context.getTimeOut()); + : context.getTimeOut(), + false); } // Auto create the missing measurements and merge them into given schemaTree @@ -123,7 +124,7 @@ void autoCreateTimeSeries( dataTypesOfMissingMeasurement.add(tsDataType); encodingsOfMissingMeasurement.add(getDefaultEncoding(tsDataType)); compressionTypesOfMissingMeasurement.add( - TSFileDescriptor.getInstance().getConfig().getCompressor()); + TSFileDescriptor.getInstance().getConfig().getCompressor(tsDataType)); } }); @@ -180,7 +181,9 @@ void autoCreateTimeSeries( measurements[measurementIndex], tsDataTypes[measurementIndex], getDefaultEncoding(tsDataTypes[measurementIndex]), - TSFileDescriptor.getInstance().getConfig().getCompressor()); + TSFileDescriptor.getInstance() + .getConfig() + .getCompressor(tsDataTypes[measurementIndex])); } return v; }); @@ -347,7 +350,9 @@ void autoCreateMissingMeasurements( ? getDefaultEncoding(tsDataTypes[measurementIndex]) : encodings[measurementIndex], compressionTypes == null - ? TSFileDescriptor.getInstance().getConfig().getCompressor() + ? TSFileDescriptor.getInstance() + .getConfig() + .getCompressor(tsDataTypes[measurementIndex]) : compressionTypes[measurementIndex]); } return v; @@ -391,7 +396,8 @@ void autoCreateMissingMeasurements( && compressionTypesList.get(finalDeviceIndex1) != null) { compressionType = compressionTypesList.get(finalDeviceIndex1)[index]; } else { - compressionType = TSFileDescriptor.getInstance().getConfig().getCompressor(); + compressionType = + TSFileDescriptor.getInstance().getConfig().getCompressor(dataType); } templateExtendInfo.addMeasurement( measurement, dataType, encoding, compressionType); @@ -500,8 +506,8 @@ private void internalCreateTimeSeries( // Auto create timeseries and return the existing timeseries info private List executeInternalCreateTimeseriesStatement( - Statement statement, MPPQueryContext context) { - TSStatus status = + final Statement statement, final MPPQueryContext context) { + final TSStatus status = AuthorityChecker.checkAuthority(statement, context.getSession().getUserName()); if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { throw new RuntimeException(new IoTDBException(status.getMessage(), status.getCode())); @@ -509,7 +515,7 @@ private List executeInternalCreateTimeseriesStatement( ExecutionResult executionResult = executeStatement(statement, context); - int statusCode = executionResult.status.getCode(); + final int statusCode = executionResult.status.getCode(); if (statusCode == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { return Collections.emptyList(); } @@ -519,19 +525,23 @@ private List executeInternalCreateTimeseriesStatement( new IoTDBException(executionResult.status.getMessage(), statusCode)); } - Set failedCreationSet = new HashSet<>(); - List alreadyExistingMeasurements = new ArrayList<>(); - for (TSStatus subStatus : executionResult.status.subStatus) { + final Set failedCreationSet = new HashSet<>(); + final List alreadyExistingMeasurements = new ArrayList<>(); + for (final TSStatus subStatus : executionResult.status.subStatus) { if (subStatus.code 
== TSStatusCode.TIMESERIES_ALREADY_EXIST.getStatusCode()) { alreadyExistingMeasurements.add( MeasurementPath.parseDataFromString(subStatus.getMessage())); } else { - failedCreationSet.add(subStatus.message); + failedCreationSet.add(subStatus); } } if (!failedCreationSet.isEmpty()) { - throw new SemanticException(new MetadataException(String.join("; ", failedCreationSet))); + throw new SemanticException( + new MetadataException( + failedCreationSet.stream() + .map(TSStatus::toString) + .collect(Collectors.joining("; ")))); } return alreadyExistingMeasurements; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetchExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetchExecutor.java index 64fe751c37785..d302482c523f8 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetchExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetchExecutor.java @@ -29,6 +29,7 @@ import org.apache.iotdb.db.protocol.session.SessionManager; import org.apache.iotdb.db.queryengine.common.MPPQueryContext; import org.apache.iotdb.db.queryengine.common.schematree.ClusterSchemaTree; +import org.apache.iotdb.db.queryengine.exception.MemoryNotEnoughException; import org.apache.iotdb.db.queryengine.plan.Coordinator; import org.apache.iotdb.db.queryengine.plan.analyze.ClusterPartitionFetcher; import org.apache.iotdb.db.queryengine.plan.analyze.QueryType; @@ -90,7 +91,8 @@ private ExecutionResult executionStatement( sql, ClusterPartitionFetcher.getInstance(), schemaFetcher, - timeout); + timeout, + false); } /** @@ -244,12 +246,15 @@ private ClusterSchemaTree executeSchemaFetchQuery( ExecutionResult executionResult = executionStatement(queryId, fetchStatement, context); if (executionResult.status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { throw new RuntimeException( - String.format( - "cannot fetch schema, status is: %s, msg is: %s", - executionResult.status.getCode(), executionResult.status.getMessage())); + new IoTDBException( + String.format( + "Fetch Schema failed, because %s", executionResult.status.getMessage()), + executionResult.status.getCode())); } try (SetThreadName threadName = new SetThreadName(executionResult.queryId.getId())) { ClusterSchemaTree result = new ClusterSchemaTree(); + ClusterSchemaTree.SchemaNodeBatchDeserializer deserializer = + new ClusterSchemaTree.SchemaNodeBatchDeserializer(); Set databaseSet = new HashSet<>(); while (coordinator.getQueryExecution(queryId).hasNextResult()) { // The query will be transited to FINISHED when invoking getBatchResult() at the last time @@ -266,7 +271,7 @@ private ClusterSchemaTree executeSchemaFetchQuery( } Column column = tsBlock.get().getColumn(0); for (int i = 0; i < column.getPositionCount(); i++) { - parseFetchedData(column.getBinary(i), result, databaseSet); + parseFetchedData(column.getBinary(i), result, deserializer, databaseSet, context); } } result.setDatabases(databaseSet); @@ -281,7 +286,11 @@ private ClusterSchemaTree executeSchemaFetchQuery( } private void parseFetchedData( - Binary data, ClusterSchemaTree resultSchemaTree, Set databaseSet) { + Binary data, + ClusterSchemaTree resultSchemaTree, + ClusterSchemaTree.SchemaNodeBatchDeserializer deserializer, + Set databaseSet, + MPPQueryContext context) { InputStream inputStream = new ByteArrayInputStream(data.getValues()); try { byte 
type = ReadWriteIOUtils.readByte(inputStream); @@ -291,11 +300,30 @@ private void parseFetchedData( databaseSet.add(ReadWriteIOUtils.readString(inputStream)); } } else if (type == 1) { - resultSchemaTree.mergeSchemaTree(ClusterSchemaTree.deserialize(inputStream)); + // for data from old version + ClusterSchemaTree deserializedSchemaTree = ClusterSchemaTree.deserialize(inputStream); + if (context != null) { + context.reserveMemoryForSchemaTree(deserializedSchemaTree.ramBytesUsed()); + } + resultSchemaTree.mergeSchemaTree(deserializedSchemaTree); + } else if (type == 2 || type == 3) { + if (deserializer.isFirstBatch()) { + long memCost = ReadWriteIOUtils.readLong(inputStream); + if (context != null) { + context.reserveMemoryForSchemaTree(memCost); + } + } + deserializer.deserializeFromBatch(inputStream); + if (type == 3) { + // 'type == 3' indicates this batch is finished + resultSchemaTree.mergeSchemaTree(deserializer.finish()); + } } else { throw new RuntimeException( new MetadataException("Failed to fetch schema because of unrecognized data")); } + } catch (MemoryNotEnoughException e) { + throw e; } catch (Exception e) { throw new RuntimeException(e); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetcher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetcher.java index 11c0979a2b7da..13a9f5fc97a6d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetcher.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetcher.java @@ -181,17 +181,17 @@ public ClusterSchemaTree fetchSchemaWithTags( @Override public void fetchAndComputeSchemaWithAutoCreate( - ISchemaComputationWithAutoCreation schemaComputationWithAutoCreation, - MPPQueryContext context) { + final ISchemaComputationWithAutoCreation schemaComputationWithAutoCreation, + final MPPQueryContext context) { // The schema cache R/W and fetch operation must be locked together thus the cache clean // operation executed by delete timeseries will be effective. - DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.VALIDATE_VS_DELETION); - context.addAcquiredLockNum(SchemaLockType.VALIDATE_VS_DELETION); + DataNodeSchemaLockManager.getInstance() + .takeReadLock(context, SchemaLockType.VALIDATE_VS_DELETION); schemaCache.takeReadLock(); try { - Pair templateSetInfo = + final Pair templateSetInfo = templateManager.checkTemplateSetInfo(schemaComputationWithAutoCreation.getDevicePath()); - List indexOfMissingMeasurements; + final List indexOfMissingMeasurements; if (templateSetInfo == null) { // normal timeseries indexOfMissingMeasurements = @@ -209,7 +209,7 @@ public void fetchAndComputeSchemaWithAutoCreate( } // offer null for the rest missing schema processing - for (int index : indexOfMissingMeasurements) { + for (final int index : indexOfMissingMeasurements) { schemaComputationWithAutoCreation.computeMeasurement(index, null); } } finally { @@ -219,12 +219,13 @@ public void fetchAndComputeSchemaWithAutoCreate( @Override public void fetchAndComputeSchemaWithAutoCreate( - List schemaComputationWithAutoCreationList, - MPPQueryContext context) { + final List + schemaComputationWithAutoCreationList, + final MPPQueryContext context) { // The schema cache R/W and fetch operation must be locked together thus the cache clean - // operation executed by delete timeseries will be effective. 
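For context, the replacement below uses the context-tracking overload added to DataNodeSchemaLockManager earlier in this patch. A minimal sketch of its semantics, assuming MPPQueryContext.addAcquiredLock returns false when the lock type was already recorded, which is what the guard in takeReadLock implies:

// Editor's sketch: read locks are recorded per query context, so repeated
// acquisitions within one query are no-ops and release happens exactly once.
final DataNodeSchemaLockManager lockManager = DataNodeSchemaLockManager.getInstance();
lockManager.takeReadLock(context, SchemaLockType.VALIDATE_VS_DELETION); // first call: record + lock
lockManager.takeReadLock(context, SchemaLockType.VALIDATE_VS_DELETION); // repeat call: already recorded, skipped
// ... schema cache reads and remote fetches run under the lock ...
lockManager.releaseReadLock(context); // unlocks every lock type recorded in the context, then clears the record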
- DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.VALIDATE_VS_DELETION); - context.addAcquiredLockNum(SchemaLockType.VALIDATE_VS_DELETION); + // operation executed by delete timeSeries will be effective. + DataNodeSchemaLockManager.getInstance() + .takeReadLock(context, SchemaLockType.VALIDATE_VS_DELETION); schemaCache.takeReadLock(); try { @@ -258,25 +259,26 @@ public void fetchAndComputeSchemaWithAutoCreate( @Override public ISchemaTree fetchSchemaListWithAutoCreate( - List devicePathList, - List measurementsList, - List tsDataTypesList, - List encodingsList, - List compressionTypesList, - List isAlignedList, - MPPQueryContext context) { + final List devicePathList, + final List measurementsList, + final List tsDataTypesList, + final List encodingsList, + final List compressionTypesList, + final List isAlignedList, + final MPPQueryContext context) { // The schema cache R/W and fetch operation must be locked together thus the cache clean - // operation executed by delete timeseries will be effective. - DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.VALIDATE_VS_DELETION); - context.addAcquiredLockNum(SchemaLockType.VALIDATE_VS_DELETION); + // operation executed by delete timeSeries will be effective. + DataNodeSchemaLockManager.getInstance() + .takeReadLock(context, SchemaLockType.VALIDATE_VS_DELETION); schemaCache.takeReadLock(); try { - ClusterSchemaTree schemaTree = new ClusterSchemaTree(); - List> indexOfMissingMeasurementsList = new ArrayList<>(devicePathList.size()); - List indexOfDevicesWithMissingMeasurements = new ArrayList<>(); + final ClusterSchemaTree schemaTree = new ClusterSchemaTree(); + final List> indexOfMissingMeasurementsList = + new ArrayList<>(devicePathList.size()); + final List indexOfDevicesWithMissingMeasurements = new ArrayList<>(); for (int i = 0; i < devicePathList.size(); i++) { schemaTree.mergeSchemaTree(schemaCache.get(devicePathList.get(i), measurementsList.get(i))); - List indexOfMissingMeasurements = + final List indexOfMissingMeasurements = checkMissingMeasurements(schemaTree, devicePathList.get(i), measurementsList.get(i)); if (!indexOfMissingMeasurements.isEmpty()) { indexOfDevicesWithMissingMeasurements.add(i); @@ -289,8 +291,8 @@ public ISchemaTree fetchSchemaListWithAutoCreate( return schemaTree; } - // try fetch the missing schema from remote and cache fetched schema - ClusterSchemaTree remoteSchemaTree = + // Try fetch the missing schema from remote and cache fetched schema + final ClusterSchemaTree remoteSchemaTree = clusterSchemaFetchExecutor.fetchSchemaOfMultiDevices( devicePathList, measurementsList, @@ -305,9 +307,9 @@ public ISchemaTree fetchSchemaListWithAutoCreate( return schemaTree; } - // auto create the still missing schema and merge them into schemaTree - List indexOfDevicesNeedAutoCreateSchema = new ArrayList<>(); - List> indexOfMeasurementsNeedAutoCreate = new ArrayList<>(); + // Auto create the still missing schema and merge them into schemaTree + final List indexOfDevicesNeedAutoCreateSchema = new ArrayList<>(); + final List> indexOfMeasurementsNeedAutoCreate = new ArrayList<>(); List indexOfMissingMeasurements; int deviceIndex; for (int i = 0, size = indexOfDevicesWithMissingMeasurements.size(); i < size; i++) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/TemplateSchemaFetcher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/TemplateSchemaFetcher.java index 42c73e1c7dd8d..5967afceada25 100644 
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/TemplateSchemaFetcher.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/TemplateSchemaFetcher.java @@ -142,13 +142,20 @@ void processTemplateTimeSeries( measurements = schemaComputationWithAutoCreation.getMeasurements(); for (int j = 0; j < measurements.length; j++) { if (!template.hasSchema(measurements[j])) { + TSDataType dataType = schemaComputationWithAutoCreation.getDataType(j); + if (dataType == null) { + // the data type is not provided and cannot be inferred (the value is also null), + // skip this measurement + continue; + } + extensionMeasurementMap .computeIfAbsent(template.getName(), TemplateExtendInfo::new) .addMeasurement( measurements[j], - schemaComputationWithAutoCreation.getDataType(j), - getDefaultEncoding(schemaComputationWithAutoCreation.getDataType(j)), - TSFileDescriptor.getInstance().getConfig().getCompressor()); + dataType, + getDefaultEncoding(dataType), + TSFileDescriptor.getInstance().getConfig().getCompressor(dataType)); } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/IQueryExecution.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/IQueryExecution.java index b35123e8f707c..5cb2d4b449cd5 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/IQueryExecution.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/IQueryExecution.java @@ -53,6 +53,8 @@ public interface IQueryExecution { boolean isQuery(); + boolean isUserQuery(); + String getQueryId(); long getStartExecutionTime(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/QueryExecution.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/QueryExecution.java index 8cb8ea48ea827..28f9753e87afb 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/QueryExecution.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/QueryExecution.java @@ -605,8 +605,12 @@ private ExecutionResult getExecutionResult(QueryState state) { // If RETRYING is triggered by this QueryExecution, the stateMachine.getFailureStatus() is also // not null. We should only return the failure status when QueryExecution is in Done state. 
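The hunk below changes the failure-status precedence once a query is done: a status recorded on the Analysis now wins over the state machine's status. Plausibly this is what lets load-with-conversion surface the status set by executeTabletConversionOnException instead of a generic runtime status; a condensed sketch of the resulting logic:

// Editor's sketch of the precedence introduced below:
if (state.isDone()) {
  final TSStatus failure =
      analysis.getFailStatus() != null
          ? analysis.getFailStatus() // analysis-time failure takes precedence
          : stateMachine.getFailureStatus(); // otherwise fall back to the runtime failure (may be null)
  if (failure != null) {
    tsstatus = failure;
  }
}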
- if (state.isDone() && stateMachine.getFailureStatus() != null) { - tsstatus = stateMachine.getFailureStatus(); + if (state.isDone()) { + if (analysis.getFailStatus() != null) { + tsstatus = analysis.getFailStatus(); + } else if (stateMachine.getFailureStatus() != null) { + tsstatus = stateMachine.getFailureStatus(); + } } // collect redirect info to client for writing @@ -629,6 +633,11 @@ public boolean isQuery() { return context.getQueryType() == QueryType.READ; } + @Override + public boolean isUserQuery() { + return context.isUserQuery(); + } + @Override public String getQueryId() { return context.getQueryId().getId(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/ConfigExecution.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/ConfigExecution.java index c0886d9184289..3076e87d5b48e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/ConfigExecution.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/ConfigExecution.java @@ -219,6 +219,11 @@ public boolean isQuery() { return context.getQueryType() == QueryType.READ; } + @Override + public boolean isUserQuery() { + return context.isUserQuery(); + } + @Override public String getQueryId() { return context.getQueryId().getId(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/ConfigTaskVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/ConfigTaskVisitor.java index e9b36c80c9149..7a0556007ca3a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/ConfigTaskVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/ConfigTaskVisitor.java @@ -19,6 +19,7 @@ package org.apache.iotdb.db.queryengine.plan.execution.config; +import org.apache.iotdb.db.exception.sql.SemanticException; import org.apache.iotdb.db.queryengine.common.MPPQueryContext; import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.CountDatabaseTask; import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.CountTimeSlotListTask; @@ -36,7 +37,8 @@ import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.GetRegionIdTask; import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.GetSeriesSlotListTask; import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.GetTimeSlotListTask; -import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.MigrateRegionTask; +import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.RemoveConfigNodeTask; +import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.RemoveDataNodeTask; import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.SetTTLTask; import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.ShowClusterDetailsTask; import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.ShowClusterIdTask; @@ -52,6 +54,14 @@ import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.ShowTriggersTask; import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.ShowVariablesTask; import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.UnSetTTLTask; +import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.model.CreateModelTask; +import 
org.apache.iotdb.db.queryengine.plan.execution.config.metadata.model.DropModelTask; +import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.model.ShowAINodesTask; +import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.model.ShowModelsTask; +import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.region.ExtendRegionTask; +import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.region.MigrateRegionTask; +import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.region.ReconstructRegionTask; +import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.region.RemoveRegionTask; import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.template.AlterSchemaTemplateTask; import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.template.CreateSchemaTemplateTask; import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.template.DeactivateSchemaTemplateTask; @@ -86,8 +96,9 @@ import org.apache.iotdb.db.queryengine.plan.execution.config.sys.quota.ShowSpaceQuotaTask; import org.apache.iotdb.db.queryengine.plan.execution.config.sys.quota.ShowThrottleQuotaTask; import org.apache.iotdb.db.queryengine.plan.execution.config.sys.subscription.CreateTopicTask; +import org.apache.iotdb.db.queryengine.plan.execution.config.sys.subscription.DropSubscriptionTask; import org.apache.iotdb.db.queryengine.plan.execution.config.sys.subscription.DropTopicTask; -import org.apache.iotdb.db.queryengine.plan.execution.config.sys.subscription.ShowSubscriptionTask; +import org.apache.iotdb.db.queryengine.plan.execution.config.sys.subscription.ShowSubscriptionsTask; import org.apache.iotdb.db.queryengine.plan.execution.config.sys.subscription.ShowTopicsTask; import org.apache.iotdb.db.queryengine.plan.statement.Statement; import org.apache.iotdb.db.queryengine.plan.statement.StatementNode; @@ -106,7 +117,8 @@ import org.apache.iotdb.db.queryengine.plan.statement.metadata.GetRegionIdStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.GetSeriesSlotListStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.GetTimeSlotListStatement; -import org.apache.iotdb.db.queryengine.plan.statement.metadata.MigrateRegionStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.RemoveConfigNodeStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.RemoveDataNodeStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.SetTTLStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.ShowClusterIdStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.ShowClusterStatement; @@ -120,6 +132,10 @@ import org.apache.iotdb.db.queryengine.plan.statement.metadata.ShowTriggersStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.ShowVariablesStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.UnSetTTLStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.model.CreateModelStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.model.DropModelStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.model.ShowAINodesStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.model.ShowModelsStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.pipe.AlterPipeStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.pipe.CreatePipePluginStatement; import 
org.apache.iotdb.db.queryengine.plan.statement.metadata.pipe.CreatePipeStatement; @@ -129,7 +145,12 @@ import org.apache.iotdb.db.queryengine.plan.statement.metadata.pipe.ShowPipesStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.pipe.StartPipeStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.pipe.StopPipeStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.region.ExtendRegionStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.region.MigrateRegionStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.region.ReconstructRegionStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.region.RemoveRegionStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.subscription.CreateTopicStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.subscription.DropSubscriptionStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.subscription.DropTopicStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.subscription.ShowSubscriptionsStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.subscription.ShowTopicsStatement; @@ -163,6 +184,9 @@ import org.apache.tsfile.exception.NotImplementedException; +import static org.apache.iotdb.commons.executable.ExecutableManager.getUnTrustedUriErrorMsg; +import static org.apache.iotdb.commons.executable.ExecutableManager.isUriTrusted; + public class ConfigTaskVisitor extends StatementVisitor { @Override @@ -308,7 +332,16 @@ public IConfigTask visitKillQuery( @Override public IConfigTask visitCreateFunction( CreateFunctionStatement createFunctionStatement, MPPQueryContext context) { - return new CreateFunctionTask(createFunctionStatement); + if (!createFunctionStatement.isUsingURI() + || (createFunctionStatement.getUriString() != null + && isUriTrusted(createFunctionStatement.getUriString()))) { + // 1. user specified uri and that uri is trusted + // 2. user doesn't specify uri + return new CreateFunctionTask(createFunctionStatement); + } else { + // user specified uri and that uri is not trusted + throw new SemanticException(getUnTrustedUriErrorMsg(createFunctionStatement.getUriString())); + } } @Override @@ -326,7 +359,16 @@ public IConfigTask visitShowFunctions( @Override public IConfigTask visitCreateTrigger( CreateTriggerStatement createTriggerStatement, MPPQueryContext context) { - return new CreateTriggerTask(createTriggerStatement); + if (!createTriggerStatement.isUsingURI() + || (createTriggerStatement.getUriString() != null + && isUriTrusted(createTriggerStatement.getUriString()))) { + // 1. user specified uri and that uri is trusted + // 2. user doesn't specify uri + return new CreateTriggerTask(createTriggerStatement); + } else { + // user specified uri and that uri is not trusted + throw new SemanticException(getUnTrustedUriErrorMsg(createTriggerStatement.getUriString())); + } } @Override @@ -344,7 +386,16 @@ public IConfigTask visitShowTriggers( @Override public IConfigTask visitCreatePipePlugin( CreatePipePluginStatement createPipePluginStatement, MPPQueryContext context) { - return new CreatePipePluginTask(createPipePluginStatement); + if (createPipePluginStatement.getUriString() != null + && isUriTrusted(createPipePluginStatement.getUriString())) { + // user specified uri and that uri is trusted 
+ return new CreatePipePluginTask(createPipePluginStatement); + } else { + // the uri is missing or not trusted + throw new SemanticException( + getUnTrustedUriErrorMsg(createPipePluginStatement.getUriString())); + } } @Override @@ -467,12 +518,6 @@ public IConfigTask visitStopPipe(StopPipeStatement stopPipeStatement, MPPQueryCo return new StopPipeTask(stopPipeStatement); } - @Override - public IConfigTask visitShowSubscriptions( - ShowSubscriptionsStatement showSubscriptionsStatement, MPPQueryContext context) { - return new ShowSubscriptionTask(showSubscriptionsStatement); - } - public IConfigTask visitCreateTopic( CreateTopicStatement createTopicStatement, MPPQueryContext context) { return new CreateTopicTask(createTopicStatement); @@ -491,7 +536,19 @@ public IConfigTask visitShowTopics( } @Override - public IConfigTask visitDeleteTimeseries( + public IConfigTask visitShowSubscriptions( + ShowSubscriptionsStatement showSubscriptionsStatement, MPPQueryContext context) { + return new ShowSubscriptionsTask(showSubscriptionsStatement); + } + + @Override + public IConfigTask visitDropSubscription( + DropSubscriptionStatement dropSubscriptionStatement, MPPQueryContext context) { + return new DropSubscriptionTask(dropSubscriptionStatement); + } + + @Override + public IConfigTask visitDeleteTimeSeries( DeleteTimeSeriesStatement deleteTimeSeriesStatement, MPPQueryContext context) { return new DeleteTimeSeriesTask(context.getQueryId().getId(), deleteTimeSeriesStatement); } @@ -544,6 +601,36 @@ public IConfigTask visitMigrateRegion( return new MigrateRegionTask(migrateRegionStatement); } + @Override + public IConfigTask visitReconstructRegion( + ReconstructRegionStatement reconstructRegionStatement, MPPQueryContext context) { + return new ReconstructRegionTask(reconstructRegionStatement); + } + + @Override + public IConfigTask visitExtendRegion( + ExtendRegionStatement extendRegionStatement, MPPQueryContext context) { + return new ExtendRegionTask(extendRegionStatement); + } + + @Override + public IConfigTask visitRemoveRegion( + RemoveRegionStatement removeRegionStatement, MPPQueryContext context) { + return new RemoveRegionTask(removeRegionStatement); + } + + @Override + public IConfigTask visitRemoveDataNode( + RemoveDataNodeStatement removeDataNodeStatement, MPPQueryContext context) { + return new RemoveDataNodeTask(removeDataNodeStatement); + } + + @Override + public IConfigTask visitRemoveConfigNode( + RemoveConfigNodeStatement removeConfigNodeStatement, MPPQueryContext context) { + return new RemoveConfigNodeTask(removeConfigNodeStatement); + } + @Override public IConfigTask visitCreateContinuousQuery( CreateContinuousQueryStatement createContinuousQueryStatement, MPPQueryContext context) { @@ -585,4 +672,36 @@ public IConfigTask visitShowThrottleQuota( ShowThrottleQuotaStatement showThrottleQuotaStatement, MPPQueryContext context) { return new ShowThrottleQuotaTask(showThrottleQuotaStatement); } + + /** AI Model Management */ + @Override + public IConfigTask visitCreateModel( + CreateModelStatement createModelStatement, MPPQueryContext context) { + if (createModelStatement.getUri() != null && isUriTrusted(createModelStatement.getUri())) { + // user specified uri and that uri is trusted 
+ return new CreateModelTask(createModelStatement, context); + } else { + // the uri is missing or not trusted + throw new SemanticException(getUnTrustedUriErrorMsg(createModelStatement.getUri())); + } + } + + @Override + public IConfigTask visitDropModel( + DropModelStatement dropModelStatement, MPPQueryContext context) { + return new DropModelTask(dropModelStatement.getModelName()); + } + + @Override + public IConfigTask visitShowModels( + ShowModelsStatement showModelsStatement, MPPQueryContext context) { + return new ShowModelsTask(showModelsStatement.getModelName()); + } + + @Override + public IConfigTask visitShowAINodes( + ShowAINodesStatement showAINodesStatement, MPPQueryContext context) { + return new ShowAINodesTask(showAINodesStatement); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java index 8b6d53582ebec..3a3261276f259 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java @@ -19,6 +19,9 @@ package org.apache.iotdb.db.queryengine.plan.execution.config.executor; +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TDataNodeConfiguration; +import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; import org.apache.iotdb.common.rpc.thrift.TFlushReq; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.common.rpc.thrift.TSetConfigurationReq; @@ -43,12 +46,14 @@ import org.apache.iotdb.commons.path.MeasurementPath; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathPatternTree; -import org.apache.iotdb.commons.pipe.connector.payload.airgap.AirGapPseudoTPipeTransferRequest; -import org.apache.iotdb.commons.pipe.plugin.service.PipePluginClassLoader; -import org.apache.iotdb.commons.pipe.plugin.service.PipePluginExecutableManager; -import org.apache.iotdb.commons.pipe.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.agent.plugin.service.PipePluginClassLoader; +import org.apache.iotdb.commons.pipe.agent.plugin.service.PipePluginExecutableManager; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta; +import org.apache.iotdb.commons.pipe.sink.payload.airgap.AirGapPseudoTPipeTransferRequest; import org.apache.iotdb.commons.schema.view.LogicalViewSchema; import org.apache.iotdb.commons.schema.view.viewExpression.ViewExpression; +import org.apache.iotdb.commons.subscription.config.SubscriptionConfig; import org.apache.iotdb.commons.subscription.meta.topic.TopicMeta; import org.apache.iotdb.commons.trigger.service.TriggerExecutableManager; import org.apache.iotdb.commons.udf.service.UDFClassLoader; @@ -63,10 +68,13 @@ import org.apache.iotdb.confignode.rpc.thrift.TCountTimeSlotListResp; import org.apache.iotdb.confignode.rpc.thrift.TCreateCQReq; import org.apache.iotdb.confignode.rpc.thrift.TCreateFunctionReq; +import org.apache.iotdb.confignode.rpc.thrift.TCreateModelReq; import org.apache.iotdb.confignode.rpc.thrift.TCreatePipePluginReq; import org.apache.iotdb.confignode.rpc.thrift.TCreatePipeReq; 
import org.apache.iotdb.confignode.rpc.thrift.TCreateTopicReq; import org.apache.iotdb.confignode.rpc.thrift.TCreateTriggerReq; +import org.apache.iotdb.confignode.rpc.thrift.TDataNodeRemoveReq; +import org.apache.iotdb.confignode.rpc.thrift.TDataNodeRemoveResp; import org.apache.iotdb.confignode.rpc.thrift.TDatabaseSchema; import org.apache.iotdb.confignode.rpc.thrift.TDeactivateSchemaTemplateReq; import org.apache.iotdb.confignode.rpc.thrift.TDeleteDatabasesReq; @@ -74,10 +82,14 @@ import org.apache.iotdb.confignode.rpc.thrift.TDeleteTimeSeriesReq; import org.apache.iotdb.confignode.rpc.thrift.TDropCQReq; import org.apache.iotdb.confignode.rpc.thrift.TDropFunctionReq; +import org.apache.iotdb.confignode.rpc.thrift.TDropModelReq; import org.apache.iotdb.confignode.rpc.thrift.TDropPipePluginReq; import org.apache.iotdb.confignode.rpc.thrift.TDropPipeReq; +import org.apache.iotdb.confignode.rpc.thrift.TDropSubscriptionReq; import org.apache.iotdb.confignode.rpc.thrift.TDropTopicReq; import org.apache.iotdb.confignode.rpc.thrift.TDropTriggerReq; +import org.apache.iotdb.confignode.rpc.thrift.TExtendRegionReq; +import org.apache.iotdb.confignode.rpc.thrift.TGetAllPipeInfoResp; import org.apache.iotdb.confignode.rpc.thrift.TGetDatabaseReq; import org.apache.iotdb.confignode.rpc.thrift.TGetPipePluginTableResp; import org.apache.iotdb.confignode.rpc.thrift.TGetRegionIdReq; @@ -91,12 +103,17 @@ import org.apache.iotdb.confignode.rpc.thrift.TMigrateRegionReq; import org.apache.iotdb.confignode.rpc.thrift.TPipeConfigTransferReq; import org.apache.iotdb.confignode.rpc.thrift.TPipeConfigTransferResp; +import org.apache.iotdb.confignode.rpc.thrift.TReconstructRegionReq; import org.apache.iotdb.confignode.rpc.thrift.TRegionInfo; +import org.apache.iotdb.confignode.rpc.thrift.TRemoveRegionReq; +import org.apache.iotdb.confignode.rpc.thrift.TShowAINodesResp; import org.apache.iotdb.confignode.rpc.thrift.TShowCQResp; import org.apache.iotdb.confignode.rpc.thrift.TShowClusterResp; import org.apache.iotdb.confignode.rpc.thrift.TShowConfigNodesResp; import org.apache.iotdb.confignode.rpc.thrift.TShowDataNodesResp; import org.apache.iotdb.confignode.rpc.thrift.TShowDatabaseResp; +import org.apache.iotdb.confignode.rpc.thrift.TShowModelReq; +import org.apache.iotdb.confignode.rpc.thrift.TShowModelResp; import org.apache.iotdb.confignode.rpc.thrift.TShowPipeInfo; import org.apache.iotdb.confignode.rpc.thrift.TShowPipeReq; import org.apache.iotdb.confignode.rpc.thrift.TShowRegionReq; @@ -111,7 +128,6 @@ import org.apache.iotdb.confignode.rpc.thrift.TSpaceQuotaResp; import org.apache.iotdb.confignode.rpc.thrift.TThrottleQuotaResp; import org.apache.iotdb.confignode.rpc.thrift.TUnsetSchemaTemplateReq; -import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.exception.BatchProcessException; import org.apache.iotdb.db.exception.StorageEngineException; @@ -150,6 +166,8 @@ import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.ShowTTLTask; import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.ShowTriggersTask; import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.ShowVariablesTask; +import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.model.ShowAINodesTask; +import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.model.ShowModelsTask; import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.template.ShowNodesInSchemaTemplateTask; import 
org.apache.iotdb.db.queryengine.plan.execution.config.metadata.template.ShowPathSetTemplateTask; import org.apache.iotdb.db.queryengine.plan.execution.config.metadata.template.ShowSchemaTemplateTask; @@ -157,7 +175,7 @@ import org.apache.iotdb.db.queryengine.plan.execution.config.sys.pipe.ShowPipeTask; import org.apache.iotdb.db.queryengine.plan.execution.config.sys.quota.ShowSpaceQuotaTask; import org.apache.iotdb.db.queryengine.plan.execution.config.sys.quota.ShowThrottleQuotaTask; -import org.apache.iotdb.db.queryengine.plan.execution.config.sys.subscription.ShowSubscriptionTask; +import org.apache.iotdb.db.queryengine.plan.execution.config.sys.subscription.ShowSubscriptionsTask; import org.apache.iotdb.db.queryengine.plan.execution.config.sys.subscription.ShowTopicsTask; import org.apache.iotdb.db.queryengine.plan.expression.Expression; import org.apache.iotdb.db.queryengine.plan.expression.visitor.TransformToViewExpressionVisitor; @@ -173,13 +191,16 @@ import org.apache.iotdb.db.queryengine.plan.statement.metadata.GetRegionIdStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.GetSeriesSlotListStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.GetTimeSlotListStatement; -import org.apache.iotdb.db.queryengine.plan.statement.metadata.MigrateRegionStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.RemoveConfigNodeStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.RemoveDataNodeStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.SetTTLStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.ShowClusterStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.ShowDataNodesStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.ShowDatabaseStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.ShowRegionStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.ShowTTLStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.model.CreateModelStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.model.ShowAINodesStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.pipe.AlterPipeStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.pipe.CreatePipePluginStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.pipe.CreatePipeStatement; @@ -188,7 +209,12 @@ import org.apache.iotdb.db.queryengine.plan.statement.metadata.pipe.ShowPipesStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.pipe.StartPipeStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.pipe.StopPipeStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.region.ExtendRegionStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.region.MigrateRegionStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.region.ReconstructRegionStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.region.RemoveRegionStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.subscription.CreateTopicStatement; +import org.apache.iotdb.db.queryengine.plan.statement.metadata.subscription.DropSubscriptionStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.subscription.DropTopicStatement; import org.apache.iotdb.db.queryengine.plan.statement.metadata.subscription.ShowSubscriptionsStatement; import 
org.apache.iotdb.db.queryengine.plan.statement.metadata.subscription.ShowTopicsStatement; @@ -220,8 +246,10 @@ import org.apache.iotdb.db.storageengine.StorageEngine; import org.apache.iotdb.db.storageengine.dataregion.compaction.repair.RepairTaskStatus; import org.apache.iotdb.db.storageengine.dataregion.compaction.schedule.CompactionScheduleTaskManager; +import org.apache.iotdb.db.storageengine.dataregion.compaction.schedule.CompactionTaskManager; import org.apache.iotdb.db.trigger.service.TriggerClassLoader; import org.apache.iotdb.pipe.api.PipePlugin; +import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; import org.apache.iotdb.rpc.RpcUtils; import org.apache.iotdb.rpc.StatementExecutionException; import org.apache.iotdb.rpc.TSStatusCode; @@ -254,8 +282,10 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.TreeMap; import java.util.stream.Collectors; @@ -282,6 +312,16 @@ private static final class ClusterConfigTaskExecutorHolder { private ClusterConfigTaskExecutorHolder() {} } + private static final SettableFuture SUBSCRIPTION_NOT_ENABLED_ERROR_FUTURE; + + static { + SUBSCRIPTION_NOT_ENABLED_ERROR_FUTURE = SettableFuture.create(); + SUBSCRIPTION_NOT_ENABLED_ERROR_FUTURE.setException( + new IoTDBException( + "Subscription not enabled, please set config `subscription_enabled` to true.", + TSStatusCode.SUBSCRIPTION_NOT_ENABLED_ERROR.getStatusCode())); + } + public static ClusterConfigTaskExecutor getInstance() { return ClusterConfigTaskExecutor.ClusterConfigTaskExecutorHolder.INSTANCE; } @@ -461,7 +501,6 @@ public SettableFuture createFunction( String jarFilePathUnderTempDir = UDFExecutableManager.getInstance() .getDirStringUnderTempRootByRequestId(resource.getRequestId()) - + File.separator + jarFileName; // libRoot should be the path of the specified jar libRoot = jarFilePathUnderTempDir; @@ -494,12 +533,15 @@ public SettableFuture createFunction( tCreateFunctionReq.setJarFile(jarFile); tCreateFunctionReq.setJarMD5(jarMd5); tCreateFunctionReq.setIsUsingURI(true); - tCreateFunctionReq.setJarName( - String.format( - "%s-%s.%s", - jarFileName.substring(0, jarFileName.lastIndexOf(".")), - jarMd5, - jarFileName.substring(jarFileName.lastIndexOf(".") + 1))); + int index = jarFileName.lastIndexOf("."); + if (index < 0) { + tCreateFunctionReq.setJarName(String.format("%s-%s", jarFileName, jarMd5)); + } else { + tCreateFunctionReq.setJarName( + String.format( + "%s-%s.%s", + jarFileName.substring(0, index), jarMd5, jarFileName.substring(index + 1))); + } } // try to create instance, this request will fail if creation is not successful @@ -633,7 +675,6 @@ public SettableFuture createTrigger( String jarFilePathUnderTempDir = TriggerExecutableManager.getInstance() .getDirStringUnderTempRootByRequestId(resource.getRequestId()) - + File.separator + jarFileName; // libRoot should be the path of the specified jar libRoot = jarFilePathUnderTempDir; @@ -799,7 +840,6 @@ public SettableFuture createPipePlugin( final String jarFilePathUnderTempDir = PipePluginExecutableManager.getInstance() .getDirStringUnderTempRootByRequestId(resource.getRequestId()) - + File.separator + jarFileName; // libRoot should be the path of the specified jar libRoot = jarFilePathUnderTempDir; @@ -1049,11 +1089,14 @@ public SettableFuture setConfiguration(TSetConfigurationReq re TSStatus tsStatus = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); List 
@@ -633,7 +675,6 @@ public SettableFuture<ConfigTaskResult> createTrigger(
         String jarFilePathUnderTempDir =
             TriggerExecutableManager.getInstance()
                     .getDirStringUnderTempRootByRequestId(resource.getRequestId())
-                + File.separator
                 + jarFileName;
         // libRoot should be the path of the specified jar
         libRoot = jarFilePathUnderTempDir;
@@ -799,7 +840,6 @@ public SettableFuture<ConfigTaskResult> createPipePlugin(
         final String jarFilePathUnderTempDir =
             PipePluginExecutableManager.getInstance()
                 .getDirStringUnderTempRootByRequestId(resource.getRequestId())
-                + File.separator
                 + jarFileName;
         // libRoot should be the path of the specified jar
         libRoot = jarFilePathUnderTempDir;
@@ -1049,11 +1089,14 @@ public SettableFuture<ConfigTaskResult> setConfiguration(TSetConfigurationReq re
     TSStatus tsStatus = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
     List<String> ignoredConfigItems =
-        ConfigurationFileUtils.filterImmutableConfigItems(req.getConfigs());
+        ConfigurationFileUtils.filterInvalidConfigItems(req.getConfigs());
     TSStatus warningTsStatus = null;
     if (!ignoredConfigItems.isEmpty()) {
       warningTsStatus = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
-      warningTsStatus.setMessage("ignored config items: " + ignoredConfigItems);
+      warningTsStatus.setMessage(
+          "ignored config items: "
+              + ignoredConfigItems
+              + " because they are immutable or undefined.");
       if (req.getConfigs().isEmpty()) {
         future.setException(new IoTDBException(warningTsStatus.message, warningTsStatus.code));
         return future;
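The setConfiguration hunk above does two things: the filter now drops undefined keys as well as immutable ones (filterImmutableConfigItems becomes filterInvalidConfigItems), and the warning message states why items were ignored. A rough sketch of that filter-and-warn shape; the key sets IMMUTABLE_KEYS and KNOWN_KEYS are hypothetical stand-ins for the lookup that really lives in ConfigurationFileUtils:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;
    import java.util.stream.Collectors;

    // Sketch: drop immutable or undefined config items from a request and report them.
    public class ConfigFilterSketch {

      // Hypothetical key sets; the real lookup is derived from the server's config definition.
      private static final Set<String> IMMUTABLE_KEYS =
          new HashSet<>(Arrays.asList("cluster_name"));
      private static final Set<String> KNOWN_KEYS =
          new HashSet<>(Arrays.asList("cluster_name", "enable_seq_space_compaction"));

      // Removes invalid entries from the map in place and returns the ignored keys.
      static List<String> filterInvalidConfigItems(Map<String, String> configs) {
        List<String> ignored =
            configs.keySet().stream()
                .filter(key -> IMMUTABLE_KEYS.contains(key) || !KNOWN_KEYS.contains(key))
                .collect(Collectors.toList());
        ignored.forEach(configs::remove);
        return ignored;
      }

      public static void main(String[] args) {
        Map<String, String> requested = new LinkedHashMap<>();
        requested.put("cluster_name", "c1");                  // immutable -> ignored
        requested.put("no_such_option", "x");                 // undefined -> ignored
        requested.put("enable_seq_space_compaction", "true"); // applied
        List<String> ignored = filterInvalidConfigItems(requested);
        if (!ignored.isEmpty()) {
          System.out.println(
              "ignored config items: " + ignored + " because they are immutable or undefined.");
        }
        System.out.println("remaining: " + requested);
      }
    }

If everything in the request is filtered out, the patched code fails the statement outright (the req.getConfigs().isEmpty() branch); otherwise the warning rides along with an otherwise successful status.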
@@ -1096,18 +1139,16 @@ public SettableFuture<ConfigTaskResult> startRepairData(boolean onCluster) {
         future.setException(e);
       }
     } else {
-      if (!StorageEngine.getInstance().isAllSgReady()) {
+      if (!StorageEngine.getInstance().isReadyForNonReadWriteFunctions()) {
         future.setException(
             new IoTDBException(
                 "not all sg is ready", TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()));
         return future;
       }
-      IoTDBConfig iotdbConfig = IoTDBDescriptor.getInstance().getConfig();
-      if (!iotdbConfig.isEnableSeqSpaceCompaction()
-          || !iotdbConfig.isEnableUnseqSpaceCompaction()) {
+      if (!CompactionTaskManager.getInstance().isInit()) {
         future.setException(
             new IoTDBException(
-                "cannot start repair task because inner space compaction is not enabled",
+                "cannot start repair task because compaction is not enabled",
                 TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()));
         return future;
       }
@@ -1388,10 +1429,10 @@ public SettableFuture<ConfigTaskResult> showRegion(ShowRegionStatement showRegio
 
   @Override
   public SettableFuture<ConfigTaskResult> showDataNodes(
-      ShowDataNodesStatement showDataNodesStatement) {
-    SettableFuture<ConfigTaskResult> future = SettableFuture.create();
+      final ShowDataNodesStatement showDataNodesStatement) {
+    final SettableFuture<ConfigTaskResult> future = SettableFuture.create();
     TShowDataNodesResp showDataNodesResp = new TShowDataNodesResp();
-    try (ConfigNodeClient client =
+    try (final ConfigNodeClient client =
         CONFIG_NODE_CLIENT_MANAGER.borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) {
       showDataNodesResp = client.showDataNodes();
       if (showDataNodesResp.getStatus().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
@@ -1400,7 +1441,7 @@ public SettableFuture<ConfigTaskResult> showDataNodes(
             showDataNodesResp.getStatus().message, showDataNodesResp.getStatus().code));
         return future;
       }
-    } catch (ClientManagerException | TException e) {
+    } catch (final ClientManagerException | TException e) {
       future.setException(e);
     }
     // build TSBlock
@@ -1410,9 +1451,9 @@
 
   @Override
   public SettableFuture<ConfigTaskResult> showConfigNodes() {
-    SettableFuture<ConfigTaskResult> future = SettableFuture.create();
+    final SettableFuture<ConfigTaskResult> future = SettableFuture.create();
     TShowConfigNodesResp showConfigNodesResp = new TShowConfigNodesResp();
-    try (ConfigNodeClient client =
+    try (final ConfigNodeClient client =
        CONFIG_NODE_CLIENT_MANAGER.borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) {
       showConfigNodesResp = client.showConfigNodes();
       if (showConfigNodesResp.getStatus().getCode()
@@ -1422,7 +1463,7 @@ public SettableFuture<ConfigTaskResult> showConfigNodes() {
             showConfigNodesResp.getStatus().message, showConfigNodesResp.getStatus().code));
         return future;
       }
-    } catch (ClientManagerException | TException e) {
+    } catch (final ClientManagerException | TException e) {
       future.setException(e);
     }
     // build TSBlock
@@ -1432,12 +1473,12 @@
 
   @Override
   public SettableFuture<ConfigTaskResult> createSchemaTemplate(
-      CreateSchemaTemplateStatement createSchemaTemplateStatement) {
-    SettableFuture<ConfigTaskResult> future = SettableFuture.create();
+      final CreateSchemaTemplateStatement createSchemaTemplateStatement) {
+    final SettableFuture<ConfigTaskResult> future = SettableFuture.create();
     // Construct request using statement
     try {
       // Send request to some API server
-      TSStatus tsStatus =
+      final TSStatus tsStatus =
           ClusterTemplateManager.getInstance().createSchemaTemplate(createSchemaTemplateStatement);
       // Get response or throw exception
       if (TSStatusCode.SUCCESS_STATUS.getStatusCode() != tsStatus.getCode()) {
@@ -1449,7 +1490,7 @@ public SettableFuture<ConfigTaskResult> createSchemaTemplate(
       } else {
         future.set(new ConfigTaskResult(TSStatusCode.SUCCESS_STATUS));
       }
-    } catch (Exception e) {
+    } catch (final Exception e) {
       future.setException(e.getCause());
     }
     return future;
@@ -1457,14 +1498,14 @@ public SettableFuture<ConfigTaskResult> createSchemaTemplate(
 
   @Override
   public SettableFuture<ConfigTaskResult> showSchemaTemplate(
-      ShowSchemaTemplateStatement showSchemaTemplateStatement) {
-    SettableFuture<ConfigTaskResult> future = SettableFuture.create();
+      final ShowSchemaTemplateStatement showSchemaTemplateStatement) {
+    final SettableFuture<ConfigTaskResult> future = SettableFuture.create();
     try {
       // Send request to some API server
-      List