From 495a047e9c42d7341742229c8c6c32cd2ef38db1 Mon Sep 17 00:00:00 2001 From: Andrei Shitov Date: Wed, 15 Apr 2026 11:22:50 +0300 Subject: [PATCH 01/14] chore: add ci --- .github/workflows/ci.yml | 122 +++++++++++++++ .github/workflows/yetus-general-check.yml | 141 ------------------ .../yetus-jdk11-hadoop3-compile-check.yml | 111 -------------- .../yetus-jdk11-hadoop3-unit-check.yml | 133 ----------------- .../yetus-jdk17-hadoop3-compile-check.yml | 110 -------------- .../yetus-jdk17-hadoop3-unit-check.yml | 132 ---------------- .../yetus-jdk8-hadoop2-compile-check.yml | 110 -------------- .../yetus-jdk8-hadoop2-unit-check.yml | 132 ---------------- hbase-annotations/pom.xml | 2 +- .../hbase-archetype-builder/pom.xml | 2 +- hbase-archetypes/hbase-client-project/pom.xml | 2 +- .../hbase-shaded-client-project/pom.xml | 2 +- hbase-archetypes/pom.xml | 2 +- hbase-assembly/pom.xml | 2 +- hbase-asyncfs/pom.xml | 2 +- hbase-backup/pom.xml | 2 +- hbase-build-configuration/pom.xml | 2 +- hbase-checkstyle/pom.xml | 4 +- hbase-client/pom.xml | 2 +- hbase-common/pom.xml | 2 +- .../hbase-compression-aircompressor/pom.xml | 2 +- .../hbase-compression-brotli/pom.xml | 2 +- .../hbase-compression-lz4/pom.xml | 2 +- .../hbase-compression-snappy/pom.xml | 2 +- .../hbase-compression-zstd/pom.xml | 2 +- hbase-compression/pom.xml | 2 +- hbase-endpoint/pom.xml | 2 +- hbase-examples/pom.xml | 2 +- hbase-extensions/hbase-openssl/pom.xml | 2 +- hbase-extensions/pom.xml | 2 +- hbase-external-blockcache/pom.xml | 2 +- hbase-hadoop-compat/pom.xml | 2 +- hbase-hadoop2-compat/pom.xml | 2 +- hbase-hbtop/pom.xml | 2 +- hbase-http/pom.xml | 2 +- hbase-it/pom.xml | 2 +- hbase-logging/pom.xml | 2 +- hbase-mapreduce/pom.xml | 2 +- hbase-metrics-api/pom.xml | 2 +- hbase-metrics/pom.xml | 2 +- hbase-procedure/pom.xml | 2 +- hbase-protocol-shaded/pom.xml | 2 +- hbase-protocol/pom.xml | 2 +- hbase-replication/pom.xml | 2 +- hbase-resource-bundle/pom.xml | 2 +- hbase-rest/pom.xml | 2 +- hbase-rsgroup/pom.xml | 2 +- hbase-server/pom.xml | 2 +- .../hbase-shaded-check-invariants/pom.xml | 2 +- .../hbase-shaded-client-byo-hadoop/pom.xml | 2 +- hbase-shaded/hbase-shaded-client/pom.xml | 2 +- hbase-shaded/hbase-shaded-mapreduce/pom.xml | 2 +- .../hbase-shaded-testing-util-tester/pom.xml | 2 +- .../hbase-shaded-testing-util/pom.xml | 2 +- .../pom.xml | 2 +- hbase-shaded/pom.xml | 2 +- hbase-shell/pom.xml | 2 +- hbase-testing-util/pom.xml | 2 +- hbase-thrift/pom.xml | 2 +- hbase-zookeeper/pom.xml | 2 +- pom.xml | 15 +- 61 files changed, 188 insertions(+), 924 deletions(-) create mode 100644 .github/workflows/ci.yml delete mode 100644 .github/workflows/yetus-general-check.yml delete mode 100644 .github/workflows/yetus-jdk11-hadoop3-compile-check.yml delete mode 100644 .github/workflows/yetus-jdk11-hadoop3-unit-check.yml delete mode 100644 .github/workflows/yetus-jdk17-hadoop3-compile-check.yml delete mode 100644 .github/workflows/yetus-jdk17-hadoop3-unit-check.yml delete mode 100644 .github/workflows/yetus-jdk8-hadoop2-compile-check.yml delete mode 100644 .github/workflows/yetus-jdk8-hadoop2-unit-check.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000000..b35747fc2c94 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,122 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +name: CI + +on: + push: + branches: ['develop/**', 'release/**'] + pull_request: + branches: ['develop/**', 'release/**'] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +env: + MAVEN_ARGS: -B -V -e -ntp + MAVEN_COMMON_FLAGS: -Dhadoop.profile=3.0 -Drat.skip=true -Dmaven.javadoc.skip=true + +jobs: + compile: + name: Compile + runs-on: ubuntu-24.04 + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: temurin + cache: maven + server-id: arenadata + server-username: GITHUB_ACTOR + server-password: GITHUB_TOKEN + - name: Compile + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: >- + mvn ${{ env.MAVEN_ARGS }} install -DskipTests + ${{ env.MAVEN_COMMON_FLAGS }} -Pbuild-with-jdk17 + - name: Save m2 repository + uses: actions/cache/save@v4 + with: + path: ~/.m2/repository + key: m2-${{ github.sha }} + + test: + name: "${{ matrix.name }}" + needs: compile + runs-on: ubuntu-24.04 + timeout-minutes: ${{ matrix.timeout }} + strategy: + fail-fast: false + matrix: + include: + - name: Small Tests + profile: runSmallTests + timeout: 60 + + - name: Medium Tests + profile: runMediumTests + timeout: 180 + + - name: Large Tests (Wave 1) + profile: runLargeTests-wave1 + timeout: 240 + + - name: Large Tests (Wave 2) + profile: runLargeTests-wave2 + timeout: 240 + + - name: Large Tests (Wave 3) + profile: runLargeTests-wave3 + timeout: 240 + + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: temurin + server-id: arenadata + server-username: GITHUB_ACTOR + server-password: GITHUB_TOKEN + - name: Restore m2 repository + uses: actions/cache/restore@v4 + with: + path: ~/.m2/repository + key: m2-${{ github.sha }} + - name: Run tests + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: >- + mvn ${{ env.MAVEN_ARGS }} test + ${{ env.MAVEN_COMMON_FLAGS }} + -Pbuild-with-jdk17,${{ matrix.profile }} + -Dsurefire.firstPartForkCount=0.5C + -Dsurefire.secondPartForkCount=0.5C + -Dsurefire.rerunFailingTestsCount=2 + -Dmaven.test.failure.ignore=false + -fae + - name: Upload surefire reports + if: failure() + uses: actions/upload-artifact@v4 + with: + name: surefire-${{ matrix.name }} + path: '**/target/surefire-reports/' + if-no-files-found: ignore diff --git a/.github/workflows/yetus-general-check.yml b/.github/workflows/yetus-general-check.yml deleted file mode 100644 index 40e9589d4020..000000000000 --- a/.github/workflows/yetus-general-check.yml +++ /dev/null @@ -1,141 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# yamllint disable rule:line-length ---- -name: Yetus General Check - -"on": - pull_request: - types: [opened, synchronize, reopened] - -permissions: {} - -jobs: - general-check: - runs-on: ubuntu-latest - timeout-minutes: 600 - permissions: - contents: read - statuses: write - - env: - YETUS_VERSION: '0.15.0' - - steps: - - name: Checkout HBase - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - with: - path: src - fetch-depth: 0 - persist-credentials: false - - - name: Set up JDK 11 - uses: actions/setup-java@be666c2fcd27ec809703dec50e508c2fdc7f6654 # v5.2.0 - with: - java-version: '11' - distribution: 'temurin' - - - name: Maven cache - uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 - with: - path: ~/.m2 - key: hbase-m2-${{ hashFiles('**/pom.xml') }} - restore-keys: | - hbase-m2- - - - name: Download Yetus - run: | - mkdir -p yetus - cd yetus - bash "${{ github.workspace }}/src/dev-support/jenkins-scripts/cache-apache-project-artifact.sh" \ - --keys 'https://downloads.apache.org/yetus/KEYS' \ - --verify-tar-gz \ - ./apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz \ - yetus/${{ env.YETUS_VERSION }}/apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - tar --strip-components=1 -xzf apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - rm apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - - - name: Run Yetus General Check - env: - ARCHIVE_PATTERN_LIST: "TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump" - CHANGE_TARGET: "${{ github.base_ref }}" - DOCKERFILE: "${{ github.workspace }}/src/dev-support/docker/Dockerfile" - GITHUB_PASSWORD: ${{ secrets.GITHUB_TOKEN }} - GITHUB_USER: ${{ github.actor }} - HADOOP_PROFILE: "3.0" - JAVA8_HOME: "/usr/lib/jvm/java-8" - PATCHDIR: "${{ github.workspace }}/yetus-general-check/output" - PLUGINS: "all,-javadoc,-jira,-shadedjars,-unit" - SET_JAVA_HOME: "/usr/lib/jvm/java-11" - SOURCEDIR: "${{ github.workspace }}/src" - TESTS_FILTER: "checkstyle,javac,pylint,shellcheck,shelldocs,blanks,perlcritic,ruby-lint,rubocop" - YETUSDIR: "${{ github.workspace }}/yetus" - AUTHOR_IGNORE_LIST: "src/main/asciidoc/_chapters/developer.adoc" - BLANKS_EOL_IGNORE_FILE: "dev-support/blanks-eol-ignore.txt" - BLANKS_TABS_IGNORE_FILE: "dev-support/blanks-tabs-ignore.txt" - EXCLUDE_TESTS_URL: "https://ci-hbase.apache.org/job/HBase-Find-Flaky-Tests/job/${{ github.base_ref }}/lastSuccessfulBuild/artifact/output/excludes" - BUILD_THREAD: "4" - SUREFIRE_FIRST_PART_FORK_COUNT: "1.0C" - SUREFIRE_SECOND_PART_FORK_COUNT: "0.5C" - BRANCH_NAME: "${{ github.base_ref }}" - DEBUG: 'true' - run: | - cd "${{ github.workspace }}" - bash src/dev-support/jenkins_precommit_github_yetus.sh - - - name: Publish Job Summary - if: always() - run: | - cd "${{ github.workspace }}" - python3 src/dev-support/yetus_console_to_md.py yetus-general-check/output >> $GITHUB_STEP_SUMMARY - - - name: Publish Test Results - if: always() - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 - with: - name: yetus-general-check-output - path: ${{ github.workspace }}/yetus-general-check/output - retention-days: 7 - - zizmor: - runs-on: 
ubuntu-latest - timeout-minutes: 5 - permissions: - contents: read - - steps: - - name: Check for workflow changes - id: changes - env: - GH_TOKEN: ${{ github.token }} - run: | - if gh pr diff "${{ github.event.pull_request.number }}" --repo "${{ github.repository }}" --name-only | grep -q '^\.github/workflows/'; then - echo "changed=true" >> "$GITHUB_OUTPUT" - else - echo "changed=false" >> "$GITHUB_OUTPUT" - fi - - - name: Checkout HBase - if: steps.changes.outputs.changed == 'true' - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - with: - persist-credentials: false - - - name: Run zizmor - if: steps.changes.outputs.changed == 'true' - run: pipx run zizmor --min-severity=medium .github/workflows/ diff --git a/.github/workflows/yetus-jdk11-hadoop3-compile-check.yml b/.github/workflows/yetus-jdk11-hadoop3-compile-check.yml deleted file mode 100644 index 603066ff5e5d..000000000000 --- a/.github/workflows/yetus-jdk11-hadoop3-compile-check.yml +++ /dev/null @@ -1,111 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -# yamllint disable rule:line-length ---- -name: Yetus JDK11 Hadoop3 Compile Check - -"on": - pull_request: - types: [opened, synchronize, reopened] - -permissions: {} - -jobs: - jdk11-hadoop3-compile-check: - runs-on: ubuntu-latest - timeout-minutes: 60 - permissions: - contents: read - statuses: write - - env: - YETUS_VERSION: '0.15.0' - - steps: - - name: Checkout HBase - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - with: - path: src - fetch-depth: 0 - persist-credentials: false - - - name: Set up JDK 11 - uses: actions/setup-java@be666c2fcd27ec809703dec50e508c2fdc7f6654 # v5.2.0 - with: - java-version: '11' - distribution: 'temurin' - - - name: Maven cache - uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 - with: - path: ~/.m2 - key: hbase-m2-${{ hashFiles('**/pom.xml') }} - restore-keys: | - hbase-m2- - - - name: Download Yetus - run: | - mkdir -p yetus - cd yetus - bash "${{ github.workspace }}/src/dev-support/jenkins-scripts/cache-apache-project-artifact.sh" \ - --keys 'https://downloads.apache.org/yetus/KEYS' \ - --verify-tar-gz \ - ./apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz \ - yetus/${{ env.YETUS_VERSION }}/apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - tar --strip-components=1 -xzf apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - rm apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - - - name: Run Yetus JDK11 Hadoop3 Compile Check - env: - ARCHIVE_PATTERN_LIST: "TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump" - DOCKERFILE: "${{ github.workspace }}/src/dev-support/docker/Dockerfile" - GITHUB_PASSWORD: ${{ secrets.GITHUB_TOKEN }} - GITHUB_USER: ${{ github.actor }} - HADOOP_PROFILE: "3.0" - JAVA8_HOME: "/usr/lib/jvm/java-8" - PATCHDIR: "${{ github.workspace }}/yetus-jdk11-hadoop3-compile-check/output" - PLUGINS: "compile,github,htmlout,javac,javadoc,maven,mvninstall,shadedjars" - SET_JAVA_HOME: "/usr/lib/jvm/java-11" - SOURCEDIR: "${{ github.workspace }}/src" - TESTS_FILTER: "javac,javadoc" - YETUSDIR: "${{ github.workspace }}/yetus" - AUTHOR_IGNORE_LIST: "src/main/asciidoc/_chapters/developer.adoc" - BLANKS_EOL_IGNORE_FILE: "dev-support/blanks-eol-ignore.txt" - BLANKS_TABS_IGNORE_FILE: "dev-support/blanks-tabs-ignore.txt" - BUILD_THREAD: "4" - BRANCH_NAME: "${{ github.base_ref }}" - CHANGE_TARGET: "${{ github.base_ref }}" - SKIP_ERRORPRONE: 'true' - DEBUG: 'true' - run: | - cd "${{ github.workspace }}" - bash src/dev-support/jenkins_precommit_github_yetus.sh - - - name: Publish Job Summary - if: always() - run: | - cd "${{ github.workspace }}" - python3 src/dev-support/yetus_console_to_md.py yetus-jdk11-hadoop3-compile-check/output >> $GITHUB_STEP_SUMMARY - - - name: Publish Results - if: always() - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 - with: - name: yetus-jdk11-hadoop3-compile-check-output - path: ${{ github.workspace }}/yetus-jdk11-hadoop3-compile-check/output - retention-days: 7 diff --git a/.github/workflows/yetus-jdk11-hadoop3-unit-check.yml b/.github/workflows/yetus-jdk11-hadoop3-unit-check.yml deleted file mode 100644 index 627f610c95f5..000000000000 --- a/.github/workflows/yetus-jdk11-hadoop3-unit-check.yml +++ /dev/null @@ -1,133 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# yamllint disable rule:line-length ---- -name: Yetus JDK11 Hadoop3 Unit Check - -"on": - pull_request: - types: [opened, synchronize, reopened] - -permissions: {} - -jobs: - jdk11-hadoop3-unit-check: - runs-on: ubuntu-latest - timeout-minutes: 360 - permissions: - contents: read - statuses: write - - strategy: - fail-fast: false - matrix: - include: - - name: "small" - test_profile: "runSmallTests" - - name: "medium" - test_profile: "runMediumTests" - # Large tests split alphabetically by class name (after "Test" prefix) - # Wave 1: Test[A-H]*, Wave 2: Test[I-R]*, Wave 3: Test[S-Z]* - - name: "large-wave-1" - test_profile: "runLargeTests-wave1" - - name: "large-wave-2" - test_profile: "runLargeTests-wave2" - - name: "large-wave-3" - test_profile: "runLargeTests-wave3" - - name: ${{ matrix.name }} - - env: - YETUS_VERSION: '0.15.0' - - steps: - - name: Checkout HBase - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - with: - path: src - fetch-depth: 0 - persist-credentials: false - - - name: Set up JDK 11 - uses: actions/setup-java@be666c2fcd27ec809703dec50e508c2fdc7f6654 # v5.2.0 - with: - java-version: '11' - distribution: 'temurin' - - - name: Maven cache - uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 - with: - path: ~/.m2 - key: hbase-m2-${{ hashFiles('**/pom.xml') }} - restore-keys: | - hbase-m2- - - - name: Download Yetus - run: | - mkdir -p yetus - cd yetus - bash "${{ github.workspace }}/src/dev-support/jenkins-scripts/cache-apache-project-artifact.sh" \ - --keys 'https://downloads.apache.org/yetus/KEYS' \ - --verify-tar-gz \ - ./apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz \ - yetus/${{ env.YETUS_VERSION }}/apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - tar --strip-components=1 -xzf apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - rm apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - - - name: Run Yetus JDK11 Hadoop3 Unit Check - env: - ARCHIVE_PATTERN_LIST: "TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump" - DOCKERFILE: "${{ github.workspace }}/src/dev-support/docker/Dockerfile" - GITHUB_PASSWORD: ${{ secrets.GITHUB_TOKEN }} - GITHUB_USER: ${{ github.actor }} - HADOOP_PROFILE: "3.0" - JAVA8_HOME: "/usr/lib/jvm/java-8" - PATCHDIR: "${{ github.workspace }}/yetus-jdk11-hadoop3-unit-check/output" - PLUGINS: "github,htmlout,maven,unit" - SET_JAVA_HOME: "/usr/lib/jvm/java-11" - SOURCEDIR: "${{ github.workspace }}/src" - YETUSDIR: "${{ github.workspace }}/yetus" - AUTHOR_IGNORE_LIST: "src/main/asciidoc/_chapters/developer.adoc" - BLANKS_EOL_IGNORE_FILE: "dev-support/blanks-eol-ignore.txt" - BLANKS_TABS_IGNORE_FILE: "dev-support/blanks-tabs-ignore.txt" - EXCLUDE_TESTS_URL: "https://ci-hbase.apache.org/job/HBase-Find-Flaky-Tests/job/${{ github.base_ref }}/lastSuccessfulBuild/artifact/output/excludes" - BUILD_THREAD: "4" - SUREFIRE_FIRST_PART_FORK_COUNT: "1.0C" - SUREFIRE_SECOND_PART_FORK_COUNT: "0.5C" - BRANCH_NAME: "${{ github.base_ref }}" - CHANGE_TARGET: 
"${{ github.base_ref }}" - SKIP_ERRORPRONE: 'true' - DEBUG: 'true' - TEST_PROFILE: ${{ matrix.test_profile }} - run: | - cd "${{ github.workspace }}" - bash src/dev-support/jenkins_precommit_github_yetus.sh - - - name: Publish Job Summary - if: always() - run: | - cd "${{ github.workspace }}" - python3 src/dev-support/yetus_console_to_md.py yetus-jdk11-hadoop3-unit-check/output >> $GITHUB_STEP_SUMMARY - - - name: Publish Test Results - if: always() - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 - with: - name: yetus-jdk11-hadoop3-unit-check-${{ matrix.name }} - path: ${{ github.workspace }}/yetus-jdk11-hadoop3-unit-check/output - retention-days: 7 diff --git a/.github/workflows/yetus-jdk17-hadoop3-compile-check.yml b/.github/workflows/yetus-jdk17-hadoop3-compile-check.yml deleted file mode 100644 index 17abf8990d9c..000000000000 --- a/.github/workflows/yetus-jdk17-hadoop3-compile-check.yml +++ /dev/null @@ -1,110 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# yamllint disable rule:line-length ---- -name: Yetus JDK17 Hadoop3 Compile Check - -"on": - pull_request: - types: [opened, synchronize, reopened] - -permissions: - contents: read - statuses: write - -jobs: - jdk17-hadoop3-compile-check: - runs-on: ubuntu-latest - timeout-minutes: 60 - - env: - YETUS_VERSION: '0.15.0' - - steps: - - name: Checkout HBase - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - with: - path: src - fetch-depth: 0 - persist-credentials: false - - - name: Set up JDK 17 - uses: actions/setup-java@be666c2fcd27ec809703dec50e508c2fdc7f6654 # v5.2.0 - with: - java-version: '17' - distribution: 'temurin' - - - name: Maven cache - uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 - with: - path: ~/.m2 - key: hbase-m2-${{ hashFiles('**/pom.xml') }} - restore-keys: | - hbase-m2- - - - name: Download Yetus - run: | - mkdir -p yetus - cd yetus - bash "${{ github.workspace }}/src/dev-support/jenkins-scripts/cache-apache-project-artifact.sh" \ - --keys 'https://downloads.apache.org/yetus/KEYS' \ - --verify-tar-gz \ - ./apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz \ - yetus/${{ env.YETUS_VERSION }}/apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - tar --strip-components=1 -xzf apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - rm apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - - - name: Run Yetus JDK17 Hadoop3 Compile Check - env: - ARCHIVE_PATTERN_LIST: "TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump" - DOCKERFILE: "${{ github.workspace }}/src/dev-support/docker/Dockerfile" - GITHUB_PASSWORD: ${{ secrets.GITHUB_TOKEN }} - GITHUB_USER: ${{ github.actor }} - HADOOP_PROFILE: "3.0" - JAVA8_HOME: "/usr/lib/jvm/java-8" - PATCHDIR: "${{ github.workspace }}/yetus-jdk17-hadoop3-compile-check/output" 
- PLUGINS: "compile,github,htmlout,javac,javadoc,maven,mvninstall,shadedjars" - SET_JAVA_HOME: "/usr/lib/jvm/java-17" - SOURCEDIR: "${{ github.workspace }}/src" - TESTS_FILTER: "javac,javadoc" - YETUSDIR: "${{ github.workspace }}/yetus" - AUTHOR_IGNORE_LIST: "src/main/asciidoc/_chapters/developer.adoc" - BLANKS_EOL_IGNORE_FILE: "dev-support/blanks-eol-ignore.txt" - BLANKS_TABS_IGNORE_FILE: "dev-support/blanks-tabs-ignore.txt" - BUILD_THREAD: "4" - BRANCH_NAME: "${{ github.base_ref }}" - CHANGE_TARGET: "${{ github.base_ref }}" - SKIP_ERRORPRONE: 'true' - DEBUG: 'true' - run: | - cd "${{ github.workspace }}" - bash src/dev-support/jenkins_precommit_github_yetus.sh - - - name: Publish Job Summary - if: always() - run: | - cd "${{ github.workspace }}" - python3 src/dev-support/yetus_console_to_md.py yetus-jdk17-hadoop3-compile-check/output >> $GITHUB_STEP_SUMMARY - - - name: Publish Results - if: always() - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 - with: - name: yetus-jdk17-hadoop3-compile-check-output - path: ${{ github.workspace }}/yetus-jdk17-hadoop3-compile-check/output - retention-days: 7 diff --git a/.github/workflows/yetus-jdk17-hadoop3-unit-check.yml b/.github/workflows/yetus-jdk17-hadoop3-unit-check.yml deleted file mode 100644 index 571b6155dc37..000000000000 --- a/.github/workflows/yetus-jdk17-hadoop3-unit-check.yml +++ /dev/null @@ -1,132 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -# yamllint disable rule:line-length ---- -name: Yetus JDK17 Hadoop3 Unit Check - -"on": - pull_request: - types: [opened, synchronize, reopened] - -permissions: - contents: read - statuses: write - -jobs: - jdk17-hadoop3-unit-check: - runs-on: ubuntu-latest - timeout-minutes: 360 - - strategy: - fail-fast: false - matrix: - include: - - name: "small" - test_profile: "runSmallTests" - - name: "medium" - test_profile: "runMediumTests" - # Large tests split alphabetically by class name (after "Test" prefix) - # Wave 1: Test[A-H]*, Wave 2: Test[I-R]*, Wave 3: Test[S-Z]* - - name: "large-wave-1" - test_profile: "runLargeTests-wave1" - - name: "large-wave-2" - test_profile: "runLargeTests-wave2" - - name: "large-wave-3" - test_profile: "runLargeTests-wave3" - - name: ${{ matrix.name }} - - env: - YETUS_VERSION: '0.15.0' - - steps: - - name: Checkout HBase - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - with: - path: src - fetch-depth: 0 - persist-credentials: false - - - name: Set up JDK 17 - uses: actions/setup-java@be666c2fcd27ec809703dec50e508c2fdc7f6654 # v5.2.0 - with: - java-version: '17' - distribution: 'temurin' - - - name: Maven cache - uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 - with: - path: ~/.m2 - key: hbase-m2-${{ hashFiles('**/pom.xml') }} - restore-keys: | - hbase-m2- - - - name: Download Yetus - run: | - mkdir -p yetus - cd yetus - bash "${{ github.workspace }}/src/dev-support/jenkins-scripts/cache-apache-project-artifact.sh" \ - --keys 'https://downloads.apache.org/yetus/KEYS' \ - --verify-tar-gz \ - ./apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz \ - yetus/${{ env.YETUS_VERSION }}/apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - tar --strip-components=1 -xzf apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - rm apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - - - name: Run Yetus JDK17 Hadoop3 Unit Check - env: - ARCHIVE_PATTERN_LIST: "TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump" - DOCKERFILE: "${{ github.workspace }}/src/dev-support/docker/Dockerfile" - GITHUB_PASSWORD: ${{ secrets.GITHUB_TOKEN }} - GITHUB_USER: ${{ github.actor }} - HADOOP_PROFILE: "3.0" - JAVA8_HOME: "/usr/lib/jvm/java-8" - PATCHDIR: "${{ github.workspace }}/yetus-jdk17-hadoop3-unit-check/output" - PLUGINS: "github,htmlout,maven,unit" - SET_JAVA_HOME: "/usr/lib/jvm/java-17" - SOURCEDIR: "${{ github.workspace }}/src" - YETUSDIR: "${{ github.workspace }}/yetus" - AUTHOR_IGNORE_LIST: "src/main/asciidoc/_chapters/developer.adoc" - BLANKS_EOL_IGNORE_FILE: "dev-support/blanks-eol-ignore.txt" - BLANKS_TABS_IGNORE_FILE: "dev-support/blanks-tabs-ignore.txt" - EXCLUDE_TESTS_URL: "https://ci-hbase.apache.org/job/HBase-Find-Flaky-Tests/job/${{ github.base_ref }}/lastSuccessfulBuild/artifact/output/excludes" - BUILD_THREAD: "4" - SUREFIRE_FIRST_PART_FORK_COUNT: "1.0C" - SUREFIRE_SECOND_PART_FORK_COUNT: "0.5C" - BRANCH_NAME: "${{ github.base_ref }}" - CHANGE_TARGET: "${{ github.base_ref }}" - SKIP_ERRORPRONE: 'true' - DEBUG: 'true' - TEST_PROFILE: ${{ matrix.test_profile }} - run: | - cd "${{ github.workspace }}" - bash src/dev-support/jenkins_precommit_github_yetus.sh - - - name: Publish Job Summary - if: always() - run: | - cd "${{ github.workspace }}" - python3 src/dev-support/yetus_console_to_md.py yetus-jdk17-hadoop3-unit-check/output >> $GITHUB_STEP_SUMMARY - - - name: Publish Test Results - if: always() - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 - with: - name: yetus-jdk17-hadoop3-unit-check-${{ 
matrix.name }} - path: ${{ github.workspace }}/yetus-jdk17-hadoop3-unit-check/output - retention-days: 7 diff --git a/.github/workflows/yetus-jdk8-hadoop2-compile-check.yml b/.github/workflows/yetus-jdk8-hadoop2-compile-check.yml deleted file mode 100644 index 26b27d850356..000000000000 --- a/.github/workflows/yetus-jdk8-hadoop2-compile-check.yml +++ /dev/null @@ -1,110 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# yamllint disable rule:line-length ---- -name: Yetus JDK8 Hadoop2 Compile Check - -"on": - pull_request: - types: [opened, synchronize, reopened] - -permissions: {} - -jobs: - jdk8-hadoop2-compile-check: - runs-on: ubuntu-latest - timeout-minutes: 60 - permissions: - contents: read - statuses: write - - env: - YETUS_VERSION: '0.15.0' - - steps: - - name: Checkout HBase - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - with: - path: src - fetch-depth: 0 - persist-credentials: false - - - name: Set up JDK 8 - uses: actions/setup-java@be666c2fcd27ec809703dec50e508c2fdc7f6654 # v5.2.0 - with: - java-version: '8' - distribution: 'temurin' - - - name: Maven cache - uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 - with: - path: ~/.m2 - key: hbase-m2-${{ hashFiles('**/pom.xml') }} - restore-keys: | - hbase-m2- - - - name: Download Yetus - run: | - mkdir -p yetus - cd yetus - bash "${{ github.workspace }}/src/dev-support/jenkins-scripts/cache-apache-project-artifact.sh" \ - --keys 'https://downloads.apache.org/yetus/KEYS' \ - --verify-tar-gz \ - ./apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz \ - yetus/${{ env.YETUS_VERSION }}/apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - tar --strip-components=1 -xzf apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - rm apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - - - name: Run Yetus JDK8 Hadoop2 Compile Check - env: - ARCHIVE_PATTERN_LIST: "TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump" - DOCKERFILE: "${{ github.workspace }}/src/dev-support/docker/Dockerfile" - GITHUB_PASSWORD: ${{ secrets.GITHUB_TOKEN }} - GITHUB_USER: ${{ github.actor }} - JAVA8_HOME: "/usr/lib/jvm/java-8" - PATCHDIR: "${{ github.workspace }}/yetus-jdk8-hadoop2-compile-check/output" - PLUGINS: "compile,github,htmlout,javac,javadoc,maven,mvninstall,shadedjars" - SET_JAVA_HOME: "/usr/lib/jvm/java-8" - SOURCEDIR: "${{ github.workspace }}/src" - TESTS_FILTER: "javac,javadoc" - YETUSDIR: "${{ github.workspace }}/yetus" - AUTHOR_IGNORE_LIST: "src/main/asciidoc/_chapters/developer.adoc" - BLANKS_EOL_IGNORE_FILE: "dev-support/blanks-eol-ignore.txt" - BLANKS_TABS_IGNORE_FILE: "dev-support/blanks-tabs-ignore.txt" - BUILD_THREAD: "4" - BRANCH_NAME: "${{ github.base_ref }}" - CHANGE_TARGET: "${{ github.base_ref }}" - SKIP_ERRORPRONE: 'true' - DEBUG: 'true' - run: | - cd "${{ 
github.workspace }}" - bash src/dev-support/jenkins_precommit_github_yetus.sh - - - name: Publish Job Summary - if: always() - run: | - cd "${{ github.workspace }}" - python3 src/dev-support/yetus_console_to_md.py yetus-jdk8-hadoop2-compile-check/output >> $GITHUB_STEP_SUMMARY - - - name: Publish Results - if: always() - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 - with: - name: yetus-jdk8-hadoop2-compile-check-output - path: ${{ github.workspace }}/yetus-jdk8-hadoop2-compile-check/output - retention-days: 7 diff --git a/.github/workflows/yetus-jdk8-hadoop2-unit-check.yml b/.github/workflows/yetus-jdk8-hadoop2-unit-check.yml deleted file mode 100644 index 60f040480db9..000000000000 --- a/.github/workflows/yetus-jdk8-hadoop2-unit-check.yml +++ /dev/null @@ -1,132 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# yamllint disable rule:line-length ---- -name: Yetus JDK8 Hadoop2 Unit Check - -"on": - pull_request: - types: [opened, synchronize, reopened] - -permissions: {} - -jobs: - jdk8-hadoop2-unit-check: - runs-on: ubuntu-latest - timeout-minutes: 360 - permissions: - contents: read - statuses: write - - strategy: - fail-fast: false - matrix: - include: - - name: "small" - test_profile: "runSmallTests" - - name: "medium" - test_profile: "runMediumTests" - # Large tests split alphabetically by class name (after "Test" prefix) - # Wave 1: Test[A-H]*, Wave 2: Test[I-R]*, Wave 3: Test[S-Z]* - - name: "large-wave-1" - test_profile: "runLargeTests-wave1" - - name: "large-wave-2" - test_profile: "runLargeTests-wave2" - - name: "large-wave-3" - test_profile: "runLargeTests-wave3" - - name: ${{ matrix.name }} - - env: - YETUS_VERSION: '0.15.0' - - steps: - - name: Checkout HBase - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - with: - path: src - fetch-depth: 0 - persist-credentials: false - - - name: Set up JDK 8 - uses: actions/setup-java@be666c2fcd27ec809703dec50e508c2fdc7f6654 # v5.2.0 - with: - java-version: '8' - distribution: 'temurin' - - - name: Maven cache - uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 - with: - path: ~/.m2 - key: hbase-m2-${{ hashFiles('**/pom.xml') }} - restore-keys: | - hbase-m2- - - - name: Download Yetus - run: | - mkdir -p yetus - cd yetus - bash "${{ github.workspace }}/src/dev-support/jenkins-scripts/cache-apache-project-artifact.sh" \ - --keys 'https://downloads.apache.org/yetus/KEYS' \ - --verify-tar-gz \ - ./apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz \ - yetus/${{ env.YETUS_VERSION }}/apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - tar --strip-components=1 -xzf apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - rm apache-yetus-${{ env.YETUS_VERSION }}-bin.tar.gz - - - name: Run Yetus JDK8 Hadoop2 Unit Check - env: - 
ARCHIVE_PATTERN_LIST: "TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump" - DOCKERFILE: "${{ github.workspace }}/src/dev-support/docker/Dockerfile" - GITHUB_PASSWORD: ${{ secrets.GITHUB_TOKEN }} - GITHUB_USER: ${{ github.actor }} - JAVA8_HOME: "/usr/lib/jvm/java-8" - PATCHDIR: "${{ github.workspace }}/yetus-jdk8-hadoop2-unit-check/output" - PLUGINS: "github,htmlout,maven,unit" - SET_JAVA_HOME: "/usr/lib/jvm/java-8" - SOURCEDIR: "${{ github.workspace }}/src" - YETUSDIR: "${{ github.workspace }}/yetus" - AUTHOR_IGNORE_LIST: "src/main/asciidoc/_chapters/developer.adoc" - BLANKS_EOL_IGNORE_FILE: "dev-support/blanks-eol-ignore.txt" - BLANKS_TABS_IGNORE_FILE: "dev-support/blanks-tabs-ignore.txt" - EXCLUDE_TESTS_URL: "https://ci-hbase.apache.org/job/HBase-Find-Flaky-Tests/job/${{ github.base_ref }}/lastSuccessfulBuild/artifact/output/excludes" - BUILD_THREAD: "4" - SUREFIRE_FIRST_PART_FORK_COUNT: "1.0C" - SUREFIRE_SECOND_PART_FORK_COUNT: "0.5C" - BRANCH_NAME: "${{ github.base_ref }}" - CHANGE_TARGET: "${{ github.base_ref }}" - SKIP_ERRORPRONE: 'true' - DEBUG: 'true' - TEST_PROFILE: ${{ matrix.test_profile }} - run: | - cd "${{ github.workspace }}" - bash src/dev-support/jenkins_precommit_github_yetus.sh - - - name: Publish Job Summary - if: always() - run: | - cd "${{ github.workspace }}" - python3 src/dev-support/yetus_console_to_md.py yetus-jdk8-hadoop2-unit-check/output >> $GITHUB_STEP_SUMMARY - - - name: Publish Test Results - if: always() - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 - with: - name: yetus-jdk8-hadoop2-unit-check-${{ matrix.name }} - path: ${{ github.workspace }}/yetus-jdk8-hadoop2-unit-check/output - retention-days: 7 diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml index d977f3c78c6f..0e758eb72411 100644 --- a/hbase-annotations/pom.xml +++ b/hbase-annotations/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase - ${revision} + 2.6.5.1-4.3.0-0 ../pom.xml diff --git a/hbase-archetypes/hbase-archetype-builder/pom.xml b/hbase-archetypes/hbase-archetype-builder/pom.xml index 017e03227fa8..0fbdac0f6fe0 100644 --- a/hbase-archetypes/hbase-archetype-builder/pom.xml +++ b/hbase-archetypes/hbase-archetype-builder/pom.xml @@ -24,7 +24,7 @@ org.apache.hbase hbase-archetypes - ${revision} + 2.6.5.1-4.3.0-0 ../pom.xml diff --git a/hbase-archetypes/hbase-client-project/pom.xml b/hbase-archetypes/hbase-client-project/pom.xml index 67ca8d55ee7c..7df87e5914da 100644 --- a/hbase-archetypes/hbase-client-project/pom.xml +++ b/hbase-archetypes/hbase-client-project/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-archetypes - ${revision} + 2.6.5.1-4.3.0-0 ../pom.xml hbase-client-project diff --git a/hbase-archetypes/hbase-shaded-client-project/pom.xml b/hbase-archetypes/hbase-shaded-client-project/pom.xml index caec8fe032a7..5b5e16b6d80c 100644 --- a/hbase-archetypes/hbase-shaded-client-project/pom.xml +++ b/hbase-archetypes/hbase-shaded-client-project/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-archetypes - ${revision} + 2.6.5.1-4.3.0-0 ../pom.xml hbase-shaded-client-project diff --git a/hbase-archetypes/pom.xml b/hbase-archetypes/pom.xml index 98371265ebd4..d8098c09f7e4 100644 --- a/hbase-archetypes/pom.xml +++ b/hbase-archetypes/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml index 8ff8fcdf2a71..9cd8823454ae 100644 --- a/hbase-assembly/pom.xml +++ b/hbase-assembly/pom.xml @@ -23,7 +23,7 @@ 
org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-assembly diff --git a/hbase-asyncfs/pom.xml b/hbase-asyncfs/pom.xml index ad2e7d805df5..f8643fb745a7 100644 --- a/hbase-asyncfs/pom.xml +++ b/hbase-asyncfs/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration diff --git a/hbase-backup/pom.xml b/hbase-backup/pom.xml index 769e3c669c3f..a667da848483 100644 --- a/hbase-backup/pom.xml +++ b/hbase-backup/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-backup diff --git a/hbase-build-configuration/pom.xml b/hbase-build-configuration/pom.xml index 763275722c94..c76f12e09055 100644 --- a/hbase-build-configuration/pom.xml +++ b/hbase-build-configuration/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase - ${revision} + 2.6.5.1-4.3.0-0 ../pom.xml diff --git a/hbase-checkstyle/pom.xml b/hbase-checkstyle/pom.xml index 67fc77a4dc5f..1c6c9fdf04c3 100644 --- a/hbase-checkstyle/pom.xml +++ b/hbase-checkstyle/pom.xml @@ -25,12 +25,12 @@ org.apache.hbase hbase - ${revision} + 2.6.5.1-4.3.0-0 ../pom.xml org.apache.hbase hbase-checkstyle - ${revision} + 2.6.5.1-4.3.0-0 Apache HBase - Checkstyle Module to hold Checkstyle properties for HBase. diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml index e54ca13d63cd..cbe4931a6ab9 100644 --- a/hbase-client/pom.xml +++ b/hbase-client/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml index 7a6174553df2..8e7ffb20f3a0 100644 --- a/hbase-common/pom.xml +++ b/hbase-common/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration diff --git a/hbase-compression/hbase-compression-aircompressor/pom.xml b/hbase-compression/hbase-compression-aircompressor/pom.xml index b1e90583d533..8e8dd3505e32 100644 --- a/hbase-compression/hbase-compression-aircompressor/pom.xml +++ b/hbase-compression/hbase-compression-aircompressor/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-compression - ${revision} + 2.6.5.1-4.3.0-0 ../pom.xml hbase-compression-aircompressor diff --git a/hbase-compression/hbase-compression-brotli/pom.xml b/hbase-compression/hbase-compression-brotli/pom.xml index f8d3be671122..cbf4613f180f 100644 --- a/hbase-compression/hbase-compression-brotli/pom.xml +++ b/hbase-compression/hbase-compression-brotli/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-compression - ${revision} + 2.6.5.1-4.3.0-0 ../pom.xml hbase-compression-brotli diff --git a/hbase-compression/hbase-compression-lz4/pom.xml b/hbase-compression/hbase-compression-lz4/pom.xml index 5a1b9b4aff57..dc0053012a5f 100644 --- a/hbase-compression/hbase-compression-lz4/pom.xml +++ b/hbase-compression/hbase-compression-lz4/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-compression - ${revision} + 2.6.5.1-4.3.0-0 ../pom.xml hbase-compression-lz4 diff --git a/hbase-compression/hbase-compression-snappy/pom.xml b/hbase-compression/hbase-compression-snappy/pom.xml index 48c03fd58960..87f2b2ad304d 100644 --- a/hbase-compression/hbase-compression-snappy/pom.xml +++ b/hbase-compression/hbase-compression-snappy/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-compression - ${revision} + 2.6.5.1-4.3.0-0 ../pom.xml hbase-compression-snappy diff --git 
a/hbase-compression/hbase-compression-zstd/pom.xml b/hbase-compression/hbase-compression-zstd/pom.xml index b3ae7bd88122..f5d6339125da 100644 --- a/hbase-compression/hbase-compression-zstd/pom.xml +++ b/hbase-compression/hbase-compression-zstd/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-compression - ${revision} + 2.6.5.1-4.3.0-0 ../pom.xml hbase-compression-zstd diff --git a/hbase-compression/pom.xml b/hbase-compression/pom.xml index c2e4633b3987..f036f7d1b64d 100644 --- a/hbase-compression/pom.xml +++ b/hbase-compression/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-compression diff --git a/hbase-endpoint/pom.xml b/hbase-endpoint/pom.xml index 60d2a421b540..e8bb9c6e7cc2 100644 --- a/hbase-endpoint/pom.xml +++ b/hbase-endpoint/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-endpoint diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml index 24207cb24b23..53ba9fb6b748 100644 --- a/hbase-examples/pom.xml +++ b/hbase-examples/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-examples diff --git a/hbase-extensions/hbase-openssl/pom.xml b/hbase-extensions/hbase-openssl/pom.xml index b1183a1bec76..ac007a6ed07a 100644 --- a/hbase-extensions/hbase-openssl/pom.xml +++ b/hbase-extensions/hbase-openssl/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-extensions - ${revision} + 2.6.5.1-4.3.0-0 ../pom.xml diff --git a/hbase-extensions/pom.xml b/hbase-extensions/pom.xml index 8a11e7754ea2..dea7ee428a61 100644 --- a/hbase-extensions/pom.xml +++ b/hbase-extensions/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration diff --git a/hbase-external-blockcache/pom.xml b/hbase-external-blockcache/pom.xml index c764514801bb..b5fa25823dc2 100644 --- a/hbase-external-blockcache/pom.xml +++ b/hbase-external-blockcache/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-external-blockcache diff --git a/hbase-hadoop-compat/pom.xml b/hbase-hadoop-compat/pom.xml index 413107c77491..e5e88143bbd0 100644 --- a/hbase-hadoop-compat/pom.xml +++ b/hbase-hadoop-compat/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml index 2bab73cc723d..d95974108b5a 100644 --- a/hbase-hadoop2-compat/pom.xml +++ b/hbase-hadoop2-compat/pom.xml @@ -21,7 +21,7 @@ limitations under the License. 
org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration diff --git a/hbase-hbtop/pom.xml b/hbase-hbtop/pom.xml index 53d73a1b8aa9..6be4faade17d 100644 --- a/hbase-hbtop/pom.xml +++ b/hbase-hbtop/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-hbtop diff --git a/hbase-http/pom.xml b/hbase-http/pom.xml index 1774f29b9c5f..827d9f7f58e8 100644 --- a/hbase-http/pom.xml +++ b/hbase-http/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-http diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml index 61151ea68a68..e1489a809a11 100644 --- a/hbase-it/pom.xml +++ b/hbase-it/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration diff --git a/hbase-logging/pom.xml b/hbase-logging/pom.xml index 46ed6801c36b..9be6d6e74318 100644 --- a/hbase-logging/pom.xml +++ b/hbase-logging/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration diff --git a/hbase-mapreduce/pom.xml b/hbase-mapreduce/pom.xml index b458f7fcedaa..03e4ca70d90b 100644 --- a/hbase-mapreduce/pom.xml +++ b/hbase-mapreduce/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-mapreduce diff --git a/hbase-metrics-api/pom.xml b/hbase-metrics-api/pom.xml index c1a5af10d414..fdf10664095e 100644 --- a/hbase-metrics-api/pom.xml +++ b/hbase-metrics-api/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration diff --git a/hbase-metrics/pom.xml b/hbase-metrics/pom.xml index bfb422ae44af..955a7894973a 100644 --- a/hbase-metrics/pom.xml +++ b/hbase-metrics/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration diff --git a/hbase-procedure/pom.xml b/hbase-procedure/pom.xml index 046440a85bd0..f89772d999c6 100644 --- a/hbase-procedure/pom.xml +++ b/hbase-procedure/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml index ab2500dfdb6c..47d5eaa10f03 100644 --- a/hbase-protocol-shaded/pom.xml +++ b/hbase-protocol-shaded/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-protocol-shaded diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml index ed7b9e7332cd..139cff8a8f2b 100644 --- a/hbase-protocol/pom.xml +++ b/hbase-protocol/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-protocol diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml index 21c14270c3e9..7156efa4c377 100644 --- a/hbase-replication/pom.xml +++ b/hbase-replication/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-replication diff --git a/hbase-resource-bundle/pom.xml b/hbase-resource-bundle/pom.xml index 41d11e2e4699..c1cab02a83c9 100644 --- a/hbase-resource-bundle/pom.xml +++ b/hbase-resource-bundle/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 
2.6.5.1-4.3.0-0 ../hbase-build-configuration diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml index 8fdf48403ba3..5dcd3e63b641 100644 --- a/hbase-rest/pom.xml +++ b/hbase-rest/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-rest diff --git a/hbase-rsgroup/pom.xml b/hbase-rsgroup/pom.xml index 3fef72cb6fbb..79fb5101d710 100644 --- a/hbase-rsgroup/pom.xml +++ b/hbase-rsgroup/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-rsgroup diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index 70f4afcecde8..f544a3ef6b3a 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-server diff --git a/hbase-shaded/hbase-shaded-check-invariants/pom.xml b/hbase-shaded/hbase-shaded-check-invariants/pom.xml index a203e5b8dd64..012af304c1b8 100644 --- a/hbase-shaded/hbase-shaded-check-invariants/pom.xml +++ b/hbase-shaded/hbase-shaded-check-invariants/pom.xml @@ -15,7 +15,7 @@ org.apache.hbase hbase - ${revision} + 2.6.5.1-4.3.0-0 ../../pom.xml hbase-shaded-check-invariants diff --git a/hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml b/hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml index 1e6a3eb4e649..58b41499c339 100644 --- a/hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml +++ b/hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-shaded - ${revision} + 2.6.5.1-4.3.0-0 ../pom.xml hbase-shaded-client-byo-hadoop diff --git a/hbase-shaded/hbase-shaded-client/pom.xml b/hbase-shaded/hbase-shaded-client/pom.xml index 9173e177dfd8..f959152ccfd0 100644 --- a/hbase-shaded/hbase-shaded-client/pom.xml +++ b/hbase-shaded/hbase-shaded-client/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-shaded - ${revision} + 2.6.5.1-4.3.0-0 ../pom.xml hbase-shaded-client diff --git a/hbase-shaded/hbase-shaded-mapreduce/pom.xml b/hbase-shaded/hbase-shaded-mapreduce/pom.xml index b81d31f171ac..acb4f83c5f12 100644 --- a/hbase-shaded/hbase-shaded-mapreduce/pom.xml +++ b/hbase-shaded/hbase-shaded-mapreduce/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-shaded - ${revision} + 2.6.5.1-4.3.0-0 ../pom.xml hbase-shaded-mapreduce diff --git a/hbase-shaded/hbase-shaded-testing-util-tester/pom.xml b/hbase-shaded/hbase-shaded-testing-util-tester/pom.xml index d387ed151de6..fda0825be17f 100644 --- a/hbase-shaded/hbase-shaded-testing-util-tester/pom.xml +++ b/hbase-shaded/hbase-shaded-testing-util-tester/pom.xml @@ -24,7 +24,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../../hbase-build-configuration diff --git a/hbase-shaded/hbase-shaded-testing-util/pom.xml b/hbase-shaded/hbase-shaded-testing-util/pom.xml index bb7c919dabe6..3bf23be06355 100644 --- a/hbase-shaded/hbase-shaded-testing-util/pom.xml +++ b/hbase-shaded/hbase-shaded-testing-util/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-shaded - ${revision} + 2.6.5.1-4.3.0-0 ../pom.xml diff --git a/hbase-shaded/hbase-shaded-with-hadoop-check-invariants/pom.xml b/hbase-shaded/hbase-shaded-with-hadoop-check-invariants/pom.xml index 8f6ceb1a0bb3..2aa0a085b336 100644 --- a/hbase-shaded/hbase-shaded-with-hadoop-check-invariants/pom.xml +++ b/hbase-shaded/hbase-shaded-with-hadoop-check-invariants/pom.xml @@ -15,7 +15,7 @@ org.apache.hbase hbase - ${revision} + 2.6.5.1-4.3.0-0 ../../pom.xml 
hbase-shaded-with-hadoop-check-invariants diff --git a/hbase-shaded/pom.xml b/hbase-shaded/pom.xml index 7654a1886108..f0d5311790e7 100644 --- a/hbase-shaded/pom.xml +++ b/hbase-shaded/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-shaded diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml index 24be8d71128e..09f9e0775391 100644 --- a/hbase-shell/pom.xml +++ b/hbase-shell/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-shell diff --git a/hbase-testing-util/pom.xml b/hbase-testing-util/pom.xml index 443c4817557e..65e899509509 100644 --- a/hbase-testing-util/pom.xml +++ b/hbase-testing-util/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-testing-util diff --git a/hbase-thrift/pom.xml b/hbase-thrift/pom.xml index c8ea2d4dc751..04c1b5f68b1b 100644 --- a/hbase-thrift/pom.xml +++ b/hbase-thrift/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-thrift diff --git a/hbase-zookeeper/pom.xml b/hbase-zookeeper/pom.xml index 1bac78e22025..6eff404f7331 100644 --- a/hbase-zookeeper/pom.xml +++ b/hbase-zookeeper/pom.xml @@ -23,7 +23,7 @@ org.apache.hbase hbase-build-configuration - ${revision} + 2.6.5.1-4.3.0-0 ../hbase-build-configuration hbase-zookeeper diff --git a/pom.xml b/pom.xml index 4c534dbd212d..0626934465ed 100644 --- a/pom.xml +++ b/pom.xml @@ -38,7 +38,7 @@ org.apache.hbase hbase - ${revision} + 2.6.5.1-4.3.0-0 pom Apache HBase Apache HBase™ is the Hadoop database. Use it when you need @@ -513,6 +513,11 @@ https://issues.apache.org/jira/browse/HBASE + + github + GitHub arenadata Apache Maven Packages + https://maven.pkg.github.com/arenadata/hbase + hbase.apache.org HBase Website at hbase.apache.org @@ -522,8 +527,14 @@ file:///tmp + + + arenadata + https://maven.pkg.github.com/arenadata/* + + - 2.6.5 + 2.6.5.1-4.3.0-0 false From 14b64f6119d9db95c7c78deb99e29969eb3e495f Mon Sep 17 00:00:00 2001 From: Andrei Shitov Date: Thu, 4 Sep 2025 09:52:46 +0300 Subject: [PATCH 02/14] HBASE-29268 --- .../mapreduce/InputFileListingGenerator.java | 130 ++++++++++++++++++ .../mapreduce/MapReduceBackupCopyJob.java | 95 +------------ .../MapReduceRestoreToOriginalSplitsJob.java | 81 +++++------ 3 files changed, 169 insertions(+), 137 deletions(-) create mode 100644 hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/InputFileListingGenerator.java diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/InputFileListingGenerator.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/InputFileListingGenerator.java new file mode 100644 index 000000000000..8cd18c31c808 --- /dev/null +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/InputFileListingGenerator.java @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.mapreduce; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.tools.CopyListingFileStatus; +import org.apache.hadoop.tools.DistCp; +import org.apache.hadoop.tools.DistCpConstants; +import org.apache.hadoop.tools.DistCpOptions; +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public class InputFileListingGenerator { + + public static Path createInputFileListing(DistCp distCp, Job job, Configuration conf, + Path fileListingPath, int levelsToPreserve) throws IOException { + long totalBytesExpected = 0; + int totalRecords = 0; + try (SequenceFile.Writer writer = getWriter(fileListingPath, conf)) { + List srcFiles = getSourceFiles(distCp); + if (srcFiles.size() == 0) { + return fileListingPath; + } + totalRecords = srcFiles.size(); + FileSystem fs = srcFiles.get(0).getFileSystem(conf); + for (Path path : srcFiles) { + FileStatus fst = fs.getFileStatus(path); + totalBytesExpected += fst.getLen(); + Text key = getKey(path, levelsToPreserve); + writer.append(key, new CopyListingFileStatus(fst)); + } + writer.close(); + + // update jobs configuration + + Configuration cfg = job.getConfiguration(); + cfg.setLong(DistCpConstants.CONF_LABEL_TOTAL_BYTES_TO_BE_COPIED, totalBytesExpected); + cfg.set(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, fileListingPath.toString()); + cfg.setLong(DistCpConstants.CONF_LABEL_TOTAL_NUMBER_OF_RECORDS, totalRecords); + } catch (NoSuchFieldException | SecurityException | IllegalArgumentException + | IllegalAccessException | NoSuchMethodException | ClassNotFoundException + | InvocationTargetException e) { + throw new IOException(e); + } + return fileListingPath; + } + + @SuppressWarnings("unchecked") + public static List getSourcePaths(DistCp distCp, Field fieldInputOptions) + throws IOException { + Object options; + try { + options = fieldInputOptions.get(distCp); + if (options instanceof DistCpOptions) { + return ((DistCpOptions) options).getSourcePaths(); + } else { + // Hadoop 3 + Class classContext = Class.forName("org.apache.hadoop.tools.DistCpContext"); + Method methodGetSourcePaths = classContext.getDeclaredMethod("getSourcePaths"); + methodGetSourcePaths.setAccessible(true); + + return (List) methodGetSourcePaths.invoke(options); + } + } catch (IllegalArgumentException | IllegalAccessException | ClassNotFoundException + | NoSuchMethodException | SecurityException | InvocationTargetException e) { + throw new IOException(e); + } + } + + @SuppressWarnings("unchecked") + private static List getSourceFiles(DistCp distCp) throws NoSuchFieldException, + SecurityException, IllegalArgumentException, IllegalAccessException, NoSuchMethodException, + 
ClassNotFoundException, InvocationTargetException, IOException { + Field options = null; + try { + options = DistCp.class.getDeclaredField("inputOptions"); + } catch (NoSuchFieldException | SecurityException e) { + options = DistCp.class.getDeclaredField("context"); + } + options.setAccessible(true); + return getSourcePaths(distCp, options); + } + + private static SequenceFile.Writer getWriter(Path pathToListFile, Configuration conf) + throws IOException { + FileSystem fs = pathToListFile.getFileSystem(conf); + fs.delete(pathToListFile, false); + return SequenceFile.createWriter(conf, SequenceFile.Writer.file(pathToListFile), + SequenceFile.Writer.keyClass(Text.class), + SequenceFile.Writer.valueClass(CopyListingFileStatus.class), + SequenceFile.Writer.compression(SequenceFile.CompressionType.NONE)); + } + + private static Text getKey(Path path, int level) { + int count = 0; + String relPath = ""; + while (count++ < level) { + relPath = Path.SEPARATOR + path.getName() + relPath; + path = path.getParent(); + } + return new Text(relPath); + } + +} diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java index 747bd3e217d9..0d34a854fbcc 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.lang.reflect.Field; -import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.math.BigDecimal; import java.util.ArrayList; @@ -27,7 +26,6 @@ import java.util.List; import java.util.Objects; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; @@ -37,13 +35,10 @@ import org.apache.hadoop.hbase.backup.impl.BackupManager; import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.hadoop.hbase.snapshot.ExportSnapshot; -import org.apache.hadoop.io.SequenceFile; -import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Cluster; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.JobID; -import org.apache.hadoop.tools.CopyListingFileStatus; import org.apache.hadoop.tools.DistCp; import org.apache.hadoop.tools.DistCpConstants; import org.apache.hadoop.tools.DistCpOptionSwitch; @@ -184,7 +179,7 @@ public Job execute() throws Exception { Job job = null; try { - List srcs = getSourcePaths(fieldInputOptions); + List srcs = InputFileListingGenerator.getSourcePaths(this, fieldInputOptions); long totalSrcLgth = 0; for (Path aSrc : srcs) { @@ -265,99 +260,17 @@ private Field getInputOptionsField(Class classDistCp) throws IOException { return f; } - @SuppressWarnings("unchecked") - private List getSourcePaths(Field fieldInputOptions) throws IOException { - Object options; - try { - options = fieldInputOptions.get(this); - if (options instanceof DistCpOptions) { - return ((DistCpOptions) options).getSourcePaths(); - } else { - // Hadoop 3 - Class classContext = Class.forName("org.apache.hadoop.tools.DistCpContext"); - Method methodGetSourcePaths = classContext.getDeclaredMethod("getSourcePaths"); - methodGetSourcePaths.setAccessible(true); - - return (List) 
methodGetSourcePaths.invoke(options); - } - } catch (IllegalArgumentException | IllegalAccessException | ClassNotFoundException - | NoSuchMethodException | SecurityException | InvocationTargetException e) { - throw new IOException(e); - } - - } - @Override protected Path createInputFileListing(Job job) throws IOException { if (conf.get(NUMBER_OF_LEVELS_TO_PRESERVE_KEY) == null) { return super.createInputFileListing(job); } - long totalBytesExpected = 0; - int totalRecords = 0; - Path fileListingPath = getFileListingPath(); - try (SequenceFile.Writer writer = getWriter(fileListingPath)) { - List srcFiles = getSourceFiles(); - if (srcFiles.size() == 0) { - return fileListingPath; - } - totalRecords = srcFiles.size(); - FileSystem fs = srcFiles.get(0).getFileSystem(conf); - for (Path path : srcFiles) { - FileStatus fst = fs.getFileStatus(path); - totalBytesExpected += fst.getLen(); - Text key = getKey(path); - writer.append(key, new CopyListingFileStatus(fst)); - } - writer.close(); - - // update jobs configuration - - Configuration cfg = job.getConfiguration(); - cfg.setLong(DistCpConstants.CONF_LABEL_TOTAL_BYTES_TO_BE_COPIED, totalBytesExpected); - cfg.set(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, fileListingPath.toString()); - cfg.setLong(DistCpConstants.CONF_LABEL_TOTAL_NUMBER_OF_RECORDS, totalRecords); - } catch (NoSuchFieldException | SecurityException | IllegalArgumentException - | IllegalAccessException | NoSuchMethodException | ClassNotFoundException - | InvocationTargetException e) { - throw new IOException(e); - } - return fileListingPath; - } - - private Text getKey(Path path) { - int level = conf.getInt(NUMBER_OF_LEVELS_TO_PRESERVE_KEY, 1); - int count = 0; - String relPath = ""; - while (count++ < level) { - relPath = Path.SEPARATOR + path.getName() + relPath; - path = path.getParent(); - } - return new Text(relPath); - } - - private List getSourceFiles() throws NoSuchFieldException, SecurityException, - IllegalArgumentException, IllegalAccessException, NoSuchMethodException, - ClassNotFoundException, InvocationTargetException, IOException { - Field options = null; - try { - options = DistCp.class.getDeclaredField("inputOptions"); - } catch (NoSuchFieldException | SecurityException e) { - options = DistCp.class.getDeclaredField("context"); - } - options.setAccessible(true); - return getSourcePaths(options); - } - private SequenceFile.Writer getWriter(Path pathToListFile) throws IOException { - FileSystem fs = pathToListFile.getFileSystem(conf); - fs.delete(pathToListFile, false); - return SequenceFile.createWriter(conf, SequenceFile.Writer.file(pathToListFile), - SequenceFile.Writer.keyClass(Text.class), - SequenceFile.Writer.valueClass(CopyListingFileStatus.class), - SequenceFile.Writer.compression(SequenceFile.CompressionType.NONE)); + int levels = conf.getInt(NUMBER_OF_LEVELS_TO_PRESERVE_KEY, 1); + return InputFileListingGenerator.createInputFileListing(this, job, conf, getFileListingPath(), + levels); } - } /** diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreToOriginalSplitsJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreToOriginalSplitsJob.java index 54859d427a8a..1dad51ec6785 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreToOriginalSplitsJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreToOriginalSplitsJob.java @@ -18,21 +18,17 @@ package 
org.apache.hadoop.hbase.backup.mapreduce; import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.RestoreJob; +import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.hadoop.hbase.tool.BulkLoadHFiles; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.FSVisitor; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.tools.DistCp; +import org.apache.hadoop.util.Tool; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; - @InterfaceAudience.Private public class MapReduceRestoreToOriginalSplitsJob implements RestoreJob { private Configuration conf; @@ -41,18 +37,29 @@ public class MapReduceRestoreToOriginalSplitsJob implements RestoreJob { public void run(Path[] dirPaths, TableName[] fromTables, Path restoreRootDir, TableName[] toTables, boolean fullBackupRestore) throws IOException { Configuration conf = getConf(); - - // We are using the files from the snapshot. We should copy them rather than move them over conf.setBoolean(BulkLoadHFiles.ALWAYS_COPY_FILES, true); - Path backupRootDir = new Path(conf.get(RestoreJob.BACKUP_ROOT_PATH_KEY)); + for (int i = 0; i < fromTables.length; ++i) { + Path bulkOutputPath = BackupUtils.getBulkOutputDir(restoreRootDir, + BackupUtils.getFileNameCompatibleString(toTables[i]), getConf()); + + for (Path dirPath : dirPaths) { + String[] args = new String[] { dirPath.toString(), bulkOutputPath.toString() }; - FileSystem fs = backupRootDir.getFileSystem(conf); - Map> family2Files = buildFamily2Files(fs, dirPaths, fullBackupRestore); + try { + Tool player = getDistCp(conf, fullBackupRestore); + int result = player.run(args); - BulkLoadHFiles bulkLoad = BulkLoadHFiles.create(conf); - for (int i = 0; i < fromTables.length; i++) { - bulkLoad.bulkLoad(toTables[i], family2Files); + if (!BackupUtils.succeeded(result)) { + throw new IOException("DistCp failed with exit code " + result); + } + } catch (Exception e) { + throw new IOException(e); + } + + BulkLoadHFiles loader = BackupUtils.createLoader(conf); + loader.bulkLoad(toTables[i], bulkOutputPath); + } } } @@ -66,41 +73,23 @@ public Configuration getConf() { return conf; } - private static Map> buildFamily2Files(FileSystem fs, Path[] dirs, - boolean isFullBackup) throws IOException { - if (isFullBackup) { - return buildFullBackupFamily2Files(fs, dirs); + private static DistCp getDistCp(Configuration conf, boolean fullBackupRestore) throws Exception { + if (fullBackupRestore) { + return new DistCp(conf, null); } - Map> family2Files = new HashMap<>(); + return new IncrementalBackupDistCp(conf); + } - for (Path dir : dirs) { - byte[] familyName = Bytes.toBytes(dir.getParent().getName()); - if (family2Files.containsKey(familyName)) { - family2Files.get(familyName).add(dir); - } else { - family2Files.put(familyName, Lists.newArrayList(dir)); - } + private static class IncrementalBackupDistCp extends DistCp { + public IncrementalBackupDistCp(Configuration conf) throws Exception { + super(conf, null); } - return family2Files; - } - - private static Map> buildFullBackupFamily2Files(FileSystem fs, Path[] dirs) - throws IOException { - Map> family2Files = new HashMap<>(); - for (Path regionPath : dirs) { - FSVisitor.visitRegionStoreFiles(fs, regionPath, 
(region, family, name) -> { - Path path = new Path(regionPath, new Path(family, name)); - byte[] familyName = Bytes.toBytes(family); - if (family2Files.containsKey(familyName)) { - family2Files.get(familyName).add(path); - } else { - family2Files.put(familyName, Lists.newArrayList(path)); - } - }); + @Override + protected Path createInputFileListing(Job job) throws IOException { + return InputFileListingGenerator.createInputFileListing(this, job, getConf(), + getFileListingPath(), 2); } - return family2Files; } - } From 5f1a2d7e5297dec057856a2bf0a11a3f36422a18 Mon Sep 17 00:00:00 2001 From: Andrei Shitov Date: Thu, 4 Sep 2025 09:53:46 +0300 Subject: [PATCH 03/14] HBASE-29240 --- .../hbase/backup/BackupClientFactory.java | 50 ++++++++++- .../hadoop/hbase/backup/BackupInfo.java | 12 +++ .../hbase/backup/BackupMasterObserver.java | 28 ++++++ .../hadoop/hbase/backup/BackupRequest.java | 10 +++ .../hbase/backup/impl/BackupManager.java | 4 + .../hbase/backup/impl/BackupSystemTable.java | 54 +++++++++++ ...IncrementalBackupsDisallowedException.java | 34 +++++++ .../hbase/backup/TestBackupSystemTable.java | 46 ++++++++++ .../TestDisallowIncrementalBackups.java | 89 +++++++++++++++++++ .../src/main/protobuf/Backup.proto | 1 + 10 files changed, 326 insertions(+), 2 deletions(-) create mode 100644 hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupsDisallowedException.java create mode 100644 hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestDisallowIncrementalBackups.java diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java index d710e82c4fd3..997f1492a800 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java @@ -18,15 +18,24 @@ package org.apache.hadoop.hbase.backup; import java.io.IOException; +import java.util.List; +import java.util.Objects; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; +import org.apache.hadoop.hbase.backup.impl.BackupManager; import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient; +import org.apache.hadoop.hbase.backup.impl.IncrementalBackupsDisallowedException; import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient; import org.apache.hadoop.hbase.backup.impl.TableBackupClient; import org.apache.hadoop.hbase.client.Connection; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @InterfaceAudience.Private public final class BackupClientFactory { + private static final Logger LOG = LoggerFactory.getLogger(BackupClientFactory.class); + private BackupClientFactory() { } @@ -49,8 +58,45 @@ public static TableBackupClient create(Connection conn, String backupId, BackupR BackupType type = request.getBackupType(); if (type == BackupType.FULL) { return new FullTableBackupClient(conn, backupId, request); - } else { - return new IncrementalTableBackupClient(conn, backupId, request); } + + String latestFullBackup = getLatestFullBackupId(conn, request); + + try (BackupAdmin admin = new BackupAdminImpl(conn)) { + boolean disallowFurtherIncrementals = + admin.getBackupInfo(latestFullBackup).isDisallowFurtherIncrementals(); + + if (!disallowFurtherIncrementals) { + return new IncrementalTableBackupClient(conn, backupId, request); + } + + if 
(request.getFailOnDisallowedIncrementals()) { + throw new IncrementalBackupsDisallowedException(request); + } + + LOG.info("Incremental backups disallowed for backupId {}, creating a full backup", + latestFullBackup); + return new FullTableBackupClient(conn, backupId, request); + } + } + + private static String getLatestFullBackupId(Connection conn, BackupRequest request) + throws IOException { + try (BackupManager backupManager = new BackupManager(conn, conn.getConfiguration())) { + // Sorted in desc order by time + List backups = backupManager.getBackupHistory(); + + for (BackupInfo info : backups) { + if ( + info.getType() == BackupType.FULL + && Objects.equals(info.getBackupRootDir(), request.getTargetRootDir()) + ) { + return info.getBackupId(); + } + } + } + throw new RuntimeException( + "Could not find a valid full backup for incremental request for tables" + + request.getTableList()); } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java index edf5a2b517ff..69c1e6d5e14c 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java @@ -182,6 +182,8 @@ public enum BackupPhase { */ private boolean noChecksumVerify; + private boolean disallowFurtherIncrementals = false; + public BackupInfo() { backupTableInfoMap = new HashMap<>(); } @@ -215,6 +217,14 @@ public void setBandwidth(long bandwidth) { this.bandwidth = bandwidth; } + public void setDisallowFurtherIncrementals(boolean disallowFurtherIncrementals) { + this.disallowFurtherIncrementals = disallowFurtherIncrementals; + } + + public boolean isDisallowFurtherIncrementals() { + return disallowFurtherIncrementals; + } + public void setNoChecksumVerify(boolean noChecksumVerify) { this.noChecksumVerify = noChecksumVerify; } @@ -435,6 +445,7 @@ public BackupProtos.BackupInfo toProtosBackupInfo() { builder.setBackupType(BackupProtos.BackupType.valueOf(getType().name())); builder.setWorkersNumber(workers); builder.setBandwidth(bandwidth); + builder.setDisallowFurtherIncrementals(disallowFurtherIncrementals); return builder.build(); } @@ -530,6 +541,7 @@ public static BackupInfo fromProto(BackupProtos.BackupInfo proto) { context.setType(BackupType.valueOf(proto.getBackupType().name())); context.setWorkers(proto.getWorkersNumber()); context.setBandwidth(proto.getBandwidth()); + context.setDisallowFurtherIncrementals(proto.getDisallowFurtherIncrementals()); return context; } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMasterObserver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMasterObserver.java index 3e95e7bbcbc5..8eae12cc83d7 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMasterObserver.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMasterObserver.java @@ -68,6 +68,17 @@ public void postDeleteTable(ObserverContext ctx, deleteBulkLoads(cfg, tableName, (ignored) -> true); } + @Override + public void preTruncateTable(ObserverContext ctx, + TableName tableName) throws IOException { + Configuration cfg = ctx.getEnvironment().getConfiguration(); + if (!BackupManager.isBackupEnabled(cfg)) { + LOG.debug("Skipping preTruncateTable hook since backup is disabled"); + return; + } + disallowIncrementalBackups(ctx.getEnvironment(), tableName); + } + @Override public void postTruncateTable(ObserverContext ctx, TableName 
tableName) throws IOException { @@ -77,6 +88,7 @@ public void postTruncateTable(ObserverContext ctx, return; } deleteBulkLoads(cfg, tableName, (ignored) -> true); + disallowIncrementalBackups(ctx.getEnvironment(), tableName); } @Override @@ -114,4 +126,20 @@ private void deleteBulkLoads(Configuration config, TableName tableName, tbl.deleteBulkLoadedRows(rowsToDelete); } } + + private static void disallowIncrementalBackups(MasterCoprocessorEnvironment env, + TableName tableName) throws IOException { + Configuration conf = env.getConfiguration(); + if (tableName.equals(BackupSystemTable.getTableName(conf))) { + return; + } + + BackupSystemTable table = new BackupSystemTable(env.getConnection()); + try { + table.startBackupExclusiveOperation(); + table.disallowFurtherIncrementals(tableName); + } finally { + table.finishBackupExclusiveOperation(); + } + } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java index aa2d5b44259f..da466c828315 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java @@ -35,6 +35,11 @@ public Builder() { request = new BackupRequest(); } + public Builder withFailOnDisallowedIncrementals(boolean failOnDisallowedIncrementals) { + request.failOnDisallowedIncrementals = failOnDisallowedIncrementals; + return this; + } + public Builder withBackupType(BackupType type) { request.setBackupType(type); return this; @@ -89,10 +94,15 @@ public BackupRequest build() { private boolean noChecksumVerify = false; private String backupSetName; private String yarnPoolName; + private boolean failOnDisallowedIncrementals = false; private BackupRequest() { } + public boolean getFailOnDisallowedIncrementals() { + return failOnDisallowedIncrementals; + } + private BackupRequest setBackupType(BackupType type) { this.type = type; return this; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java index dff3cc27b006..10b436b7bc95 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java @@ -135,6 +135,10 @@ public static void decorateMasterConfiguration(Configuration conf) { + " Added master procedure manager: {}. Added master observer: {}", cleanerClass, masterProcedureClass, BackupHFileCleaner.class.getName(), observerClass); } + + String observers = conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY); + conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + (observers == null ? 
"" : observers + ",") + BackupMasterObserver.class.getName()); } /** diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index 22e4c9f47bd6..a2b2c86eb9c9 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -83,6 +83,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.base.Splitter; +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hbase.thirdparty.com.google.common.collect.Iterators; import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos; @@ -285,6 +286,41 @@ public void close() { // do nothing } + /** + * @param toDisallow Any most recent full back containing this table will be marked as disallowing + * further incrementals + */ + public void disallowFurtherIncrementals(TableName toDisallow) throws IOException { + List fullTableBackups = getCompletedFullBackupsSortedByHistoryDesc(); + List invalidatePuts = new ArrayList<>(fullTableBackups.size()); + Set backupRootDirsSeen = new HashSet<>(fullTableBackups.size()); + + for (BackupInfo backupInfo : fullTableBackups) { + // to minimize the amount of mutations against the backup system table, we only + // need to update the most recent full backups that allow incremental backups + if ( + backupInfo.getTables().contains(toDisallow) && backupInfo.getType() == BackupType.FULL + && !backupInfo.isDisallowFurtherIncrementals() + && !backupRootDirsSeen.contains(backupInfo.getBackupRootDir()) + ) { + backupInfo.setDisallowFurtherIncrementals(true); + backupRootDirsSeen.add(backupInfo.getBackupRootDir()); + invalidatePuts.add(createPutForBackupInfo(backupInfo)); + LOG.info("Disallowing incremental backups for backup {} due to table {}", + backupInfo.getBackupId(), toDisallow); + } + } + + try (BufferedMutator mutator = connection.getBufferedMutator(tableName)) { + mutator.mutate(invalidatePuts); + } + + // Clean up bulkloaded HFiles associated with the table + List bulkloadedRows = readBulkloadRows(ImmutableList.of(toDisallow)).stream() + .map(BulkLoad::getRowKey).collect(Collectors.toList()); + deleteBulkLoadedRows(bulkloadedRows); + } + /** * Updates status (state) of a backup session in backup system table table * @param info backup info @@ -637,6 +673,24 @@ public List getBackupHistory(Order order, int n, BackupInfo.Filter.. } } + private List getCompletedFullBackupsSortedByHistoryDesc() throws IOException { + Scan scan = createScanForBackupHistory(Order.NEW_TO_OLD); + List backups = new ArrayList<>(); + + try (Table table = connection.getTable(tableName)) { + ResultScanner scanner = table.getScanner(scan); + Result res; + while ((res = scanner.next()) != null) { + res.advance(); + BackupInfo context = cellToBackupInfo(res.current()); + if (context.getState() == BackupState.COMPLETE && context.getType() == BackupType.FULL) { + backups.add(context); + } + } + } + return backups; + } + /** * Write the current timestamps for each regionserver to backup system table after a successful * full or incremental backup. 
The saved timestamp is of the last log file that was backed up diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupsDisallowedException.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupsDisallowedException.java new file mode 100644 index 000000000000..c847d3bfe301 --- /dev/null +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupsDisallowedException.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.impl; + +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.backup.BackupRequest; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class IncrementalBackupsDisallowedException extends HBaseIOException { + public IncrementalBackupsDisallowedException(BackupRequest request) { + super(String.format( + "Could not take incremental backup for tables %s because is disallowed," + + " please take a full backup instead", + request.getTableList())); + } +} diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java index 039669e2a46f..89631fa8422f 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.backup; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -51,6 +52,8 @@ import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + /** * Test cases for backup system table API */ @@ -464,6 +467,49 @@ public void testBackupSetList() throws IOException { } } + @Test + public void testDisallowFurtherIncrementals() throws Exception { + try (BackupSystemTable table = new BackupSystemTable(conn)) { + TableName toInvalidate = TableName.valueOf("t1"); + List t1 = Lists.newArrayList(toInvalidate, TableName.valueOf("t2")); + List t2 = Lists.newArrayList(toInvalidate, TableName.valueOf("t3")); + List t3 = Lists.newArrayList(TableName.valueOf("t2"), TableName.valueOf("t3")); + + BackupInfo backup = createBackupInfo(); + backup.setState(BackupState.COMPLETE); + + backup.setTables(t1); + 
backup.setBackupId("backup1"); + backup.setBackupRootDir("backup1"); + backup.setStartTs(0L); + table.updateBackupInfo(backup); + + backup.setTables(t2); + backup.setBackupId("backup2"); + backup.setBackupRootDir("backup2"); + backup.setStartTs(1L); + table.updateBackupInfo(backup); + + backup.setTables(t3); + backup.setBackupId("backup3"); + backup.setBackupRootDir("backup2"); + backup.setStartTs(2L); + table.updateBackupInfo(backup); + + table.disallowFurtherIncrementals(toInvalidate); + BackupInfo result = table.readBackupInfo("backup1"); + assertTrue(result.isDisallowFurtherIncrementals()); + + table.disallowFurtherIncrementals(toInvalidate); + result = table.readBackupInfo("backup2"); + assertTrue(result.isDisallowFurtherIncrementals()); + + table.disallowFurtherIncrementals(toInvalidate); + result = table.readBackupInfo("backup3"); + assertFalse(result.isDisallowFurtherIncrementals()); + } + } + private boolean compare(BackupInfo one, BackupInfo two) { return one.getBackupId().equals(two.getBackupId()) && one.getType().equals(two.getType()) && one.getBackupRootDir().equals(two.getBackupRootDir()) diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestDisallowIncrementalBackups.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestDisallowIncrementalBackups.java new file mode 100644 index 000000000000..c440660721ed --- /dev/null +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestDisallowIncrementalBackups.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.backup; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +import java.io.IOException; +import java.util.List; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient; +import org.apache.hadoop.hbase.backup.impl.IncrementalBackupsDisallowedException; +import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + +@Tag(LargeTests.TAG) +public class TestDisallowIncrementalBackups extends TestBackupBase { + + static { + provider = "multiwal"; + } + + @Test + public void testItDisallowsIncrementalBackups() throws IOException { + List tables = Lists.newArrayList(table1, table2); + + try (Connection conn = ConnectionFactory.createConnection(conf1); + BackupSystemTable backupSystemTable = new BackupSystemTable(conn)) { + BackupAdminImpl client = new BackupAdminImpl(conn); + + insertIntoTable(conn, table1, famName, 1, 10); + BackupRequest req = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR); + checkSucceeded(client.backupTables(req)); + // Invalidate the backup + conn.getAdmin().disableTable(table1); + conn.getAdmin().truncateTable(table1, true); + req = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); + + // Should fall back to full table backup class + assertEquals(BackupClientFactory.create(conn, "backupId", req).getClass(), + FullTableBackupClient.class); + + // Release the lock created by initializing the TableBackupClient. 
This lock is taken by + // instantiating the TableBackupClient when we call BackupClientFactory#create, and is never + // released because we don't call TableBackupClient#execute + backupSystemTable.finishBackupExclusiveOperation(); + + // should throw error + final BackupRequest failOnDisallowIncrementalsReq = + new BackupRequest.Builder().withBackupType(BackupType.INCREMENTAL).withTableList(tables) + .withTargetRootDir(BACKUP_ROOT_DIR).withNoChecksumVerify(true) + .withFailOnDisallowedIncrementals(true).build(); + assertThrows(IncrementalBackupsDisallowedException.class, + () -> BackupClientFactory.create(conn, "backupId", failOnDisallowIncrementalsReq)); + + req = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR); + String fullBackupId = client.backupTables(req); + checkSucceeded(fullBackupId); + + // Check that the backup line allows for incremental backups + assertEquals( + BackupClientFactory.create(conn, "backupId", failOnDisallowIncrementalsReq).getClass(), + IncrementalTableBackupClient.class); + } + } +} diff --git a/hbase-protocol-shaded/src/main/protobuf/Backup.proto b/hbase-protocol-shaded/src/main/protobuf/Backup.proto index a114001ba504..05a269294abb 100644 --- a/hbase-protocol-shaded/src/main/protobuf/Backup.proto +++ b/hbase-protocol-shaded/src/main/protobuf/Backup.proto @@ -93,6 +93,7 @@ message BackupInfo { optional uint32 workers_number = 11; optional uint64 bandwidth = 12; map table_set_timestamp = 13; + optional bool disallowFurtherIncrementals = 14; message RSTimestampMap { map rs_timestamp = 1; From ff8a967d7e486fa31b72e2e997587a503b8916a3 Mon Sep 17 00:00:00 2001 From: "v.popov" Date: Wed, 17 Dec 2025 16:05:50 +0000 Subject: [PATCH 04/14] ADH-7177 Update bin/hbase --- bin/hbase | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/bin/hbase b/bin/hbase index c7425df84292..dbb67c5be3ac 100755 --- a/bin/hbase +++ b/bin/hbase @@ -425,16 +425,16 @@ unset IFS #Set the right GC options based on the what we are running declare -a server_cmds=("master" "regionserver" "thrift" "thrift2" "rest" "avro" "zookeeper") for cmd in ${server_cmds[@]}; do - if [[ $cmd == $COMMAND ]]; then - server=true - break - fi + if [[ $cmd == $COMMAND ]]; then + server=true + break + fi done if [[ $server ]]; then - HBASE_OPTS="$HBASE_OPTS $SERVER_GC_OPTS" + HBASE_OPTS="$HBASE_OPTS $SERVER_GC_OPTS" else - HBASE_OPTS="$HBASE_OPTS $CLIENT_GC_OPTS" + HBASE_OPTS="$HBASE_OPTS $CLIENT_GC_OPTS" fi if [ -n "$HBASE_SERVER_JAAS_OPTS" ]; then @@ -548,7 +548,7 @@ fi # figure out which class to run if [ "$COMMAND" = "shell" ] ; then - #find the hbase ruby sources + #find the hbase ruby sources # assume we are in a binary install if lib/ruby exists if [ -d "$HBASE_HOME/lib/ruby" ]; then # We want jruby to consume these things rather than our bootstrap script; @@ -644,12 +644,12 @@ elif [ "$COMMAND" = "regionserver" ] ; then elif [ "$COMMAND" = "thrift" ] ; then CLASS='org.apache.hadoop.hbase.thrift.ThriftServer' if [ "$1" != "stop" ] ; then - HBASE_OPTS="$HBASE_OPTS $HBASE_THRIFT_OPTS" + HBASE_OPTS="$HBASE_OPTS $HBASE_THRIFT1_OPTS" fi elif [ "$COMMAND" = "thrift2" ] ; then CLASS='org.apache.hadoop.hbase.thrift2.ThriftServer' if [ "$1" != "stop" ] ; then - HBASE_OPTS="$HBASE_OPTS $HBASE_THRIFT_OPTS" + HBASE_OPTS="$HBASE_OPTS $HBASE_THRIFT2_OPTS" fi elif [ "$COMMAND" = "rest" ] ; then CLASS='org.apache.hadoop.hbase.rest.RESTServer' From d301c61fc67f9575765433b46c084581ffe17ed5 Mon Sep 17 00:00:00 2001 From: Ivan Lapa Date: Tue, 3 Mar 2026 20:06:08 +0300 Subject: [PATCH 
05/14] ADH-7814: add hbase-backup-shaded module to pom.xml --- hbase-shaded/hbase-backup-shaded/pom.xml | 219 +++++++++++++++++++++++ hbase-shaded/pom.xml | 4 + 2 files changed, 223 insertions(+) create mode 100644 hbase-shaded/hbase-backup-shaded/pom.xml diff --git a/hbase-shaded/hbase-backup-shaded/pom.xml b/hbase-shaded/hbase-backup-shaded/pom.xml new file mode 100644 index 000000000000..bbbd46bc7dc9 --- /dev/null +++ b/hbase-shaded/hbase-backup-shaded/pom.xml @@ -0,0 +1,219 @@ + + + 4.0.0 + + org.apache.hbase + hbase-shaded + 2.6.5.1-4.3.0-0 + ../pom.xml + + hbase-backup-shaded + Apache HBase - shaded - backup (with hadoop bundled) + + + 5.2.1 + ${revision} + 1.19.4 + + + + + + org.apache.hbase + hbase-backup + ${hbase.hadoop3.version} + + + org.apache.hbase + hbase-client + ${hbase.hadoop3.version} + + + org.apache.hbase + hbase-server + ${hbase.hadoop3.version} + + + org.apache.hbase + hbase-mapreduce + ${revision} + + + org.apache.hbase + hbase-shaded-mapreduce + ${revision} + + + org.apache.hbase + hbase-common + ${hbase.hadoop3.version} + + + org.apache.hbase + hbase-http + ${hbase.hadoop3.version} + + + org.apache.hbase + hbase-logging + ${hbase.hadoop3.version} + + + org.apache.hbase + hbase-protocol + ${hbase.hadoop3.version} + + + org.apache.hbase + hbase-protocol-shaded + ${hbase.hadoop3.version} + + + org.apache.hbase + hbase-procedure + ${hbase.hadoop3.version} + + + org.apache.hbase + hbase-zookeeper + ${hbase.hadoop3.version} + + + org.apache.hbase + hbase-replication + ${hbase.hadoop3.version} + + + org.apache.hbase + hbase-metrics + ${hbase.hadoop3.version} + + + org.apache.hbase + hbase-metrics-api + ${hbase.hadoop3.version} + + + org.apache.hbase + hbase-asyncfs + ${hbase.hadoop3.version} + + + org.apache.hbase + hbase-hadoop-compat + ${hbase.hadoop3.version} + + + org.apache.hbase + hbase-hadoop2-compat + ${hbase.hadoop3.version} + + + + + + + org.apache.hbase + hbase-backup + ${hbase.hadoop3.version} + + + org.apache.tomcat + tomcat-jasper + + + javax.servlet + javax.servlet-api + + + + + org.apache.hadoop + hadoop-mapreduce-client-jobclient + ${hadoop.version} + + + com.sun.jersey + jersey-core + ${jersey-core.version} + + + javax.ws.rs + javax.ws.rs-api + + + org.apache.phoenix + phoenix-mapreduce-byo-shaded-hbase-hbase-2.6 + ${phoenix.version} + + + + + + + org.apache.maven.plugins + maven-site-plugin + + true + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-shade-plugin + + + aggregate-into-a-jar-with-relocated-third-parties + + + + + javax.annotation:javax.annotation-api + javax.activation:javax.activation-api + jakarta.activation:jakarta.activation-api + jakarta.ws.rs:jakarta.ws.rs-api + jakarta.annotation:jakarta.annotation-api + jakarta.validation:jakarta.validation-api + org.glassfish.hk2.external:jakarta.inject + org.apache.hbase:hbase-resource-bundle + org.slf4j:* + com.google.code.findbugs:* + com.github.stephenc.findbugs:* + com.github.spotbugs:* + org.apache.htrace:* + org.apache.yetus:* + org.apache.logging.log4j:* + commons-logging:* + org.javassist:* + + + + + io.opentelemetry + ${shaded.prefix}.io.opentelemetry + + + + + *:* + + META-INF/services/javax.ws.rs.ext.MessageBodyReader + META-INF/services/javax.ws.rs.ext.MessageBodyWriter + META-INF/services/javax.ws.rs.ext.RuntimeDelegate + + + + + + + + + + diff --git a/hbase-shaded/pom.xml b/hbase-shaded/pom.xml index f0d5311790e7..e757a608bbbc 100644 --- a/hbase-shaded/pom.xml +++ b/hbase-shaded/pom.xml @@ -34,6 +34,7 @@ hbase-shaded-client-byo-hadoop 
hbase-shaded-client hbase-shaded-mapreduce + hbase-backup-shaded hbase-shaded-testing-util hbase-shaded-testing-util-tester hbase-shaded-check-invariants @@ -470,6 +471,9 @@ jnamed* lookup* update* + + META-INF/services/java.net.spi.InetAddressResolverProvider From b1cac9774ec202472bfe3663d33562b9a3faad04 Mon Sep 17 00:00:00 2001 From: Ivan Lapa Date: Tue, 3 Mar 2026 20:31:29 +0300 Subject: [PATCH 06/14] ADH-7814: update dnsjava filter to exclude InetAddressResolverProvider --- hbase-shaded/hbase-backup-shaded/pom.xml | 6 ++++++ hbase-shaded/pom.xml | 3 --- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/hbase-shaded/hbase-backup-shaded/pom.xml b/hbase-shaded/hbase-backup-shaded/pom.xml index bbbd46bc7dc9..aefd83336efd 100644 --- a/hbase-shaded/hbase-backup-shaded/pom.xml +++ b/hbase-shaded/hbase-backup-shaded/pom.xml @@ -201,6 +201,12 @@ + + dnsjava:dnsjava + + META-INF/services/java.net.spi.InetAddressResolverProvider + + *:* diff --git a/hbase-shaded/pom.xml b/hbase-shaded/pom.xml index e757a608bbbc..cdddc7022830 100644 --- a/hbase-shaded/pom.xml +++ b/hbase-shaded/pom.xml @@ -471,9 +471,6 @@ jnamed* lookup* update* - - META-INF/services/java.net.spi.InetAddressResolverProvider From e36da6a6d2d4b1b876f4e2cad1191bdb9a1c2470 Mon Sep 17 00:00:00 2001 From: Dev Hingu Date: Tue, 24 Mar 2026 15:39:01 +0530 Subject: [PATCH 07/14] HBASE-29933: update_all_config hangs indefinitely when balancing event is in progress(#7932) Signed-off-by: Wellington Chevreuil Reviewed-by: Vaibhav Joshi --- .../apache/hadoop/hbase/master/HMaster.java | 122 +++++++------- .../hadoop/hbase/master/LoadBalancer.java | 8 + .../balancer/CacheAwareLoadBalancer.java | 70 +++++++- .../balancer/TestCacheAwareLoadBalancer.java | 151 ++++++++++++++++++ 4 files changed, 288 insertions(+), 63 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 4a89ad7bc125..4e7bd84b22fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -2036,80 +2036,86 @@ public BalanceResponse balance(BalanceRequest request) throws IOException { } synchronized (this.balancer) { - // Only allow one balance run at at time. - if (this.assignmentManager.getRegionTransitScheduledCount() > 0) { - List regionsInTransition = assignmentManager.getRegionsInTransition(); - // if hbase:meta region is in transition, result of assignment cannot be recorded - // ignore the force flag in that case - boolean metaInTransition = assignmentManager.isMetaRegionInTransition(); - List toPrint = regionsInTransition; - int max = 5; - boolean truncated = false; - if (regionsInTransition.size() > max) { - toPrint = regionsInTransition.subList(0, max); - truncated = true; - } + try { + this.balancer.onBalancingStart(); + + // Only allow one balance run at at time. 
+ if (this.assignmentManager.getRegionTransitScheduledCount() > 0) { + List regionsInTransition = assignmentManager.getRegionsInTransition(); + // if hbase:meta region is in transition, result of assignment cannot be recorded + // ignore the force flag in that case + boolean metaInTransition = assignmentManager.isMetaRegionInTransition(); + List toPrint = regionsInTransition; + int max = 5; + boolean truncated = false; + if (regionsInTransition.size() > max) { + toPrint = regionsInTransition.subList(0, max); + truncated = true; + } - if (!request.isIgnoreRegionsInTransition() || metaInTransition) { - LOG.info("Not running balancer (ignoreRIT=false" + ", metaRIT=" + metaInTransition - + ") because " + assignmentManager.getRegionTransitScheduledCount() - + " region(s) are scheduled to transit " + toPrint - + (truncated ? "(truncated list)" : "")); + if (!request.isIgnoreRegionsInTransition() || metaInTransition) { + LOG.info("Not running balancer (ignoreRIT=false" + ", metaRIT=" + metaInTransition + + ") because " + assignmentManager.getRegionTransitScheduledCount() + + " region(s) are scheduled to transit " + toPrint + + (truncated ? "(truncated list)" : "")); + return responseBuilder.build(); + } + } + if (this.serverManager.areDeadServersInProgress()) { + LOG.info("Not running balancer because processing dead regionserver(s): " + + this.serverManager.getDeadServers()); return responseBuilder.build(); } - } - if (this.serverManager.areDeadServersInProgress()) { - LOG.info("Not running balancer because processing dead regionserver(s): " - + this.serverManager.getDeadServers()); - return responseBuilder.build(); - } - if (this.cpHost != null) { - try { - if (this.cpHost.preBalance(request)) { - LOG.debug("Coprocessor bypassing balancer request"); + if (this.cpHost != null) { + try { + if (this.cpHost.preBalance(request)) { + LOG.debug("Coprocessor bypassing balancer request"); + return responseBuilder.build(); + } + } catch (IOException ioe) { + LOG.error("Error invoking master coprocessor preBalance()", ioe); return responseBuilder.build(); } - } catch (IOException ioe) { - LOG.error("Error invoking master coprocessor preBalance()", ioe); - return responseBuilder.build(); } - } - Map>> assignments = - this.assignmentManager.getRegionStates().getAssignmentsForBalancer(tableStateManager, - this.serverManager.getOnlineServersList()); - for (Map> serverMap : assignments.values()) { - serverMap.keySet().removeAll(this.serverManager.getDrainingServersList()); - } + Map>> assignments = + this.assignmentManager.getRegionStates().getAssignmentsForBalancer(tableStateManager, + this.serverManager.getOnlineServersList()); + for (Map> serverMap : assignments.values()) { + serverMap.keySet().removeAll(this.serverManager.getDrainingServersList()); + } - // Give the balancer the current cluster state. - this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor()); + // Give the balancer the current cluster state. + this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor()); - List plans = this.balancer.balanceCluster(assignments); + List plans = this.balancer.balanceCluster(assignments); - responseBuilder.setBalancerRan(true).setMovesCalculated(plans == null ? 0 : plans.size()); + responseBuilder.setBalancerRan(true).setMovesCalculated(plans == null ? 0 : plans.size()); - if (skipRegionManagementAction("balancer")) { - // make one last check that the cluster isn't shutting down before proceeding. 
- return responseBuilder.build(); - } + if (skipRegionManagementAction("balancer")) { + // make one last check that the cluster isn't shutting down before proceeding. + return responseBuilder.build(); + } - // For dry run we don't actually want to execute the moves, but we do want - // to execute the coprocessor below - List sucRPs = - request.isDryRun() ? Collections.emptyList() : executeRegionPlansWithThrottling(plans); + // For dry run we don't actually want to execute the moves, but we do want + // to execute the coprocessor below + List sucRPs = + request.isDryRun() ? Collections.emptyList() : executeRegionPlansWithThrottling(plans); - if (this.cpHost != null) { - try { - this.cpHost.postBalance(request, sucRPs); - } catch (IOException ioe) { - // balancing already succeeded so don't change the result - LOG.error("Error invoking master coprocessor postBalance()", ioe); + if (this.cpHost != null) { + try { + this.cpHost.postBalance(request, sucRPs); + } catch (IOException ioe) { + // balancing already succeeded so don't change the result + LOG.error("Error invoking master coprocessor postBalance()", ioe); + } } - } - responseBuilder.setMovesExecuted(sucRPs.size()); + responseBuilder.setMovesExecuted(sucRPs.size()); + } finally { + this.balancer.onBalancingComplete(); + } } // If LoadBalancer did not generate any plans, it means the cluster is already balanced. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index 908e04e20516..0ffe19962266 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -188,6 +188,14 @@ default void throttle(RegionPlan plan) throws Exception { // noop } + default void onBalancingStart() { + // noop + } + + default void onBalancingComplete() { + // noop + } + /** * @return true if Master carries regions * @deprecated since 2.4.0, will be removed in 3.0.0. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/CacheAwareLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/CacheAwareLoadBalancer.java index d48dc518175f..d73109b1c088 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/CacheAwareLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/CacheAwareLoadBalancer.java @@ -37,6 +37,8 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.RegionMetrics; @@ -64,13 +66,36 @@ public class CacheAwareLoadBalancer extends StochasticLoadBalancer { private Long sleepTime; private Configuration configuration; + /** + * Tracks whether a balance run is currently in progress. + */ + private final AtomicBoolean isBalancing = new AtomicBoolean(false); + + /** + * Holds a configuration update that arrived while a balance run was in progress. 
+ */ + private AtomicReference pendingConfiguration = new AtomicReference<>(); + public enum GeneratorFunctionType { LOAD, CACHE_RATIO } @Override - public synchronized void loadConf(Configuration configuration) { + public void loadConf(Configuration configuration) { + // If balance is running, store configuration in pendingConfiguration and return immediately. + // Defer the config update. + if (isBalancing.get()) { + LOG.debug( + "Balance is in progress, defer applying configuration change until balance completed."); + pendingConfiguration.set(configuration); + } else { + // Apply configuration change immediately. + updateConfiguration(configuration); + } + } + + public void updateConfiguration(Configuration configuration) { this.configuration = configuration; this.costFunctions = new ArrayList<>(); super.loadConf(configuration); @@ -79,6 +104,38 @@ public synchronized void loadConf(Configuration configuration) { sleepTime = configuration.getLong(MOVE_THROTTLING, MOVE_THROTTLING_DEFAULT.toMillis()); } + /** + * Sets {@link #isBalancing} to {@code true} before a balance run starts. + */ + @Override + public void onBalancingStart() { + LOG.debug("Setting isBalancing to true as balance is starting"); + isBalancing.set(true); + } + + /** + * Sets {@link #isBalancing} to {@code false} after a balance run completes and applies any + * pending configuration that arrived during balancing. + */ + @Override + public void onBalancingComplete() { + LOG.debug("Setting isBalancing to false as balance is completed"); + isBalancing.set(false); + applyPendingConfiguration(); + } + + /** + * If a pending configuration was stored during a balance run, apply it and clear the pending + * reference. + */ + public void applyPendingConfiguration() { + Configuration toApply = pendingConfiguration.getAndSet(null); + if (toApply != null) { + LOG.info("Applying pending configuration after balance completed."); + updateConfiguration(toApply); + } + } + @Override protected Map, CandidateGenerator> createCandidateGenerators() { @@ -193,10 +250,13 @@ public void throttle(RegionPlan plan) { + "Throttling move for {}ms.", plan.getRegionInfo().getEncodedName(), plan.getDestination(), sleepTime); } - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw new RuntimeException(e); + synchronized (this) { + try { + // Release the monitor while waiting to avoid blocking other threads. 
+ wait(sleepTime); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestCacheAwareLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestCacheAwareLoadBalancer.java index 335a719a1f9e..485ae7544436 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestCacheAwareLoadBalancer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestCacheAwareLoadBalancer.java @@ -33,6 +33,10 @@ import java.util.Random; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.ThreadLocalRandom; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics; @@ -599,4 +603,151 @@ public void testBalancerNotThrowNPEWhenBalancerPlansIsNull() throws Exception { fail("NPE should not be thrown"); } } + + /** + * This test verifies that when loadConf/onConfigurationChange is called on a + * CacheAwareLoadBalancer while a balance run is in progress, the configuration update: 1. Does + * not block (returns quickly without waiting for balancing to finish) 2. Does not affect the + * ongoing balance run (the configuration used during balancing remains the old one) 3. Is applied + * correctly after the balance run completes + */ + @Test(timeout = 60000) + public void testConfigUpdateDuringBalance() throws Exception { + Float expectedOldRatioThreshold = 0.8f; + Float expectedNewRatioThreshold = 0.95f; + long throttleTimeMs = 10000; + + // Actual old ratio threshold used during balance + float[] actualOldRatioThresholdDuringBalance = new float[1]; + + Configuration conf = HBaseConfiguration.create(); + conf.set(HConstants.BUCKET_CACHE_PERSISTENT_PATH_KEY, "prefetch_file_list"); + conf.setLong(CacheAwareLoadBalancer.MOVE_THROTTLING, throttleTimeMs); + conf.setFloat(CacheAwareLoadBalancer.CACHE_RATIO_THRESHOLD, expectedOldRatioThreshold); + + CacheAwareLoadBalancer balancer = new CacheAwareLoadBalancer(); + MasterServices services = mock(MasterServices.class); + when(services.getConfiguration()).thenReturn(conf); + balancer.setMasterServices(services); + balancer.loadConf(conf); + balancer.initialize(); + + Map> clusterState = new HashMap<>(); + ServerName server0 = servers.get(0); + ServerName server1 = servers.get(1); + ServerName server2 = servers.get(2); + + // Setup cluster: all 3 regions on server0 (unbalanced) + List regionsOnServer0 = randomRegions(3); + List regionsOnServer1 = randomRegions(0); + List regionsOnServer2 = randomRegions(0); + + clusterState.put(server0, regionsOnServer0); + clusterState.put(server1, regionsOnServer1); + clusterState.put(server2, regionsOnServer2); + + // Mock metrics: NO cache info for any region = all will be throttled + Map serverMetricsMap = new TreeMap<>(); + serverMetricsMap.put(server0, mockServerMetricsWithRegionCacheInfo(server0, regionsOnServer0, + 0.0f, new ArrayList<>(), 0, 10)); + serverMetricsMap.put(server1, mockServerMetricsWithRegionCacheInfo(server1, regionsOnServer1, + 0.0f, new ArrayList<>(), 0, 10)); + serverMetricsMap.put(server2, mockServerMetricsWithRegionCacheInfo(server2, regionsOnServer2, + 0.0f, new ArrayList<>(), 0, 10)); + + ClusterMetrics clusterMetrics = mock(ClusterMetrics.class); + 
when(clusterMetrics.getLiveServerMetrics()).thenReturn(serverMetricsMap); + balancer.updateClusterMetrics(clusterMetrics); + + final Map>> loadOfAllTable = + (Map) mockClusterServersWithTables(clusterState); + + // Verify initial configuration + assertEquals(expectedOldRatioThreshold, balancer.ratioThreshold, 0.001f); + + CountDownLatch balanceStarted = new CountDownLatch(1); + CountDownLatch updateConfigInitiated = new CountDownLatch(1); + + long[] configUpdateDuration = new long[1]; + long[] balanceDuration = new long[1]; + + ExecutorService executor = Executors.newFixedThreadPool(2); + + try { + // Thread 1 Simulate similar flow to HMaster.balance() which holds synchronized(balancer) for + // the duration of balance + Future balanceFuture = executor.submit(() -> { + try { + long start = EnvironmentEdgeManager.currentTime(); + synchronized (balancer) { + try { + // Simulate beginning of HMaster.balance() mark balancing window open + balancer.onBalancingStart(); + balanceStarted.countDown(); + List plans = balancer.balanceCluster(loadOfAllTable); + + LOG.info("Balance generated {} plans, executing with throttling", + plans != null ? plans.size() : 0); + + if (plans != null) { + for (int i = 0; i < plans.size(); i++) { + RegionPlan plan = plans.get(i); + balancer.throttle(plan); + } + } + // Wait until config update is initiated while balance is still in progress + updateConfigInitiated.await(); + + // Old config should still be visible during current balance run + actualOldRatioThresholdDuringBalance[0] = balancer.ratioThreshold; + } finally { + balancer.onBalancingComplete(); + } + } + return EnvironmentEdgeManager.currentTime() - start; + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + + // Thread 2: Simulate update_all_config / onConfigurationChange + Future configUpdateFuture = executor.submit(() -> { + try { + // Wait for balance to start + balanceStarted.await(); + long startTime = EnvironmentEdgeManager.currentTime(); + + // Call onConfigurationChange - should NOT hang + Configuration newConf = HBaseConfiguration.create(); + newConf.set(HConstants.BUCKET_CACHE_PERSISTENT_PATH_KEY, "prefetch_file_list"); + newConf.setLong(CacheAwareLoadBalancer.MOVE_THROTTLING, 10000); + newConf.setFloat(CacheAwareLoadBalancer.CACHE_RATIO_THRESHOLD, expectedNewRatioThreshold); + balancer.onConfigurationChange(newConf); + updateConfigInitiated.countDown(); + + return EnvironmentEdgeManager.currentTime() - startTime; + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + + // Wait for both threads to complete + configUpdateDuration[0] = configUpdateFuture.get(); + balanceDuration[0] = balanceFuture.get(); + System.out.println("Balance duration (ms): " + balanceDuration[0]); + System.out.println("Config update duration (ms): " + configUpdateDuration[0]); + + // Verify that ratio threshold used during balance is stll the old + assertEquals(expectedOldRatioThreshold, actualOldRatioThresholdDuringBalance[0], 0.001f); + + // Verify that config updated successfully after balance completed + assertEquals(expectedNewRatioThreshold, balancer.ratioThreshold, 0.001f); + + // Verify that config update didn't hang/timeout waiting for balance + assertTrue(configUpdateDuration[0] < balanceDuration[0]); + + } finally { + executor.shutdownNow(); + } + } } From f29dd083ca7a4bed678327ef01ab1f8bd0694da0 Mon Sep 17 00:00:00 2001 From: Dimas Shidqi Parikesit Date: Fri, 27 Mar 2026 02:23:23 -0400 Subject: [PATCH 08/14] HBASE-30007 Multiget with timestamp incorrectly returns 404 not found 
(#7956) Signed-off-by: Duo Zhang (cherry picked from commit 742a83e69899a2564fc25a903b15d637fc45b6ab) --- .../hbase/rest/MultiRowResultReader.java | 6 +- .../hbase/rest/MultiRowResourceTestBase.java | 74 +++++++++++++++++++ 2 files changed, 79 insertions(+), 1 deletion(-) diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResultReader.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResultReader.java index 2903c37edf92..28a63b5890cd 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResultReader.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResultReader.java @@ -58,7 +58,11 @@ public MultiRowResultReader(final String tableName, final Collection ro } } } - get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime()); + if (rowspec.isPartialTimeRange()) { + get.setTimestamp(rowspec.getTimestamp()); + } else { + get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime()); + } get.readVersions(rowspec.getMaxVersions()); if (filter != null) { get.setFilter(filter); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/MultiRowResourceTestBase.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/MultiRowResourceTestBase.java index 2eac0ad78aa6..104a68c326d4 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/MultiRowResourceTestBase.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/MultiRowResourceTestBase.java @@ -58,6 +58,14 @@ public class MultiRowResourceTestBase { private static final String VALUE_1 = "testvalue5"; private static final String ROW_2 = "testrow6"; private static final String VALUE_2 = "testvalue6"; + private static final String TIMESTAMPED_ROW_1 = "testrow7"; + private static final String TIMESTAMPED_ROW_2 = "testrow8"; + private static final String TIMESTAMPED_OLD_VALUE_1 = "testvalue7-old"; + private static final String TIMESTAMPED_NEW_VALUE_1 = "testvalue7-new"; + private static final String TIMESTAMPED_OLD_VALUE_2 = "testvalue8-old"; + private static final String TIMESTAMPED_NEW_VALUE_2 = "testvalue8-new"; + private static final long TIMESTAMP_1 = 1000L; + private static final long TIMESTAMP_2 = 2000L; private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); @@ -198,6 +206,72 @@ public void testMultiCellGetJSONB64() throws IOException { client.delete(row_6_url, extraHdr); } + private void postBinaryWithTimestamp(String path, String value, long timestamp) + throws IOException { + Header[] headers = new Header[] { new BasicHeader("Content-Type", Constants.MIMETYPE_BINARY), + new BasicHeader("X-Timestamp", Long.toString(timestamp)), extraHdr }; + Response response = client.post(path, headers, Bytes.toBytes(value)); + assertEquals(200, response.getCode()); + } + + @Test + public void testMultiCellGetWithExactTimestampJSON() throws IOException { + String row_7_url = "/" + TABLE + "/" + TIMESTAMPED_ROW_1 + "/" + COLUMN_1; + String row_8_url = "/" + TABLE + "/" + TIMESTAMPED_ROW_2 + "/" + COLUMN_2; + String row_7_delete_url = "/" + TABLE + "/" + TIMESTAMPED_ROW_1; + String row_8_delete_url = "/" + TABLE + "/" + TIMESTAMPED_ROW_2; + + postBinaryWithTimestamp(row_7_url, TIMESTAMPED_OLD_VALUE_1, TIMESTAMP_1); + postBinaryWithTimestamp(row_7_url, TIMESTAMPED_NEW_VALUE_1, TIMESTAMP_2); + postBinaryWithTimestamp(row_8_url, TIMESTAMPED_OLD_VALUE_2, TIMESTAMP_1); + postBinaryWithTimestamp(row_8_url, TIMESTAMPED_NEW_VALUE_2, 
TIMESTAMP_2); + + try { + StringBuilder path = new StringBuilder(); + path.append("/"); + path.append(TABLE); + path.append("/multiget/?row="); + path.append(TIMESTAMPED_ROW_1); + path.append("/"); + path.append("/"); + path.append(TIMESTAMP_1); + path.append("&row="); + path.append(TIMESTAMPED_ROW_2); + path.append("/"); + path.append("/"); + path.append(TIMESTAMP_1); + + Response response = client.get(path.toString(), Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); + + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); + CellSetModel cellSet = mapper.readValue(response.getBody(), CellSetModel.class); + + assertEquals(2, cellSet.getRows().size()); + + RowModel rowModel = cellSet.getRows().get(0); + assertEquals(TIMESTAMPED_ROW_1, Bytes.toString(rowModel.getKey())); + assertEquals(1, rowModel.getCells().size()); + CellModel cell = rowModel.getCells().get(0); + assertEquals(COLUMN_1, Bytes.toString(cell.getColumn())); + assertEquals(TIMESTAMPED_OLD_VALUE_1, Bytes.toString(cell.getValue())); + assertEquals(TIMESTAMP_1, cell.getTimestamp()); + + rowModel = cellSet.getRows().get(1); + assertEquals(TIMESTAMPED_ROW_2, Bytes.toString(rowModel.getKey())); + assertEquals(1, rowModel.getCells().size()); + cell = rowModel.getCells().get(0); + assertEquals(COLUMN_2, Bytes.toString(cell.getColumn())); + assertEquals(TIMESTAMPED_OLD_VALUE_2, Bytes.toString(cell.getValue())); + assertEquals(TIMESTAMP_1, cell.getTimestamp()); + } finally { + client.delete(row_7_delete_url, extraHdr); + client.delete(row_8_delete_url, extraHdr); + } + } + @Test public void testMultiCellGetNoKeys() throws IOException { StringBuilder path = new StringBuilder(); From 7e8f7dda91704a9929eb8f520fa811f28be90ff8 Mon Sep 17 00:00:00 2001 From: Arvind Kandpal Date: Mon, 6 Apr 2026 11:13:35 +0530 Subject: [PATCH 09/14] HBASE-28660 list_namespace not working after an incorrect user input (#7931) (#8015) Reviewed-by: Vaibhav Joshi (cherry picked from commit 2e42e4cdc9dbf8774aa63c305f024d5b16e3b5a4) Signed-off-by: Duo Zhang Signed-off-by: Nihal Jain Signed-off-by: Xiao Liu --- hbase-shell/src/main/ruby/irb/hirb.rb | 29 +++++++ .../test/ruby/shell/general_test_cluster.rb | 77 +++++++++++++++++++ 2 files changed, 106 insertions(+) diff --git a/hbase-shell/src/main/ruby/irb/hirb.rb b/hbase-shell/src/main/ruby/irb/hirb.rb index 73b0ee91a11d..34638f228a9e 100644 --- a/hbase-shell/src/main/ruby/irb/hirb.rb +++ b/hbase-shell/src/main/ruby/irb/hirb.rb @@ -23,6 +23,10 @@ module IRB # Subclass of IRB so can intercept methods class HIRB < Irb + def self.command_names + @command_names ||= ::Shell.commands.keys.map(&:to_sym).freeze + end + def initialize(workspace = nil, interactive = true, input_method = nil) # This is ugly. Our 'help' method above provokes the following message # on irb construction: 'irb: warn: can't alias help from irb_help.' 
@@ -53,6 +57,14 @@ def initialize(workspace = nil, interactive = true, input_method = nil) $stdout = STDOUT end + def set_context_workspace(workspace) + if @context.respond_to?(:workspace=) + @context.workspace = workspace + else + @context.instance_variable_set(:@workspace, workspace) + end + end + def output_value(omit = false) # Suppress output if last_value is 'nil' # Otherwise, when user types help, get ugly 'nil' @@ -163,6 +175,23 @@ def eval_input else exc = nil next + ensure + # HBASE-28660: Prevent command shadowing by incorrectly parsed local variables + cmd_names = self.class.command_names + workspace_binding = @context.workspace.binding + shadowing_vars = workspace_binding.local_variables & cmd_names + + if shadowing_vars.any? + shadowing_vars.each do |var| + warn "WARN: '#{var}' is a reserved HBase command. Local variable assignment ignored." + end + + new_binding = @context.workspace.main.get_binding + (workspace_binding.local_variables - shadowing_vars).each do |var| + new_binding.local_variable_set(var, workspace_binding.local_variable_get(var)) + end + set_context_workspace(::IRB::WorkSpace.new(new_binding)) + end end handle_exception(exc) @context.workspace.local_variable_set(:_, exc) diff --git a/hbase-shell/src/test/ruby/shell/general_test_cluster.rb b/hbase-shell/src/test/ruby/shell/general_test_cluster.rb index d6c60fd8d2a9..40fb8e81a4dc 100644 --- a/hbase-shell/src/test/ruby/shell/general_test_cluster.rb +++ b/hbase-shell/src/test/ruby/shell/general_test_cluster.rb @@ -19,6 +19,8 @@ require 'hbase_constants' require 'hbase_shell' +require 'irb/hirb' +require 'stringio' class ShellTest < Test::Unit::TestCase include Hbase::TestHelpers @@ -149,4 +151,79 @@ class TestException < RuntimeError; end # create a table that exists @shell.command('create', 'nothrow_table', 'family_1') end + + #----------------------------------------------------------------------------- + + class MockInputMethod < IRB::InputMethod + def initialize(lines) + super() + @lines = lines + end + def gets + @lines.shift + end + def eof? + @lines.empty? + end + def encoding + Encoding::UTF_8 + end + def readable_after_eof? 
+ false + end + end + + define_test 'Shell::Shell should prevent HBase commands from being shadowed by local variables (HBASE-28660)' do + workspace = @shell.get_workspace + IRB.setup(__FILE__) unless IRB.conf[:IRB_NAME] + + lines = [ + "list = 10\n", + "list_namespace, 'ns.*'\n", + "list_snapshots, 'snap01'\n", + "scan = 20\n", + "processlist = 30\n", + "my_var = 5\n" + ] + + input_method = MockInputMethod.new(lines) + hirb = IRB::HIRB.new(workspace, true, input_method) + + hirb.context.prompt_i = "" + hirb.context.prompt_s = "" + hirb.context.prompt_c = "" + hirb.context.prompt_n = "" + hirb.context.return_format = "" + hirb.context.echo = false + + old_stderr = $stderr + $stderr = StringIO.new + err_output = "" + begin + capture_stdout do + hirb.eval_input + end + ensure + err_output = $stderr.string + $stderr = old_stderr + end + + final_workspace = hirb.context.workspace + final_vars = final_workspace.binding.local_variables + + assert(final_vars.include?(:my_var), "Valid variables should be preserved") + assert_equal(5, final_workspace.binding.local_variable_get(:my_var)) + + assert(!final_vars.include?(:list), "Command 'list' should not be shadowed") + assert(!final_vars.include?(:list_namespace), "Command 'list_namespace' should not be shadowed") + assert(!final_vars.include?(:list_snapshots), "Command 'list_snapshots' should not be shadowed") + assert(!final_vars.include?(:scan), "Command 'scan' should not be shadowed") + assert(!final_vars.include?(:processlist), "Command 'processlist' should not be shadowed") + + assert_match(/WARN: 'list' is a reserved HBase command/, err_output) + assert_match(/WARN: 'list_namespace' is a reserved HBase command/, err_output) + assert_match(/WARN: 'list_snapshots' is a reserved HBase command/, err_output) + assert_match(/WARN: 'scan' is a reserved HBase command/, err_output) + assert_match(/WARN: 'processlist' is a reserved HBase command/, err_output) + end end From 2ece30b8e06b8aa07f746912e896bcc6a3f9528c Mon Sep 17 00:00:00 2001 From: Jeongmin Ju Date: Tue, 7 Apr 2026 11:59:29 +0900 Subject: [PATCH 10/14] HBASE-30033 Scan.setFilter() should validate against existing batch setting (#7988) Scan.setBatch() validates that the scan does not have a filter with hasFilterRow()=true. However, Scan.setFilter() does not perform the reverse check. This allows creating an invalid Scan by calling setBatch() before setFilter(), bypassing the validation that setBatch() was designed to enforce. Added validation in setFilter() to throw IncompatibleFilterException when a filter with hasFilterRow()=true is set on a scan that already has batch configured. 
Signed-off-by: Junegunn Choi Signed-off-by: Pankaj Kumar Signed-off-by: Xiao Liu Reviewed-by: Vaibhav Joshi --- .../org/apache/hadoop/hbase/client/Scan.java | 4 +++ .../hadoop/hbase/client/TestOperation.java | 2 +- .../apache/hadoop/hbase/client/TestScan.java | 33 +++++++++++++++++++ 3 files changed, 38 insertions(+), 1 deletion(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index c37ee1e35a5a..cb35f665d690 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -711,6 +711,10 @@ public Scan setMaxResultSize(long maxResultSize) { @Override public Scan setFilter(Filter filter) { + if (filter != null && filter.hasFilterRow() && this.batch > 0) { + throw new IncompatibleFilterException( + "Cannot set a filter that returns true for filter.hasFilterRow on a scan with batch set"); + } super.setFilter(filter); return this; } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java index 170a569d5baa..d019bba42169 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java @@ -370,7 +370,7 @@ public void testScanOperationToJSON() throws IOException { scan.setLimit(5); scan.setReadType(Scan.ReadType.PREAD); scan.setNeedCursorResult(true); - scan.setFilter(SCV_FILTER); + scan.setFilter(VALUE_FILTER); scan.setReplicaId(1); scan.setConsistency(Consistency.STRONG); scan.setLoadColumnFamiliesOnDemand(true); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java index 4bc1ad2c8b15..16bf7379decc 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java @@ -20,6 +20,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -30,6 +31,8 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Scan.ReadType; import org.apache.hadoop.hbase.filter.FilterList; +import org.apache.hadoop.hbase.filter.IncompatibleFilterException; +import org.apache.hadoop.hbase.filter.PageFilter; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -255,6 +258,36 @@ public void testScanCopyConstructor() throws Exception { "Make sure copy constructor adds all the fields in the copied object"); } + @Test + public void testSetFilterWithBatchThrows() { + Scan scan = new Scan(); + scan.setBatch(5); + assertThrows(IncompatibleFilterException.class, () -> scan.setFilter(new PageFilter(10))); + } + + @Test + public void testSetFilterWithoutBatchDoesNotThrow() { + Scan scan = new Scan(); + scan.setFilter(new PageFilter(10)); + // no exception expected + } + + @Test + public void testSetFilterWithBatchAndNonFilterRowFilter() { + Scan scan = new 
Scan(); + scan.setBatch(5); + scan.setFilter(new FilterList()); + // FilterList.hasFilterRow() returns false, so no exception expected + } + + @Test + public void testSetFilterWithBatchAndNullFilter() { + Scan scan = new Scan(); + scan.setBatch(5); + scan.setFilter(null); + // null filter should not throw + } + @Test public void testScanReadType() throws Exception { Scan scan = new Scan(); From 8a20115674858d55b8afbc004fd3b54ac39baffe Mon Sep 17 00:00:00 2001 From: Junegunn Choi Date: Wed, 8 Apr 2026 16:47:05 +0900 Subject: [PATCH 11/14] HBASE-30036 Skip redundant delete markers during flush and minor compaction (#7993) (#8035) Add DeleteTracker.isRedundantDelete() to detect when a delete marker is already covered by a previously tracked delete of equal or broader scope. ScanDeleteTracker implements this for all four delete types: - DeleteFamily/DeleteFamilyVersion: covered by a tracked DeleteFamily - DeleteColumn/Delete: covered by a tracked DeleteFamily or DeleteColumn MinorCompactionScanQueryMatcher calls this check before including a delete marker, returning SEEK_NEXT_COL to skip past all remaining cells covered by the previously tracked delete. Compatible with KEEP_DELETED_CELLS. When set to TRUE, trackDelete() does not populate the delete tracker, so isRedundantDelete() always returns false and all markers are retained. Signed-off-by: Charles Connell --- .../querymatcher/DeleteTracker.java | 14 +++ .../MinorCompactionScanQueryMatcher.java | 14 +++ .../querymatcher/ScanDeleteTracker.java | 22 ++++ .../regionserver/TestStoreFileWriter.java | 11 +- .../TestCompactionScanQueryMatcher.java | 116 ++++++++++++++++++ 5 files changed, 175 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java index 56ac265dd187..c6ddd443d35f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java @@ -83,6 +83,20 @@ enum DeleteResult { // deleted in strong semantics of versions(See MvccTracker) } + /** + * Check if the given delete marker is redundant, i.e., it is already covered by a previously + * tracked delete of equal or broader scope. A DeleteFamily is redundant if a DeleteFamily with a + * higher timestamp was already seen. A DeleteColumn is redundant if a DeleteColumn for the same + * qualifier with a higher timestamp, or a DeleteFamily with a higher timestamp, was already seen. + *
<p>
+ * This is a read-only check with no side effects on tracker state. + * @param cell the delete marker cell to check + * @return true if the delete marker is redundant and can be skipped + */ + default boolean isRedundantDelete(Cell cell) { + return false; + } + /** * Return the comparator passed to this delete tracker * @return the cell comparator diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java index 70e474e106b8..815249888d0c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java @@ -19,6 +19,7 @@ import java.io.IOException; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -47,6 +48,19 @@ public MatchCode match(Cell cell) throws IOException { // we should not use this delete marker to mask any cell yet. return MatchCode.INCLUDE; } + // Check before tracking: an older DeleteColumn or DeleteFamily is redundant if a newer + // one of equal or broader scope was already seen. Must check before trackDelete() since + // that overwrites tracker state. Seek past remaining cells for this column/row since + // they are all covered by the previously tracked delete. + if (deletes.isRedundantDelete(cell)) { + // Skip seeking for deletes with empty qualifier, not to skip a subsequent + // DeleteFamily marker that covers other qualifiers. DeleteFamily itself can seek + // safely because all remaining empty-qualifier cells are redundant under it. 
+ if (cell.getQualifierLength() == 0 && typeByte != KeyValue.Type.DeleteFamily.getCode()) { + return MatchCode.SKIP; + } + return columns.getNextRowOrNextColumn(cell); + } trackDelete(cell); return MatchCode.INCLUDE; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java index 8fdee2da524e..90924be23049 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java @@ -142,6 +142,28 @@ public DeleteResult isDeleted(Cell cell) { return DeleteResult.NOT_DELETED; } + @Override + public boolean isRedundantDelete(Cell cell) { + byte type = cell.getTypeByte(); + boolean coveredByFamily = hasFamilyStamp && cell.getTimestamp() <= familyStamp; + + if ( + type == KeyValue.Type.DeleteFamily.getCode() + || type == KeyValue.Type.DeleteFamilyVersion.getCode() + ) { + return coveredByFamily; + } + + boolean coveredByColumn = + deleteCell != null && deleteType == KeyValue.Type.DeleteColumn.getCode() + && CellUtil.matchingQualifier(cell, deleteCell) && cell.getTimestamp() <= deleteTimestamp; + + if (type == KeyValue.Type.DeleteColumn.getCode() || type == KeyValue.Type.Delete.getCode()) { + return coveredByFamily || coveredByColumn; + } + return false; + } + @Override public boolean isEmpty() { return deleteCell == null && !hasFamilyStamp && familyVersionStamps.isEmpty(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileWriter.java index 163e75da22e1..a70fe961db87 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileWriter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileWriter.java @@ -176,8 +176,15 @@ public void testCompactedFiles() throws Exception { stores[0].getStorefilesCount()); regions[1].compact(false); - assertEquals(flushCount - stores[1].getCompactedFiles().size() + 2, - stores[1].getStorefilesCount()); + // HBASE-30036 skips redundant delete markers during minor compaction, so the historical + // file may end up empty and not be created. The count can be +1 or +2. 
+ int minorCompactedCount = stores[1].getStorefilesCount(); + int expectedMin = flushCount - stores[1].getCompactedFiles().size() + 1; + int expectedMax = flushCount - stores[1].getCompactedFiles().size() + 2; + assertTrue( + "Expected store file count between " + expectedMin + " and " + expectedMax + " but was " + + minorCompactedCount, + minorCompactedCount >= expectedMin && minorCompactedCount <= expectedMax); verifyCells(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java index 8c8a6ed7a930..fd1a4d84444b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import static org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode.INCLUDE; +import static org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode.SEEK_NEXT_COL; import static org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode.SKIP; import static org.junit.Assert.assertEquals; @@ -74,6 +75,121 @@ public void testMatch_PartialRangeDropDeletes() throws Exception { testDropDeletes(row2, row3, new byte[][] { row1, row1 }, INCLUDE, INCLUDE); } + /** + * Test redundant delete marker handling with COMPACT_RETAIN_DELETES. Cells are auto-generated + * from the given types with decrementing timestamps. + */ + @Test + public void testSkipsRedundantDeleteMarkers() throws IOException { + // Interleaved DeleteColumn + Put. First DC included, put triggers SEEK_NEXT_COL. + assertRetainDeletes(new Type[] { Type.DeleteColumn, Type.Put, Type.DeleteColumn }, INCLUDE, + SEEK_NEXT_COL); + + // Contiguous DeleteColumn. First included, rest redundant. + assertRetainDeletes(new Type[] { Type.DeleteColumn, Type.DeleteColumn, Type.DeleteColumn }, + INCLUDE, SEEK_NEXT_COL, SEEK_NEXT_COL); + + // Contiguous DeleteFamily. First included, rest redundant. + assertRetainDeletes(new Type[] { Type.DeleteFamily, Type.DeleteFamily, Type.DeleteFamily }, + INCLUDE, SEEK_NEXT_COL, SEEK_NEXT_COL); + + // DF + DFV interleaved. DF included, DFV redundant (SKIP because empty qualifier), + // older DF redundant (SEEK_NEXT_COL), older DFV redundant (SKIP). + assertRetainDeletes(new Type[] { Type.DeleteFamily, Type.DeleteFamilyVersion, Type.DeleteFamily, + Type.DeleteFamilyVersion }, INCLUDE, SKIP, SEEK_NEXT_COL, SKIP); + + // Delete (version) covered by DeleteColumn. + assertRetainDeletes(new Type[] { Type.DeleteColumn, Type.Delete, Type.Delete, Type.Delete }, + INCLUDE, SEEK_NEXT_COL, SEEK_NEXT_COL, SEEK_NEXT_COL); + + // KEEP_DELETED_CELLS=TRUE: all markers retained. + assertRetainDeletes(KeepDeletedCells.TRUE, + new Type[] { Type.DeleteColumn, Type.DeleteColumn, Type.DeleteColumn }, INCLUDE, INCLUDE, + INCLUDE); + } + + /** + * Redundant column-level deletes with empty qualifier must not seek past a subsequent + * DeleteFamily. getKeyForNextColumn treats empty qualifier as "no column" and returns + * SEEK_NEXT_ROW, which would skip the DF and all remaining cells in the row. 
+ */ + @Test + public void testEmptyQualifierDeleteDoesNotSkipDeleteFamily() throws IOException { + byte[] emptyQualifier = HConstants.EMPTY_BYTE_ARRAY; + + // DC(empty) + DC(empty) redundant + DF must still be reachable. + assertRetainDeletes(emptyQualifier, + new Type[] { Type.DeleteColumn, Type.DeleteColumn, Type.DeleteFamily }, INCLUDE, SKIP, + INCLUDE); + + // DC(empty) + Delete(empty) redundant + DF must still be reachable. + assertRetainDeletes(emptyQualifier, + new Type[] { Type.DeleteColumn, Type.Delete, Type.DeleteFamily }, INCLUDE, SKIP, INCLUDE); + } + + private void assertRetainDeletes(Type[] types, MatchCode... expected) throws IOException { + assertRetainDeletes(KeepDeletedCells.FALSE, types, expected); + } + + private void assertRetainDeletes(byte[] qualifier, Type[] types, MatchCode... expected) + throws IOException { + assertRetainDeletes(KeepDeletedCells.FALSE, qualifier, types, expected); + } + + /** + * Build cells from the given types with decrementing timestamps (same ts for adjacent + * family-level and column-level types at the same position). Family-level types (DeleteFamily, + * DeleteFamilyVersion) use empty qualifier; others use col1. + */ + private void assertRetainDeletes(KeepDeletedCells keepDeletedCells, Type[] types, + MatchCode... expected) throws IOException { + assertRetainDeletes(keepDeletedCells, null, types, expected); + } + + /** + * Build cells from the given types with decrementing timestamps. If qualifier is null, + * family-level types use empty qualifier and others use col1. If qualifier is specified, all + * types use that qualifier. + */ + private void assertRetainDeletes(KeepDeletedCells keepDeletedCells, byte[] qualifier, + Type[] types, MatchCode... expected) throws IOException { + long now = EnvironmentEdgeManager.currentTime(); + ScanInfo scanInfo = new ScanInfo(this.conf, fam1, 0, 1, ttl, keepDeletedCells, + HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false); + CompactionScanQueryMatcher qm = + CompactionScanQueryMatcher.create(scanInfo, ScanType.COMPACT_RETAIN_DELETES, 0L, + HConstants.OLDEST_TIMESTAMP, HConstants.OLDEST_TIMESTAMP, now, null, null, null); + qm.setToNewRow(KeyValueUtil.createFirstOnRow(row1)); + + long ts = now; + List actual = new ArrayList<>(expected.length); + for (int i = 0; i < types.length; i++) { + byte[] qual; + if (qualifier != null) { + qual = qualifier; + } else { + boolean familyLevel = types[i] == Type.DeleteFamily || types[i] == Type.DeleteFamilyVersion; + qual = familyLevel ? HConstants.EMPTY_BYTE_ARRAY : col1; + } + KeyValue kv = types[i] == Type.Put + ? new KeyValue(row1, fam1, qual, ts, types[i], data) + : new KeyValue(row1, fam1, qual, ts, types[i]); + actual.add(qm.match(kv)); + if (actual.size() >= expected.length) { + break; + } + // Decrement ts for next cell, but keep same ts when the next type has lower type code + // at the same logical position (e.g. DF then DFV at the same timestamp). + if (i + 1 < types.length && types[i + 1].getCode() < types[i].getCode()) { + continue; + } + ts--; + } + for (int i = 0; i < expected.length; i++) { + assertEquals("Mismatch at index " + i, expected[i], actual.get(i)); + } + } + private void testDropDeletes(byte[] from, byte[] to, byte[][] rows, MatchCode... 
expected) throws IOException { long now = EnvironmentEdgeManager.currentTime(); From ad252280f85e03f61ecb9296b0d1563fbac52496 Mon Sep 17 00:00:00 2001 From: Xiao Liu Date: Thu, 2 Apr 2026 10:23:58 +0800 Subject: [PATCH 12/14] HBASE-29970 SplitSuccess and SplitTime metrics are no longer used at RegionServer and Table level (#7866) (#8005) Signed-off-by: Duo Zhang (cherry picked from commit 65d99e6915940e4be0bcadf8c6dad5d7c725a400) --- .../MetricsRegionServerSource.java | 14 -------------- .../regionserver/MetricsTableSource.java | 11 ----------- .../MetricsRegionServerSourceImpl.java | 14 -------------- .../regionserver/MetricsTableSourceImpl.java | 19 ------------------- .../regionserver/MetricsRegionServer.java | 13 ++++--------- .../hbase/regionserver/MetricsTable.java | 8 -------- .../hbase/regionserver/SplitRequest.java | 2 +- .../regionserver/TestMetricsRegionServer.java | 6 ++++++ .../TestMetricsTableAggregate.java | 7 +++++++ 9 files changed, 18 insertions(+), 76 deletions(-) diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java index 8f3de3e3929b..a4c126b83686 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java @@ -150,22 +150,11 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo */ void incrSlowAppend(); - /** - * Update the split transaction time histogram - * @param t time it took, in milliseconds - */ - void updateSplitTime(long t); - /** * Increment number of a requested splits */ void incrSplitRequest(); - /** - * Increment number of successful splits - */ - void incrSplitSuccess(); - /** * Update the flush time histogram * @param t time it took, in milliseconds @@ -557,11 +546,8 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String BLOCKED_REQUESTS_COUNT_DESC = "The number of blocked requests because of memstore size is " + "larger than blockingMemStoreSize"; - String SPLIT_KEY = "splitTime"; String SPLIT_REQUEST_KEY = "splitRequestCount"; String SPLIT_REQUEST_DESC = "Number of splits requested"; - String SPLIT_SUCCESS_KEY = "splitSuccessCount"; - String SPLIT_SUCCESS_DESC = "Number of successfully executed splits"; String FLUSH_TIME = "flushTime"; String FLUSH_TIME_DESC = "Histogram for the time in millis for memstore flush"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java index b65457a87147..99d2ca5469a4 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java @@ -45,22 +45,11 @@ public interface MetricsTableSource extends Comparable, Clos */ MetricsTableAggregateSource getAggregateSource(); - /** - * Update the split transaction time histogram - * @param t time it took, in milliseconds - */ - void updateSplitTime(long t); - /** * Increment number of a requested splits */ void incrSplitRequest(); - /** - * Increment number of successful splits - */ - void incrSplitSuccess(); - /** * Update the flush time histogram * @param t time it took, in milliseconds diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java index 681e0617e26b..4a0ddcfc9c0e 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java @@ -63,8 +63,6 @@ public class MetricsRegionServerSourceImpl extends BaseSourceImpl // split related metrics private final MutableFastCounter splitRequest; - private final MutableFastCounter splitSuccess; - private final MetricHistogram splitTimeHisto; // flush related metrics private final MetricHistogram flushTimeHisto; @@ -182,9 +180,7 @@ public MetricsRegionServerSourceImpl(String metricsName, String metricsDescripti majorCompactedOutputBytes = getMetricsRegistry().newCounter(MAJOR_COMPACTED_OUTPUT_BYTES, MAJOR_COMPACTED_OUTPUT_BYTES_DESC, 0L); - splitTimeHisto = getMetricsRegistry().newTimeHistogram(SPLIT_KEY); splitRequest = getMetricsRegistry().newCounter(SPLIT_REQUEST_KEY, SPLIT_REQUEST_DESC, 0L); - splitSuccess = getMetricsRegistry().newCounter(SPLIT_SUCCESS_KEY, SPLIT_SUCCESS_DESC, 0L); // pause monitor metrics infoPauseThresholdExceeded = @@ -280,16 +276,6 @@ public void incrSplitRequest() { splitRequest.incr(); } - @Override - public void incrSplitSuccess() { - splitSuccess.incr(); - } - - @Override - public void updateSplitTime(long t) { - splitTimeHisto.add(t); - } - @Override public void updateFlushTime(long t) { flushTimeHisto.add(t); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java index 05e07e95ae6c..ed2c08f3dccb 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java @@ -55,11 +55,8 @@ import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.MAJOR_COMPACTION_OUTPUT_SIZE_DESC; import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.MAJOR_COMPACTION_TIME; import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.MAJOR_COMPACTION_TIME_DESC; -import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.SPLIT_KEY; import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.SPLIT_REQUEST_DESC; import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.SPLIT_REQUEST_KEY; -import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.SPLIT_SUCCESS_DESC; -import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.SPLIT_SUCCESS_KEY; import java.util.Map; import java.util.Map.Entry; @@ -101,8 +98,6 @@ public class MetricsTableSourceImpl implements MetricsTableSource { // split related metrics private MutableFastCounter splitRequest; - private MutableFastCounter splitSuccess; - private MetricHistogram splitTimeHisto; // flush related metrics private MetricHistogram flushTimeHisto; @@ -186,9 +181,7 @@ public synchronized void registerMetrics() { majorCompactedOutputBytes = registry.newCounter(tableNamePrefix + MAJOR_COMPACTED_OUTPUT_BYTES, MAJOR_COMPACTED_OUTPUT_BYTES_DESC, 0L); - splitTimeHisto = registry.newTimeHistogram(tableNamePrefix + 
SPLIT_KEY); splitRequest = registry.newCounter(tableNamePrefix + SPLIT_REQUEST_KEY, SPLIT_REQUEST_DESC, 0L); - splitSuccess = registry.newCounter(tableNamePrefix + SPLIT_SUCCESS_KEY, SPLIT_SUCCESS_DESC, 0L); } private void deregisterMetrics() { @@ -211,9 +204,7 @@ private void deregisterMetrics() { registry.removeHistogramMetrics(tableNamePrefix + MAJOR_COMPACTION_OUTPUT_SIZE); registry.removeMetric(tableNamePrefix + MAJOR_COMPACTED_INPUT_BYTES); registry.removeMetric(tableNamePrefix + MAJOR_COMPACTED_OUTPUT_BYTES); - registry.removeHistogramMetrics(tableNamePrefix + SPLIT_KEY); registry.removeMetric(tableNamePrefix + SPLIT_REQUEST_KEY); - registry.removeMetric(tableNamePrefix + SPLIT_SUCCESS_KEY); } @Override @@ -424,16 +415,6 @@ public void incrSplitRequest() { splitRequest.incr(); } - @Override - public void incrSplitSuccess() { - splitSuccess.incr(); - } - - @Override - public void updateSplitTime(long t) { - splitTimeHisto.add(t); - } - @Override public void updateFlushTime(long t) { flushTimeHisto.add(t); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java index 580f77874992..6cccc8e45bb9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java @@ -210,16 +210,11 @@ public void updateScan(HRegion region, long time, long responseCellSize, long bl userAggregate.updateScan(time, blockBytesScanned); } - public void updateSplitTime(long t) { - serverSource.updateSplitTime(t); - } - - public void incrSplitRequest() { + public void incrSplitRequest(String table) { serverSource.incrSplitRequest(); - } - - public void incrSplitSuccess() { - serverSource.incrSplitSuccess(); + if (table != null) { + metricsTable.incrSplitRequest(table); + } } public void updateFlush(String table, long t, long memstoreSize, long fileSize) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTable.java index 48d06ad26a89..427fd6a0b64b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTable.java @@ -43,14 +43,6 @@ public void incrSplitRequest(String table) { tableSourceAgg.getOrCreateTableSource(table, wrapper).incrSplitRequest(); } - public void incrSplitSuccess(String table) { - tableSourceAgg.getOrCreateTableSource(table, wrapper).incrSplitSuccess(); - } - - public void updateSplitTime(String table, long t) { - tableSourceAgg.getOrCreateTableSource(table, wrapper).updateSplitTime(t); - } - public void updateFlushTime(String table, long t) { tableSourceAgg.getOrCreateTableSource(table, wrapper).updateFlushTime(t); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java index d979a3ac82e2..c16a993e8729 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java @@ -58,7 +58,7 @@ public String toString() { } private void doSplitting() { - server.getMetrics().incrSplitRequest(); + server.getMetrics().incrSplitRequest(this.parent.getTable().getNameAsString()); if 
(user != null && user.getUGI() != null) { user.getUGI().doAs((PrivilegedAction) () -> { requestRegionSplit(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java index f7ada213d98c..b99c93261a37 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java @@ -364,4 +364,10 @@ public void testThrottleExceptionMetricsIntegration() { "RpcThrottlingException_Type_ReadCapacityUnitExceeded_User_unknown_Table_unknown", 1L, serverSource); } + + @Test + public void testSplitRequest() { + rsm.incrSplitRequest(null); + HELPER.assertCounter("splitRequestCount", 1, serverSource); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableAggregate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableAggregate.java index bd01808e6bb7..bee3fe7c363c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableAggregate.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableAggregate.java @@ -161,6 +161,13 @@ public void testCompaction() { HELPER.assertCounter(pre + "majorCompactedoutputBytes", 500, agg); } + @Test + public void testSplitRequest() { + rsm.incrSplitRequest(null); + rsm.incrSplitRequest(tableName); + HELPER.assertCounter(pre + "splitRequestCount", 1, agg); + } + private void update(AtomicBoolean succ, int round, CyclicBarrier barrier) { try { for (int i = 0; i < round; i++) { From 8224e6a1764e73fcc55c24e22d2234585403a038 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 28 Mar 2026 22:05:32 +0800 Subject: [PATCH 13/14] HBASE-29770 Exclude commons-logging from HBase (#7539) (#7990) Co-authored-by: Istvan Toth Signed-off-by: Nihal Jain Signed-off-by: Xiao Liu Reviewed-by: Shanmukha Haripriya Kota (cherry picked from commit a07d8b52b79fcd352212308647371dde5966d54d) (cherry picked from commit 9ed6fe05c4474200d2ea339fb946b5bb778ae162) --- hbase-rsgroup/pom.xml | 1 + pom.xml | 88 ++++++++++++++++++++++++++++++++++++++----- 2 files changed, 79 insertions(+), 10 deletions(-) diff --git a/hbase-rsgroup/pom.xml b/hbase-rsgroup/pom.xml index 79fb5101d710..44dd21838ec1 100644 --- a/hbase-rsgroup/pom.xml +++ b/hbase-rsgroup/pom.xml @@ -178,6 +178,7 @@ org.junit.vintage junit-vintage-engine + test diff --git a/pom.xml b/pom.xml index 0626934465ed..2a9512a952cc 100644 --- a/pom.xml +++ b/pom.xml @@ -1229,6 +1229,12 @@ org.apache.httpcomponents httpclient ${httpclient.version} + + + commons-logging + commons-logging + + org.apache.httpcomponents @@ -1240,6 +1246,17 @@ commons-codec ${commons-codec.version} + + commons-validator + commons-validator + ${commons-validator.version} + + + commons-logging + commons-logging + + + commons-io commons-io @@ -1260,14 +1277,6 @@ commons-cli ${commons-cli.version} - - - commons-logging - commons-logging - 1.2 - org.apache.zookeeper zookeeper @@ -2194,8 +2203,7 @@ commons-logging:commons-logging - We don't use commons-logging any more, so do not depend on it directly. - false + We don't use commons-logging any more, so do not depend on it directly. We are also using jcl-over-slf4j for libraries so exclude any transitive commons-logging dependencies. 
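As a side note on why dropping the direct commons-logging dependency is safe for code that still compiles against the JCL API: with jcl-over-slf4j on the classpath (the bridge named in the enforcer message above), the commons-logging entry points are re-implemented on top of SLF4J. A hedged sketch only; the class name is arbitrary and the bound SLF4J backend depends on the module:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class JclBridgeSketch {
  // With jcl-over-slf4j, LogFactory and Log resolve to the bridge jar rather than the
  // banned commons-logging artifact, so this call is delegated to SLF4J and ends up in
  // whichever logging backend the module binds.
  private static final Log LOG = LogFactory.getLog(JclBridgeSketch.class);

  public static void main(String[] args) {
    LOG.info("JCL API call routed through jcl-over-slf4j");
  }
}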
@@ -3472,6 +3480,10 @@ org.slf4j slf4j-reload4j + + commons-logging + commons-logging + @@ -3523,6 +3535,10 @@ com.codahale.metrics metrics-core + + commons-logging + commons-logging + @@ -3560,6 +3576,10 @@ com.codahale.metrics metrics-core + + commons-logging + commons-logging + @@ -3616,6 +3636,10 @@ com.codahale.metrics metrics-core + + commons-logging + commons-logging + @@ -3671,6 +3695,10 @@ org.slf4j slf4j-reload4j + + commons-logging + commons-logging + @@ -3728,6 +3756,10 @@ org.slf4j slf4j-reload4j + + commons-logging + commons-logging + @@ -3826,6 +3858,10 @@ org.slf4j slf4j-reload4j + + commons-logging + commons-logging + @@ -3891,6 +3927,10 @@ org.slf4j slf4j-reload4j + + commons-logging + commons-logging + @@ -3924,6 +3964,10 @@ org.slf4j slf4j-reload4j + + commons-logging + commons-logging +