diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index b2ba6e51c774..29f036323a7e 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -1,66 +1,66 @@
-name: ReleaseWorkflow
-# - Gets artifacts from S3
-# - Sends it to JFROG Artifactory
-# - Adds them to the release assets
+# name: ReleaseWorkflow
+# # - Gets artifacts from S3
+# # - Sends it to JFROG Artifactory
+# # - Adds them to the release assets
-on: # yamllint disable-line rule:truthy
- release:
- types:
- - published
+# on: # yamllint disable-line rule:truthy
+# release:
+# types:
+# - published
-jobs:
- ReleasePublish:
- runs-on: [self-hosted, style-checker]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- JFROG_API_KEY=${{ secrets.JFROG_KEY_API_PACKAGES }}
- TEMP_PATH=${{runner.temp}}/release_packages
- REPO_COPY=${{runner.temp}}/release_packages/ClickHouse
- EOF
- - name: Check out repository code
- uses: actions/checkout@v2
- with:
- # Always use the most recent script version
- ref: master
- - name: Download packages and push to Artifactory
- run: |
- rm -rf "$TEMP_PATH" && mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY"
- python3 ./tests/ci/push_to_artifactory.py --release "${{ github.ref }}" \
- --commit '${{ github.sha }}' --all
- - name: Upload packages to release assets
- uses: svenstaro/upload-release-action@v2
- with:
- repo_token: ${{ secrets.GITHUB_TOKEN }}
- file: ${{runner.temp}}/push_to_artifactory/*
- overwrite: true
- tag: ${{ github.ref }}
- file_glob: true
- ############################################################################################
- ##################################### Docker images #######################################
- ############################################################################################
- DockerServerImages:
- runs-on: [self-hosted, style-checker]
- steps:
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- with:
- fetch-depth: 0 # otherwise we will have no version info
- - name: Check docker clickhouse/clickhouse-server building
- run: |
- cd "$GITHUB_WORKSPACE/tests/ci"
- python3 docker_server.py --release-type auto --version "${{ github.ref }}"
- python3 docker_server.py --release-type auto --version "${{ github.ref }}" --no-ubuntu \
- --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- - name: Cleanup
- if: always()
- run: |
- docker kill "$(docker ps -q)" ||:
- docker rm -f "$(docker ps -a -q)" ||:
- sudo rm -fr "$TEMP_PATH"
+# jobs:
+# ReleasePublish:
+# runs-on: [self-hosted, style-checker]
+# steps:
+# - name: Set envs
+# run: |
+# cat >> "$GITHUB_ENV" << 'EOF'
+# JFROG_API_KEY=${{ secrets.JFROG_KEY_API_PACKAGES }}
+# TEMP_PATH=${{runner.temp}}/release_packages
+# REPO_COPY=${{runner.temp}}/release_packages/ClickHouse
+# EOF
+# - name: Check out repository code
+# uses: actions/checkout@v2
+# with:
+# # Always use the most recent script version
+# ref: master
+# - name: Download packages and push to Artifactory
+# run: |
+# rm -rf "$TEMP_PATH" && mkdir -p "$TEMP_PATH"
+# cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+# cd "$REPO_COPY"
+# python3 ./tests/ci/push_to_artifactory.py --release "${{ github.ref }}" \
+# --commit '${{ github.sha }}' --all
+# - name: Upload packages to release assets
+# uses: svenstaro/upload-release-action@v2
+# with:
+# repo_token: ${{ secrets.GITHUB_TOKEN }}
+# file: ${{runner.temp}}/push_to_artifactory/*
+# overwrite: true
+# tag: ${{ github.ref }}
+# file_glob: true
+# ############################################################################################
+# ##################################### Docker images #######################################
+# ############################################################################################
+# DockerServerImages:
+# runs-on: [self-hosted, style-checker]
+# steps:
+# - name: Clear repository
+# run: |
+# sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+# - name: Check out repository code
+# uses: actions/checkout@v2
+# with:
+# fetch-depth: 0 # otherwise we will have no version info
+# - name: Check docker clickhouse/clickhouse-server building
+# run: |
+# cd "$GITHUB_WORKSPACE/tests/ci"
+# python3 docker_server.py --release-type auto --version "${{ github.ref }}"
+# python3 docker_server.py --release-type auto --version "${{ github.ref }}" --no-ubuntu \
+# --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
+# - name: Cleanup
+# if: always()
+# run: |
+# docker kill "$(docker ps -q)" ||:
+# docker rm -f "$(docker ps -a -q)" ||:
+# sudo rm -fr "$TEMP_PATH"
diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml
index 272aabf86286..d0175ae02166 100644
--- a/.github/workflows/release_branches.yml
+++ b/.github/workflows/release_branches.yml
@@ -5,11 +5,20 @@ env:
PYTHONUNBUFFERED: 1
on: # yamllint disable-line rule:truthy
- push:
+ pull_request:
+ types:
+ - synchronize
+ - reopened
+ - opened
branches:
- # 22.1 and 22.10
- - '2[1-9].[1-9][0-9]'
- - '2[1-9].[1-9]'
+ - '**/22.3*'
+ release:
+ types:
+ - published
+ # push:
+ # branches:
+  #     # Anything/22.3 (e.g. customizations/22.3)
+ # - '**/22.3*'
jobs:
DockerHubPushAarch64:
@@ -108,9 +117,9 @@ jobs:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
-#########################################################################################
-#################################### ORDINARY BUILDS ####################################
-#########################################################################################
+ #########################################################################################
+ #################################### ORDINARY BUILDS ####################################
+ #########################################################################################
BuilderDebRelease:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
@@ -124,6 +133,7 @@ jobs:
CACHES_PATH=${{runner.temp}}/../ccaches
CHECK_NAME=ClickHouse build check (actions)
BUILD_NAME=package_release
+ CLICKHOUSE_STABLE_VERSION_SUFFIX=altinitystable
EOF
- name: Download changed images
uses: actions/download-artifact@v2
@@ -133,6 +143,8 @@ jobs:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+ - name: Trust My Directory
+        run: git config --global --add safe.directory '*' # https://stackoverflow.com/a/71940133
- name: Check out repository code
uses: actions/checkout@v2
with:
@@ -159,1432 +171,30 @@ jobs:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
- BuilderDebAarch64:
- needs: [DockerHubPush]
- runs-on: [self-hosted, builder]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/build_check
- IMAGES_PATH=${{runner.temp}}/images_path
- REPO_COPY=${{runner.temp}}/build_check/ClickHouse
- CACHES_PATH=${{runner.temp}}/../ccaches
- CHECK_NAME=ClickHouse build check (actions)
- BUILD_NAME=package_aarch64
- EOF
- - name: Download changed images
- uses: actions/download-artifact@v2
- with:
- name: changed_images
- path: ${{ runner.temp }}/images_path
- - name: Check out repository code
- uses: actions/checkout@v2
- with:
- fetch-depth: 0 # otherwise we will have no info about contributors
- - name: Build
- run: |
- git -C "$GITHUB_WORKSPACE" submodule sync --recursive
- git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
- - name: Upload build URLs to artifacts
- uses: actions/upload-artifact@v2
- with:
- name: ${{ env.BUILD_URLS }}
- path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
- BuilderDebAsan:
- needs: [DockerHubPush]
- runs-on: [self-hosted, builder]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/build_check
- IMAGES_PATH=${{runner.temp}}/images_path
- REPO_COPY=${{runner.temp}}/build_check/ClickHouse
- CACHES_PATH=${{runner.temp}}/../ccaches
- CHECK_NAME=ClickHouse build check (actions)
- BUILD_NAME=package_asan
- EOF
- - name: Download changed images
- uses: actions/download-artifact@v2
- with:
- name: changed_images
- path: ${{ env.IMAGES_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- with:
- fetch-depth: 0 # otherwise we will have no info about contributors
- - name: Build
- run: |
- git -C "$GITHUB_WORKSPACE" submodule sync --recursive
- git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
- - name: Upload build URLs to artifacts
- if: ${{ success() || failure() }}
- uses: actions/upload-artifact@v2
- with:
- name: ${{ env.BUILD_URLS }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
- BuilderDebUBsan:
- needs: [DockerHubPush]
- runs-on: [self-hosted, builder]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/build_check
- IMAGES_PATH=${{runner.temp}}/images_path
- REPO_COPY=${{runner.temp}}/build_check/ClickHouse
- CACHES_PATH=${{runner.temp}}/../ccaches
- CHECK_NAME=ClickHouse build check (actions)
- BUILD_NAME=package_ubsan
- EOF
- - name: Download changed images
- uses: actions/download-artifact@v2
- with:
- name: changed_images
- path: ${{ env.IMAGES_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- with:
- fetch-depth: 0 # otherwise we will have no info about contributors
- - name: Build
- run: |
- git -C "$GITHUB_WORKSPACE" submodule sync --recursive
- git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
- - name: Upload build URLs to artifacts
- if: ${{ success() || failure() }}
- uses: actions/upload-artifact@v2
- with:
- name: ${{ env.BUILD_URLS }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
- BuilderDebTsan:
- needs: [DockerHubPush]
- runs-on: [self-hosted, builder]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/build_check
- IMAGES_PATH=${{runner.temp}}/images_path
- REPO_COPY=${{runner.temp}}/build_check/ClickHouse
- CACHES_PATH=${{runner.temp}}/../ccaches
- CHECK_NAME=ClickHouse build check (actions)
- BUILD_NAME=package_tsan
- EOF
- - name: Download changed images
- uses: actions/download-artifact@v2
- with:
- name: changed_images
- path: ${{ env.IMAGES_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- with:
- fetch-depth: 0 # otherwise we will have no info about contributors
- - name: Build
- run: |
- git -C "$GITHUB_WORKSPACE" submodule sync --recursive
- git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
- - name: Upload build URLs to artifacts
- if: ${{ success() || failure() }}
- uses: actions/upload-artifact@v2
- with:
- name: ${{ env.BUILD_URLS }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
- BuilderDebMsan:
- needs: [DockerHubPush]
- runs-on: [self-hosted, builder]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/build_check
- IMAGES_PATH=${{runner.temp}}/images_path
- REPO_COPY=${{runner.temp}}/build_check/ClickHouse
- CACHES_PATH=${{runner.temp}}/../ccaches
- CHECK_NAME=ClickHouse build check (actions)
- BUILD_NAME=package_msan
- EOF
- - name: Download changed images
- uses: actions/download-artifact@v2
- with:
- name: changed_images
- path: ${{ env.IMAGES_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- with:
- fetch-depth: 0 # otherwise we will have no info about contributors
- - name: Build
- run: |
- git -C "$GITHUB_WORKSPACE" submodule sync --recursive
- git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
- - name: Upload build URLs to artifacts
- if: ${{ success() || failure() }}
- uses: actions/upload-artifact@v2
- with:
- name: ${{ env.BUILD_URLS }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
- BuilderDebDebug:
- needs: [DockerHubPush]
- runs-on: [self-hosted, builder]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/build_check
- IMAGES_PATH=${{runner.temp}}/images_path
- REPO_COPY=${{runner.temp}}/build_check/ClickHouse
- CACHES_PATH=${{runner.temp}}/../ccaches
- CHECK_NAME=ClickHouse build check (actions)
- BUILD_NAME=package_debug
- EOF
- - name: Download changed images
- uses: actions/download-artifact@v2
- with:
- name: changed_images
- path: ${{ env.IMAGES_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- with:
- fetch-depth: 0 # otherwise we will have no info about contributors
- - name: Build
- run: |
- git -C "$GITHUB_WORKSPACE" submodule sync --recursive
- git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
- - name: Upload build URLs to artifacts
- if: ${{ success() || failure() }}
- uses: actions/upload-artifact@v2
- with:
- name: ${{ env.BUILD_URLS }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
-############################################################################################
-##################################### Docker images #######################################
-############################################################################################
- DockerServerImages:
- needs:
- - BuilderDebRelease
- - BuilderDebAarch64
- runs-on: [self-hosted, style-checker]
- steps:
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- with:
- fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
- - name: Check docker clickhouse/clickhouse-server building
- run: |
- cd "$GITHUB_WORKSPACE/tests/ci"
- python3 docker_server.py --release-type head --no-push
- python3 docker_server.py --release-type head --no-push --no-ubuntu \
- --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
-############################################################################################
-##################################### BUILD REPORTER #######################################
-############################################################################################
- BuilderReport:
- needs:
- - BuilderDebRelease
- - BuilderDebAarch64
- - BuilderDebAsan
- - BuilderDebTsan
- - BuilderDebUBsan
- - BuilderDebMsan
- - BuilderDebDebug
- runs-on: [self-hosted, style-checker]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- CHECK_NAME=ClickHouse build check (actions)
- REPORTS_PATH=${{runner.temp}}/reports_dir
- REPORTS_PATH=${{runner.temp}}/reports_dir
- TEMP_PATH=${{runner.temp}}/report_check
- NEEDS_DATA_PATH=${{runner.temp}}/needs.json
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Report Builder
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cat > "$NEEDS_DATA_PATH" << 'EOF'
- ${{ toJSON(needs) }}
- EOF
- cd "$GITHUB_WORKSPACE/tests/ci"
- python3 build_report_check.py "$CHECK_NAME"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
-##############################################################################################
-########################### FUNCTIONAl STATELESS TESTS #######################################
-##############################################################################################
- FunctionalStatelessTestRelease:
- needs: [BuilderDebRelease]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateless_debug
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateless tests (release, actions)
- REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
- KILL_TIMEOUT=10800
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatelessTestAarch64:
- needs: [BuilderDebAarch64]
- runs-on: [self-hosted, func-tester-aarch64]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateless_release
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateless tests (aarch64, actions)
- REPO_COPY=${{runner.temp}}/stateless_release/ClickHouse
- KILL_TIMEOUT=10800
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatelessTestAsan0:
- needs: [BuilderDebAsan]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateless_debug
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateless tests (address, actions)
- REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
- KILL_TIMEOUT=10800
- RUN_BY_HASH_NUM=0
- RUN_BY_HASH_TOTAL=2
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatelessTestAsan1:
- needs: [BuilderDebAsan]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateless_debug
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateless tests (address, actions)
- REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
- KILL_TIMEOUT=10800
- RUN_BY_HASH_NUM=1
- RUN_BY_HASH_TOTAL=2
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatelessTestTsan0:
- needs: [BuilderDebTsan]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateless_tsan
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateless tests (thread, actions)
- REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
- KILL_TIMEOUT=10800
- RUN_BY_HASH_NUM=0
- RUN_BY_HASH_TOTAL=3
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatelessTestTsan1:
- needs: [BuilderDebTsan]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateless_tsan
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateless tests (thread, actions)
- REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
- KILL_TIMEOUT=10800
- RUN_BY_HASH_NUM=1
- RUN_BY_HASH_TOTAL=3
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatelessTestTsan2:
- needs: [BuilderDebTsan]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateless_tsan
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateless tests (thread, actions)
- REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
- KILL_TIMEOUT=10800
- RUN_BY_HASH_NUM=2
- RUN_BY_HASH_TOTAL=3
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatelessTestUBsan:
- needs: [BuilderDebUBsan]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateless_ubsan
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateless tests (ubsan, actions)
- REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse
- KILL_TIMEOUT=10800
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatelessTestMsan0:
- needs: [BuilderDebMsan]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateless_memory
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateless tests (memory, actions)
- REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
- KILL_TIMEOUT=10800
- RUN_BY_HASH_NUM=0
- RUN_BY_HASH_TOTAL=3
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatelessTestMsan1:
- needs: [BuilderDebMsan]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateless_memory
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateless tests (memory, actions)
- REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
- KILL_TIMEOUT=10800
- RUN_BY_HASH_NUM=1
- RUN_BY_HASH_TOTAL=3
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatelessTestMsan2:
- needs: [BuilderDebMsan]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateless_memory
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateless tests (memory, actions)
- REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
- KILL_TIMEOUT=10800
- RUN_BY_HASH_NUM=2
- RUN_BY_HASH_TOTAL=3
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatelessTestDebug0:
- needs: [BuilderDebDebug]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateless_debug
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateless tests (debug, actions)
- REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
- KILL_TIMEOUT=10800
- RUN_BY_HASH_NUM=0
- RUN_BY_HASH_TOTAL=3
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatelessTestDebug1:
- needs: [BuilderDebDebug]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateless_debug
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateless tests (debug, actions)
- REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
- KILL_TIMEOUT=10800
- RUN_BY_HASH_NUM=1
- RUN_BY_HASH_TOTAL=3
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatelessTestDebug2:
- needs: [BuilderDebDebug]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateless_debug
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateless tests (debug, actions)
- REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
- KILL_TIMEOUT=10800
- RUN_BY_HASH_NUM=2
- RUN_BY_HASH_TOTAL=3
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
-##############################################################################################
-############################ FUNCTIONAl STATEFUL TESTS #######################################
-##############################################################################################
- FunctionalStatefulTestRelease:
- needs: [BuilderDebRelease]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateful_debug
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateful tests (release, actions)
- REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
- KILL_TIMEOUT=3600
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatefulTestAarch64:
- needs: [BuilderDebAarch64]
- runs-on: [self-hosted, func-tester-aarch64]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateful_release
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateful tests (aarch64, actions)
- REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse
- KILL_TIMEOUT=3600
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatefulTestAsan:
- needs: [BuilderDebAsan]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateful_debug
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateful tests (address, actions)
- REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
- KILL_TIMEOUT=3600
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatefulTestTsan:
- needs: [BuilderDebTsan]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateful_tsan
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateful tests (thread, actions)
- REPO_COPY=${{runner.temp}}/stateful_tsan/ClickHouse
- KILL_TIMEOUT=3600
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatefulTestMsan:
- needs: [BuilderDebMsan]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateful_msan
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateful tests (memory, actions)
- REPO_COPY=${{runner.temp}}/stateful_msan/ClickHouse
- KILL_TIMEOUT=3600
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatefulTestUBsan:
- needs: [BuilderDebUBsan]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateful_ubsan
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateful tests (ubsan, actions)
- REPO_COPY=${{runner.temp}}/stateful_ubsan/ClickHouse
- KILL_TIMEOUT=3600
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- FunctionalStatefulTestDebug:
- needs: [BuilderDebDebug]
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stateful_debug
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stateful tests (debug, actions)
- REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
- KILL_TIMEOUT=3600
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Functional test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
-##############################################################################################
-######################################### STRESS TESTS #######################################
-##############################################################################################
- StressTestAsan:
- needs: [BuilderDebAsan]
- runs-on: [self-hosted, stress-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stress_thread
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stress test (address, actions)
- REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Stress test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 stress_check.py "$CHECK_NAME"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- StressTestTsan:
- needs: [BuilderDebTsan]
- # func testers have 16 cores + 128 GB memory
- # while stress testers have 36 cores + 72 memory
- # It would be better to have something like 32 + 128,
- # but such servers almost unavailable as spot instances.
- runs-on: [self-hosted, func-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stress_thread
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stress test (thread, actions)
- REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Stress test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 stress_check.py "$CHECK_NAME"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- StressTestMsan:
- needs: [BuilderDebMsan]
- runs-on: [self-hosted, stress-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stress_memory
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stress test (memory, actions)
- REPO_COPY=${{runner.temp}}/stress_memory/ClickHouse
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Stress test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 stress_check.py "$CHECK_NAME"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- StressTestUBsan:
- needs: [BuilderDebUBsan]
- runs-on: [self-hosted, stress-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stress_undefined
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stress test (undefined, actions)
- REPO_COPY=${{runner.temp}}/stress_undefined/ClickHouse
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Stress test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 stress_check.py "$CHECK_NAME"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- StressTestDebug:
- needs: [BuilderDebDebug]
- runs-on: [self-hosted, stress-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/stress_debug
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Stress test (debug, actions)
- REPO_COPY=${{runner.temp}}/stress_debug/ClickHouse
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Stress test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 stress_check.py "$CHECK_NAME"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
-#############################################################################################
-############################# INTEGRATION TESTS #############################################
-#############################################################################################
- IntegrationTestsAsan0:
- needs: [BuilderDebAsan]
- runs-on: [self-hosted, stress-tester]
+ git -C "$GITHUB_WORKSPACE" submodule sync --recursive
+ git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+############################################################################################
+##################################### Docker images #######################################
+############################################################################################
+ DockerServerImages:
+ needs:
+ - BuilderDebRelease
+ - BuilderDebAarch64
+ runs-on: [self-hosted, style-checker]
steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/integration_tests_asan
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Integration tests (asan, actions)
- REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
- RUN_BY_HASH_NUM=0
- RUN_BY_HASH_TOTAL=3
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- - name: Integration test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 integration_test_check.py "$CHECK_NAME"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- IntegrationTestsAsan1:
- needs: [BuilderDebAsan]
- runs-on: [self-hosted, stress-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/integration_tests_asan
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Integration tests (asan, actions)
- REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
- RUN_BY_HASH_NUM=1
- RUN_BY_HASH_TOTAL=3
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Integration test
+ fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
+ - name: Check docker clickhouse/clickhouse-server building
run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 integration_test_check.py "$CHECK_NAME"
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 docker_server.py --release-type head --no-push
+ python3 docker_server.py --release-type head --no-push --no-ubuntu \
+ --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
run: |
@@ -1593,57 +203,22 @@ jobs:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
- IntegrationTestsAsan2:
- needs: [BuilderDebAsan]
- runs-on: [self-hosted, stress-tester]
+############################################################################################
+##################################### BUILD REPORTER #######################################
+############################################################################################
+ BuilderReport:
+ needs:
+ - BuilderDebRelease
+ runs-on: [self-hosted, style-checker]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/integration_tests_asan
+ CHECK_NAME=ClickHouse build check (actions)
REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Integration tests (asan, actions)
- REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
- RUN_BY_HASH_NUM=2
- RUN_BY_HASH_TOTAL=3
- EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Integration test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 integration_test_check.py "$CHECK_NAME"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- IntegrationTestsTsan0:
- needs: [BuilderDebTsan]
- runs-on: [self-hosted, stress-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/integration_tests_tsan
REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Integration tests (thread, actions)
- REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
- RUN_BY_HASH_NUM=0
- RUN_BY_HASH_TOTAL=4
+ TEMP_PATH=${{runner.temp}}/report_check
+ NEEDS_DATA_PATH=${{runner.temp}}/needs.json
EOF
- name: Download json reports
uses: actions/download-artifact@v2
@@ -1654,51 +229,15 @@ jobs:
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- - name: Integration test
+ - name: Report Builder
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 integration_test_check.py "$CHECK_NAME"
- - name: Cleanup
- if: always()
- run: |
- # shellcheck disable=SC2046
- docker kill $(docker ps -q) ||:
- # shellcheck disable=SC2046
- docker rm -f $(docker ps -a -q) ||:
- sudo rm -fr "$TEMP_PATH"
- IntegrationTestsTsan1:
- needs: [BuilderDebTsan]
- runs-on: [self-hosted, stress-tester]
- steps:
- - name: Set envs
- run: |
- cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/integration_tests_tsan
- REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Integration tests (thread, actions)
- REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
- RUN_BY_HASH_NUM=1
- RUN_BY_HASH_TOTAL=4
+ cat > "$NEEDS_DATA_PATH" << 'EOF'
+ ${{ toJSON(needs) }}
EOF
- - name: Download json reports
- uses: actions/download-artifact@v2
- with:
- path: ${{ env.REPORTS_PATH }}
- - name: Clear repository
- run: |
- sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- - name: Check out repository code
- uses: actions/checkout@v2
- - name: Integration test
- run: |
- sudo rm -fr "$TEMP_PATH"
- mkdir -p "$TEMP_PATH"
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci"
- python3 integration_test_check.py "$CHECK_NAME"
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 build_report_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
@@ -1707,19 +246,21 @@ jobs:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
- IntegrationTestsTsan2:
- needs: [BuilderDebTsan]
- runs-on: [self-hosted, stress-tester]
+ ##############################################################################################
+ ########################### FUNCTIONAl STATELESS TESTS #######################################
+ ##############################################################################################
+ FunctionalStatelessTestRelease:
+ needs: [BuilderDebRelease]
+ runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/integration_tests_tsan
+ TEMP_PATH=${{runner.temp}}/stateless_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Integration tests (thread, actions)
- REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
- RUN_BY_HASH_NUM=2
- RUN_BY_HASH_TOTAL=4
+ CHECK_NAME=Stateless tests (release, actions)
+ REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
+ KILL_TIMEOUT=10800
EOF
- name: Download json reports
uses: actions/download-artifact@v2
@@ -1730,13 +271,13 @@ jobs:
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- - name: Integration test
+ - name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
- python3 integration_test_check.py "$CHECK_NAME"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
@@ -1745,19 +286,21 @@ jobs:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
- IntegrationTestsTsan3:
- needs: [BuilderDebTsan]
- runs-on: [self-hosted, stress-tester]
+ ##############################################################################################
+ ############################ FUNCTIONAl STATEFUL TESTS #######################################
+ ##############################################################################################
+ FunctionalStatefulTestRelease:
+ needs: [BuilderDebRelease]
+ runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
- TEMP_PATH=${{runner.temp}}/integration_tests_tsan
+ TEMP_PATH=${{runner.temp}}/stateful_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
- CHECK_NAME=Integration tests (thread, actions)
- REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
- RUN_BY_HASH_NUM=3
- RUN_BY_HASH_TOTAL=4
+ CHECK_NAME=Stateful tests (release, actions)
+ REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
+ KILL_TIMEOUT=3600
EOF
- name: Download json reports
uses: actions/download-artifact@v2
@@ -1768,13 +311,13 @@ jobs:
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- - name: Integration test
+ - name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
- python3 integration_test_check.py "$CHECK_NAME"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
@@ -1783,6 +326,9 @@ jobs:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
+ #############################################################################################
+ ############################# INTEGRATION TESTS #############################################
+ #############################################################################################
IntegrationTestsRelease0:
needs: [BuilderDebRelease]
runs-on: [self-hosted, stress-tester]
@@ -1864,41 +410,10 @@ jobs:
- DockerHubPush
- DockerServerImages
- BuilderReport
- - FunctionalStatelessTestDebug0
- - FunctionalStatelessTestDebug1
- - FunctionalStatelessTestDebug2
- FunctionalStatelessTestRelease
- - FunctionalStatelessTestAarch64
- - FunctionalStatelessTestAsan0
- - FunctionalStatelessTestAsan1
- - FunctionalStatelessTestTsan0
- - FunctionalStatelessTestTsan1
- - FunctionalStatelessTestTsan2
- - FunctionalStatelessTestMsan0
- - FunctionalStatelessTestMsan1
- - FunctionalStatelessTestMsan2
- - FunctionalStatelessTestUBsan
- - FunctionalStatefulTestDebug
- FunctionalStatefulTestRelease
- - FunctionalStatefulTestAarch64
- - FunctionalStatefulTestAsan
- - FunctionalStatefulTestTsan
- - FunctionalStatefulTestMsan
- - FunctionalStatefulTestUBsan
- - StressTestDebug
- - StressTestAsan
- - StressTestTsan
- - StressTestMsan
- - StressTestUBsan
- - IntegrationTestsAsan0
- - IntegrationTestsAsan1
- - IntegrationTestsAsan2
- IntegrationTestsRelease0
- IntegrationTestsRelease1
- - IntegrationTestsTsan0
- - IntegrationTestsTsan1
- - IntegrationTestsTsan2
- - IntegrationTestsTsan3
- CompatibilityCheck
runs-on: [self-hosted, style-checker]
steps:
diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt
index 305902936cdf..3c235b85c3c7 100644
--- a/cmake/autogenerated_versions.txt
+++ b/cmake/autogenerated_versions.txt
@@ -7,6 +7,8 @@ SET(VERSION_MAJOR 22)
SET(VERSION_MINOR 3)
SET(VERSION_PATCH 10)
SET(VERSION_GITHASH 7976930b82eed26e8728897d530e044774e0cded)
-SET(VERSION_DESCRIBE v22.3.10.19-lts)
-SET(VERSION_STRING 22.3.10.19)
+SET(VERSION_TWEAK 23)
+SET(VERSION_FLAVOUR altinitystable)
+SET(VERSION_DESCRIBE v22.3.10.23-altinitystable)
+SET(VERSION_STRING 22.3.10.23.altinitystable)
# end of autochange
diff --git a/cmake/version.cmake b/cmake/version.cmake
index acaa772ff2ff..d785da5fe9b1 100644
--- a/cmake/version.cmake
+++ b/cmake/version.cmake
@@ -19,5 +19,5 @@ set (VERSION_STRING_SHORT "${VERSION_MAJOR}.${VERSION_MINOR}")
math (EXPR VERSION_INTEGER "${VERSION_PATCH} + ${VERSION_MINOR}*1000 + ${VERSION_MAJOR}*1000000")
if(CLICKHOUSE_OFFICIAL_BUILD)
- set(VERSION_OFFICIAL " (official build)")
+ set(VERSION_OFFICIAL " (altinity build)")
endif()
diff --git a/docker/images.json b/docker/images.json
index 06d689e8f7cc..baf88e9e610e 100644
--- a/docker/images.json
+++ b/docker/images.json
@@ -1,164 +1,149 @@
{
"docker/packager/deb": {
- "name": "clickhouse/deb-builder",
+ "name": "altinityinfra/deb-builder",
"dependent": []
},
"docker/packager/binary": {
- "name": "clickhouse/binary-builder",
+ "name": "altinityinfra/binary-builder",
"dependent": [
"docker/test/split_build_smoke_test",
"docker/test/codebrowser"
]
},
"docker/test/compatibility/centos": {
- "name": "clickhouse/test-old-centos",
+ "name": "altinityinfra/test-old-centos",
"dependent": []
},
"docker/test/compatibility/ubuntu": {
- "name": "clickhouse/test-old-ubuntu",
+ "name": "altinityinfra/test-old-ubuntu",
"dependent": []
},
"docker/test/integration/base": {
- "name": "clickhouse/integration-test",
+ "name": "altinityinfra/integration-test",
"dependent": []
},
"docker/test/fuzzer": {
- "name": "clickhouse/fuzzer",
+ "name": "altinityinfra/fuzzer",
"dependent": []
},
"docker/test/performance-comparison": {
- "name": "clickhouse/performance-comparison",
+ "name": "altinityinfra/performance-comparison",
"dependent": []
},
"docker/test/util": {
- "name": "clickhouse/test-util",
+ "name": "altinityinfra/test-util",
"dependent": [
"docker/test/base",
"docker/test/fasttest"
]
},
"docker/test/stateless": {
- "name": "clickhouse/stateless-test",
+ "name": "altinityinfra/stateless-test",
"dependent": [
"docker/test/stateful",
"docker/test/unit"
]
},
"docker/test/stateful": {
- "name": "clickhouse/stateful-test",
+ "name": "altinityinfra/stateful-test",
"dependent": [
"docker/test/stress"
]
},
"docker/test/unit": {
- "name": "clickhouse/unit-test",
+ "name": "altinityinfra/unit-test",
"dependent": []
},
"docker/test/stress": {
- "name": "clickhouse/stress-test",
+ "name": "altinityinfra/stress-test",
"dependent": []
},
"docker/test/split_build_smoke_test": {
- "name": "clickhouse/split-build-smoke-test",
+ "name": "altinityinfra/split-build-smoke-test",
"dependent": []
},
"docker/test/codebrowser": {
- "name": "clickhouse/codebrowser",
+ "name": "altinityinfra/codebrowser",
"dependent": []
},
"docker/test/integration/runner": {
"only_amd64": true,
- "name": "clickhouse/integration-tests-runner",
+ "name": "altinityinfra/integration-tests-runner",
"dependent": []
},
"docker/test/testflows/runner": {
- "name": "clickhouse/testflows-runner",
+ "name": "altinityinfra/testflows-runner",
"dependent": []
},
"docker/test/fasttest": {
- "name": "clickhouse/fasttest",
+ "name": "altinityinfra/fasttest",
"dependent": []
},
"docker/test/style": {
- "name": "clickhouse/style-test",
+ "name": "altinityinfra/style-test",
"dependent": []
},
"docker/test/integration/s3_proxy": {
- "name": "clickhouse/s3-proxy",
+ "name": "altinityinfra/s3-proxy",
"dependent": []
},
"docker/test/integration/resolver": {
- "name": "clickhouse/python-bottle",
+ "name": "altinityinfra/python-bottle",
"dependent": []
},
"docker/test/integration/helper_container": {
- "name": "clickhouse/integration-helper",
+ "name": "altinityinfra/integration-helper",
"dependent": []
},
"docker/test/integration/mysql_golang_client": {
- "name": "clickhouse/mysql-golang-client",
+ "name": "altinityinfra/mysql-golang-client",
"dependent": []
},
"docker/test/integration/dotnet_client": {
- "name": "clickhouse/dotnet-client",
+ "name": "altinityinfra/dotnet-client",
"dependent": []
},
"docker/test/integration/mysql_java_client": {
- "name": "clickhouse/mysql-java-client",
+ "name": "altinityinfra/mysql-java-client",
"dependent": []
},
"docker/test/integration/mysql_js_client": {
- "name": "clickhouse/mysql-js-client",
+ "name": "altinityinfra/mysql-js-client",
"dependent": []
},
"docker/test/integration/mysql_php_client": {
- "name": "clickhouse/mysql-php-client",
+ "name": "altinityinfra/mysql-php-client",
"dependent": []
},
"docker/test/integration/postgresql_java_client": {
- "name": "clickhouse/postgresql-java-client",
+ "name": "altinityinfra/postgresql-java-client",
"dependent": []
},
"docker/test/integration/kerberos_kdc": {
"only_amd64": true,
- "name": "clickhouse/kerberos-kdc",
+ "name": "altinityinfra/kerberos-kdc",
"dependent": []
},
"docker/test/base": {
- "name": "clickhouse/test-base",
- "dependent": [
+ "name": "altinityinfra/test-base",
+ "dependent": [
"docker/test/stateless",
"docker/test/integration/base",
"docker/test/fuzzer",
"docker/test/keeper-jepsen"
- ]
+ ]
},
"docker/test/integration/kerberized_hadoop": {
"only_amd64": true,
- "name": "clickhouse/kerberized-hadoop",
+ "name": "altinityinfra/kerberized-hadoop",
"dependent": []
},
"docker/test/sqlancer": {
- "name": "clickhouse/sqlancer-test",
+ "name": "altinityinfra/sqlancer-test",
"dependent": []
},
"docker/test/keeper-jepsen": {
- "name": "clickhouse/keeper-jepsen-test",
- "dependent": []
- },
- "docker/docs/builder": {
- "name": "clickhouse/docs-builder",
- "dependent": [
- "docker/docs/check",
- "docker/docs/release"
- ]
- },
- "docker/docs/check": {
- "name": "clickhouse/docs-check",
- "dependent": []
- },
- "docker/docs/release": {
- "name": "clickhouse/docs-release",
+ "name": "altinityinfra/keeper-jepsen-test",
"dependent": []
}
}
diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh
index 943b92abdda1..7ed58b3d82a1 100755
--- a/docker/packager/binary/build.sh
+++ b/docker/packager/binary/build.sh
@@ -19,6 +19,9 @@ ln -sf darwin-x86_64 build/cmake/toolchain/darwin-aarch64
# export CCACHE_LOGFILE=/build/ccache.log
# export CCACHE_DEBUG=1
+# https://stackoverflow.com/a/71940133
+git config --global --add safe.directory '*'
+
mkdir -p build/build_docker
cd build/build_docker
rm -f CMakeCache.txt
diff --git a/docker/packager/packager b/docker/packager/packager
index f82d402d613a..6235ee7ad10e 100755
--- a/docker/packager/packager
+++ b/docker/packager/packager
@@ -331,7 +331,7 @@ if __name__ == "__main__":
args.output_dir = os.path.abspath(os.path.join(os.getcwd(), args.output_dir))
image_type = "binary" if args.package_type == "performance" else args.package_type
- image_name = "clickhouse/binary-builder"
+ image_name = "altinityinfra/binary-builder"
if not os.path.isabs(args.clickhouse_repo_path):
ch_root = os.path.abspath(os.path.join(os.getcwd(), args.clickhouse_repo_path))
diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile
index 6beab2e5bb70..58fa01241e1f 100644
--- a/docker/test/base/Dockerfile
+++ b/docker/test/base/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/test-base .
+# docker build -t altinityinfra/test-base .
ARG FROM_TAG=latest
-FROM clickhouse/test-util:$FROM_TAG
+FROM altinityinfra/test-util:$FROM_TAG
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
diff --git a/docker/test/codebrowser/Dockerfile b/docker/test/codebrowser/Dockerfile
index 97f3f54ad987..86147635373f 100644
--- a/docker/test/codebrowser/Dockerfile
+++ b/docker/test/codebrowser/Dockerfile
@@ -2,7 +2,7 @@
# docker build --network=host -t clickhouse/codebrowser .
# docker run --volume=path_to_repo:/repo_folder --volume=path_to_result:/test_output clickhouse/codebrowser
ARG FROM_TAG=latest
-FROM clickhouse/binary-builder:$FROM_TAG
+FROM altinityinfra/binary-builder:$FROM_TAG
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile
index 46b74d89e13f..f61b0d11057f 100644
--- a/docker/test/fasttest/Dockerfile
+++ b/docker/test/fasttest/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/fasttest .
ARG FROM_TAG=latest
-FROM clickhouse/test-util:$FROM_TAG
+FROM altinityinfra/test-util:$FROM_TAG
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
diff --git a/docker/test/fuzzer/Dockerfile b/docker/test/fuzzer/Dockerfile
index eb4b09c173f6..2aec54bd1719 100644
--- a/docker/test/fuzzer/Dockerfile
+++ b/docker/test/fuzzer/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/fuzzer .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
diff --git a/docker/test/integration/base/Dockerfile b/docker/test/integration/base/Dockerfile
index 9b6318a5426b..ff5c4b2982b5 100644
--- a/docker/test/integration/base/Dockerfile
+++ b/docker/test/integration/base/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/integration-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
SHELL ["/bin/bash", "-c"]
diff --git a/docker/test/integration/mysql_php_client/Dockerfile b/docker/test/integration/mysql_php_client/Dockerfile
index 0fb77bf8ffb7..55db4d15a7f3 100644
--- a/docker/test/integration/mysql_php_client/Dockerfile
+++ b/docker/test/integration/mysql_php_client/Dockerfile
@@ -1,7 +1,7 @@
# docker build -t clickhouse/mysql-php-client .
# MySQL PHP client docker container
-FROM php:7.3-cli
+FROM php:8.0.18-cli
COPY ./client.crt client.crt
COPY ./client.key client.key
diff --git a/docker/test/integration/runner/compose/docker_compose_dotnet_client.yml b/docker/test/integration/runner/compose/docker_compose_dotnet_client.yml
index b63dac51522c..e5746fa209fb 100644
--- a/docker/test/integration/runner/compose/docker_compose_dotnet_client.yml
+++ b/docker/test/integration/runner/compose/docker_compose_dotnet_client.yml
@@ -1,6 +1,6 @@
version: '2.3'
services:
dotnet1:
- image: clickhouse/dotnet-client:${DOCKER_DOTNET_CLIENT_TAG:-latest}
+ image: altinityinfra/dotnet-client:${DOCKER_DOTNET_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity
diff --git a/docker/test/integration/runner/compose/docker_compose_keeper.yml b/docker/test/integration/runner/compose/docker_compose_keeper.yml
index 134ffbff1f74..375003d5e14f 100644
--- a/docker/test/integration/runner/compose/docker_compose_keeper.yml
+++ b/docker/test/integration/runner/compose/docker_compose_keeper.yml
@@ -1,7 +1,7 @@
version: '2.3'
services:
zoo1:
- image: ${image:-clickhouse/integration-test}
+ image: ${image:-altinityinfra/integration-test}
restart: always
user: ${user:-}
volumes:
@@ -31,7 +31,7 @@ services:
- inet6
- rotate
zoo2:
- image: ${image:-clickhouse/integration-test}
+ image: ${image:-altinityinfra/integration-test}
restart: always
user: ${user:-}
volumes:
@@ -61,7 +61,7 @@ services:
- inet6
- rotate
zoo3:
- image: ${image:-clickhouse/integration-test}
+ image: ${image:-altinityinfra/integration-test}
restart: always
user: ${user:-}
volumes:
diff --git a/docker/test/integration/runner/compose/docker_compose_kerberized_hdfs.yml b/docker/test/integration/runner/compose/docker_compose_kerberized_hdfs.yml
index e1b4d393169a..365821b3f5ea 100644
--- a/docker/test/integration/runner/compose/docker_compose_kerberized_hdfs.yml
+++ b/docker/test/integration/runner/compose/docker_compose_kerberized_hdfs.yml
@@ -4,7 +4,7 @@ services:
kerberizedhdfs1:
cap_add:
- DAC_READ_SEARCH
- image: clickhouse/kerberized-hadoop:${DOCKER_KERBERIZED_HADOOP_TAG:-latest}
+ image: altinityinfra/kerberized-hadoop:${DOCKER_KERBERIZED_HADOOP_TAG:-latest}
hostname: kerberizedhdfs1
restart: always
volumes:
@@ -22,7 +22,7 @@ services:
entrypoint: /etc/bootstrap.sh -d
hdfskerberos:
- image: clickhouse/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
+ image: altinityinfra/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
hostname: hdfskerberos
volumes:
- ${KERBERIZED_HDFS_DIR}/secrets:/tmp/keytab
diff --git a/docker/test/integration/runner/compose/docker_compose_kerberized_kafka.yml b/docker/test/integration/runner/compose/docker_compose_kerberized_kafka.yml
index d57e4e4d5bea..8dbdd9c74c0c 100644
--- a/docker/test/integration/runner/compose/docker_compose_kerberized_kafka.yml
+++ b/docker/test/integration/runner/compose/docker_compose_kerberized_kafka.yml
@@ -50,7 +50,7 @@ services:
- label:disable
kafka_kerberos:
- image: clickhouse/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
+ image: altinityinfra/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
hostname: kafka_kerberos
volumes:
- ${KERBERIZED_KAFKA_DIR}/secrets:/tmp/keytab
diff --git a/docker/test/integration/runner/compose/docker_compose_minio.yml b/docker/test/integration/runner/compose/docker_compose_minio.yml
index 6e8c826b2346..438f3486e177 100644
--- a/docker/test/integration/runner/compose/docker_compose_minio.yml
+++ b/docker/test/integration/runner/compose/docker_compose_minio.yml
@@ -21,14 +21,14 @@ services:
# HTTP proxies for Minio.
proxy1:
- image: clickhouse/s3-proxy
+ image: altinityinfra/s3-proxy
expose:
- "8080" # Redirect proxy port
- "80" # Reverse proxy port
- "443" # Reverse proxy port (secure)
proxy2:
- image: clickhouse/s3-proxy
+ image: altinityinfra/s3-proxy
expose:
- "8080"
- "80"
@@ -36,7 +36,7 @@ services:
# Empty container to run proxy resolver.
resolver:
- image: clickhouse/python-bottle
+ image: altinityinfra/python-bottle
expose:
- "8080"
tty: true
diff --git a/docker/test/integration/runner/compose/docker_compose_mysql_golang_client.yml b/docker/test/integration/runner/compose/docker_compose_mysql_golang_client.yml
index 56cc04105740..09154b584244 100644
--- a/docker/test/integration/runner/compose/docker_compose_mysql_golang_client.yml
+++ b/docker/test/integration/runner/compose/docker_compose_mysql_golang_client.yml
@@ -1,6 +1,6 @@
version: '2.3'
services:
golang1:
- image: clickhouse/mysql-golang-client:${DOCKER_MYSQL_GOLANG_CLIENT_TAG:-latest}
+ image: altinityinfra/mysql-golang-client:${DOCKER_MYSQL_GOLANG_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity
diff --git a/docker/test/integration/runner/compose/docker_compose_mysql_java_client.yml b/docker/test/integration/runner/compose/docker_compose_mysql_java_client.yml
index eb5ffb01baa2..a84cef915df2 100644
--- a/docker/test/integration/runner/compose/docker_compose_mysql_java_client.yml
+++ b/docker/test/integration/runner/compose/docker_compose_mysql_java_client.yml
@@ -1,6 +1,6 @@
version: '2.3'
services:
java1:
- image: clickhouse/mysql-java-client:${DOCKER_MYSQL_JAVA_CLIENT_TAG:-latest}
+ image: altinityinfra/mysql-java-client:${DOCKER_MYSQL_JAVA_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity
diff --git a/docker/test/integration/runner/compose/docker_compose_mysql_js_client.yml b/docker/test/integration/runner/compose/docker_compose_mysql_js_client.yml
index 90939449c5f3..b46eb2706c47 100644
--- a/docker/test/integration/runner/compose/docker_compose_mysql_js_client.yml
+++ b/docker/test/integration/runner/compose/docker_compose_mysql_js_client.yml
@@ -1,6 +1,6 @@
version: '2.3'
services:
mysqljs1:
- image: clickhouse/mysql-js-client:${DOCKER_MYSQL_JS_CLIENT_TAG:-latest}
+ image: altinityinfra/mysql-js-client:${DOCKER_MYSQL_JS_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity
diff --git a/docker/test/integration/runner/compose/docker_compose_mysql_php_client.yml b/docker/test/integration/runner/compose/docker_compose_mysql_php_client.yml
index 408b8ff089a9..662783a00a1f 100644
--- a/docker/test/integration/runner/compose/docker_compose_mysql_php_client.yml
+++ b/docker/test/integration/runner/compose/docker_compose_mysql_php_client.yml
@@ -1,6 +1,6 @@
version: '2.3'
services:
php1:
- image: clickhouse/mysql-php-client:${DOCKER_MYSQL_PHP_CLIENT_TAG:-latest}
+ image: altinityinfra/mysql-php-client:${DOCKER_MYSQL_PHP_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity
diff --git a/docker/test/integration/runner/compose/docker_compose_postgresql_java_client.yml b/docker/test/integration/runner/compose/docker_compose_postgresql_java_client.yml
index 904bfffdfd5b..5c8673ae3eeb 100644
--- a/docker/test/integration/runner/compose/docker_compose_postgresql_java_client.yml
+++ b/docker/test/integration/runner/compose/docker_compose_postgresql_java_client.yml
@@ -1,6 +1,6 @@
version: '2.2'
services:
java:
- image: clickhouse/postgresql-java-client:${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:-latest}
+ image: altinityinfra/postgresql-java-client:${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity
diff --git a/docker/test/keeper-jepsen/Dockerfile b/docker/test/keeper-jepsen/Dockerfile
index a794e076ec02..b93b07189012 100644
--- a/docker/test/keeper-jepsen/Dockerfile
+++ b/docker/test/keeper-jepsen/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/keeper-jepsen-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
ENV DEBIAN_FRONTEND=noninteractive
ENV CLOJURE_VERSION=1.10.3.814
diff --git a/docker/test/split_build_smoke_test/Dockerfile b/docker/test/split_build_smoke_test/Dockerfile
index 5f84eb42216c..cb41859fb118 100644
--- a/docker/test/split_build_smoke_test/Dockerfile
+++ b/docker/test/split_build_smoke_test/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/split-build-smoke-test .
+# docker build -t altinityinfra/split-build-smoke-test .
ARG FROM_TAG=latest
-FROM clickhouse/binary-builder:$FROM_TAG
+FROM altinityinfra/binary-builder:$FROM_TAG
COPY run.sh /run.sh
COPY process_split_build_smoke_test_result.py /
diff --git a/docker/test/stateful/Dockerfile b/docker/test/stateful/Dockerfile
index 543cf113b2b2..a68168b1271f 100644
--- a/docker/test/stateful/Dockerfile
+++ b/docker/test/stateful/Dockerfile
@@ -1,13 +1,16 @@
# rebuild in #33610
# docker build -t clickhouse/stateful-test .
ARG FROM_TAG=latest
-FROM clickhouse/stateless-test:$FROM_TAG
+# TODO consider replacing clickhouse with altinityinfra dockerhub account
+FROM altinityinfra/stateless-test:$FROM_TAG
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \
apt-get install --yes --no-install-recommends \
python3-requests \
- llvm-9
+ llvm-9 \
+ rpm2cpio \
+ cpio
COPY s3downloader /s3downloader
@@ -17,8 +20,7 @@ ENV EXPORT_S3_STORAGE_POLICIES=1
# Download Minio-related binaries
RUN arch=${TARGETARCH:-amd64} \
- && wget "https://dl.min.io/server/minio/release/linux-${arch}/minio" \
- && chmod +x ./minio \
+ && wget "https://dl.min.io/server/minio/release/linux-${arch}/archive/minio-20220103182258.0.0.x86_64.rpm" \
&& wget "https://dl.min.io/client/mc/release/linux-${arch}/mc" \
&& chmod +x ./mc
ENV MINIO_ROOT_USER="clickhouse"
diff --git a/docker/test/stateful/setup_minio.sh b/docker/test/stateful/setup_minio.sh
index 5758d905197b..d077dea920c6 100755
--- a/docker/test/stateful/setup_minio.sh
+++ b/docker/test/stateful/setup_minio.sh
@@ -9,6 +9,10 @@
set -e -x -a -u
+rpm2cpio ./minio-20220103182258.0.0.x86_64.rpm | cpio -i --make-directories
+find -name minio
+cp ./usr/local/bin/minio ./
+
ls -lha
mkdir -p ./minio_data
@@ -27,12 +31,19 @@ fi
MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse}
MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse}
+./minio --version
./minio server --address ":11111" ./minio_data &
+i=0
while ! curl -v --silent http://localhost:11111 2>&1 | grep AccessDenied
do
+ if [[ $i == 60 ]]; then
+ echo "Failed to setup minio"
+ exit 0
+ fi
echo "Trying to connect to minio"
sleep 1
+ i=$((i + 1))
done
lsof -i :11111
diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile
index 68c08c23b3ff..975a1da8d36e 100644
--- a/docker/test/stateless/Dockerfile
+++ b/docker/test/stateless/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/stateless-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"
@@ -32,7 +32,9 @@ RUN apt-get update -y \
mysql-client=8.0* \
postgresql-client \
sqlite3 \
- awscli
+ awscli \
+ rpm2cpio \
+ cpio
RUN pip3 install numpy scipy pandas Jinja2
@@ -53,8 +55,7 @@ ARG TARGETARCH
# Download Minio-related binaries
RUN arch=${TARGETARCH:-amd64} \
- && wget "https://dl.min.io/server/minio/release/linux-${arch}/minio" \
- && chmod +x ./minio \
+ && wget "https://dl.min.io/server/minio/release/linux-${arch}/archive/minio-20220103182258.0.0.x86_64.rpm" \
&& wget "https://dl.min.io/client/mc/release/linux-${arch}/mc" \
&& chmod +x ./mc
diff --git a/docker/test/stateless/setup_minio.sh b/docker/test/stateless/setup_minio.sh
index df27b21b05b7..031a54639e92 100755
--- a/docker/test/stateless/setup_minio.sh
+++ b/docker/test/stateless/setup_minio.sh
@@ -16,21 +16,32 @@ if [ ! -f ./minio ]; then
BINARY_TYPE=$(uname -s | tr '[:upper:]' '[:lower:]')
- wget "https://dl.min.io/server/minio/release/${BINARY_TYPE}-amd64/minio" \
- && chmod +x ./minio \
+ wget "https://dl.min.io/server/minio/release/${BINARY_TYPE}-amd64/archive/minio-20220103182258.0.0.x86_64.rpm" \
&& wget "https://dl.min.io/client/mc/release/${BINARY_TYPE}-amd64/mc" \
&& chmod +x ./mc
fi
+rpm2cpio ./minio-20220103182258.0.0.x86_64.rpm | cpio -i --make-directories
+find -name minio
+cp ./usr/local/bin/minio ./
+
MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse}
MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse}
+./minio --version
+
./minio server --address ":11111" ./minio_data &
+i=0
while ! curl -v --silent http://localhost:11111 2>&1 | grep AccessDenied
do
+ if [[ $i == 60 ]]; then
+ echo "Failed to setup minio"
+ exit 0
+ fi
echo "Trying to connect to minio"
sleep 1
+ i=$((i + 1))
done
lsof -i :11111
diff --git a/docker/test/stateless_pytest/Dockerfile b/docker/test/stateless_pytest/Dockerfile
index 789ee0e9b308..c148b6212417 100644
--- a/docker/test/stateless_pytest/Dockerfile
+++ b/docker/test/stateless_pytest/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/stateless-pytest .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
RUN apt-get update -y && \
apt-get install -y --no-install-recommends \
diff --git a/docker/test/stress/Dockerfile b/docker/test/stress/Dockerfile
index 393508fd551b..4f6834fff737 100644
--- a/docker/test/stress/Dockerfile
+++ b/docker/test/stress/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/stress-test .
ARG FROM_TAG=latest
-FROM clickhouse/stateful-test:$FROM_TAG
+FROM altinityinfra/stateful-test:$FROM_TAG
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \
diff --git a/docker/test/unit/Dockerfile b/docker/test/unit/Dockerfile
index b75bfb6661cc..378341ab8b69 100644
--- a/docker/test/unit/Dockerfile
+++ b/docker/test/unit/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/unit-test .
ARG FROM_TAG=latest
-FROM clickhouse/stateless-test:$FROM_TAG
+FROM altinityinfra/stateless-test:$FROM_TAG
RUN apt-get install gdb
diff --git a/packages/clickhouse-client.yaml b/packages/clickhouse-client.yaml
index 6d1233e7c7a5..efb509e622e6 100644
--- a/packages/clickhouse-client.yaml
+++ b/packages/clickhouse-client.yaml
@@ -4,8 +4,8 @@ name: "clickhouse-client"
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
-vendor: "ClickHouse Inc."
-homepage: "https://clickhouse.com"
+vendor: "Altinity Inc."
+homepage: "https://altinity.com/"
license: "Apache"
section: "database"
priority: "optional"
diff --git a/packages/clickhouse-common-static-dbg.yaml b/packages/clickhouse-common-static-dbg.yaml
index 12a1594bd301..78f74ed15cb8 100644
--- a/packages/clickhouse-common-static-dbg.yaml
+++ b/packages/clickhouse-common-static-dbg.yaml
@@ -4,8 +4,8 @@ name: "clickhouse-common-static-dbg"
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
-vendor: "ClickHouse Inc."
-homepage: "https://clickhouse.com"
+vendor: "Altinity Inc."
+homepage: "https://altinity.com/"
license: "Apache"
section: "database"
priority: "optional"
diff --git a/packages/clickhouse-common-static.yaml b/packages/clickhouse-common-static.yaml
index 269d4318e5e0..07dee0d326bb 100644
--- a/packages/clickhouse-common-static.yaml
+++ b/packages/clickhouse-common-static.yaml
@@ -4,8 +4,8 @@ name: "clickhouse-common-static"
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
-vendor: "ClickHouse Inc."
-homepage: "https://clickhouse.com"
+vendor: "Altinity Inc."
+homepage: "https://altinity.com/"
license: "Apache"
section: "database"
priority: "optional"
diff --git a/packages/clickhouse-keeper-dbg.yaml b/packages/clickhouse-keeper-dbg.yaml
index 2c70b7ad4aa4..55bf5da74f20 100644
--- a/packages/clickhouse-keeper-dbg.yaml
+++ b/packages/clickhouse-keeper-dbg.yaml
@@ -4,8 +4,8 @@ name: "clickhouse-keeper-dbg"
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
-vendor: "ClickHouse Inc."
-homepage: "https://clickhouse.com"
+vendor: "Altinity Inc."
+homepage: "https://altinity.com/"
license: "Apache"
section: "database"
priority: "optional"
diff --git a/packages/clickhouse-keeper.yaml b/packages/clickhouse-keeper.yaml
index e99ac30f9443..c8c0b1ad2b33 100644
--- a/packages/clickhouse-keeper.yaml
+++ b/packages/clickhouse-keeper.yaml
@@ -4,8 +4,8 @@ name: "clickhouse-keeper"
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
-vendor: "ClickHouse Inc."
-homepage: "https://clickhouse.com"
+vendor: "Altinity Inc."
+homepage: "https://altinity.com/"
license: "Apache"
section: "database"
priority: "optional"
diff --git a/packages/clickhouse-server.yaml b/packages/clickhouse-server.yaml
index 289956897549..e437deba8790 100644
--- a/packages/clickhouse-server.yaml
+++ b/packages/clickhouse-server.yaml
@@ -4,8 +4,8 @@ name: "clickhouse-server"
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
-vendor: "ClickHouse Inc."
-homepage: "https://clickhouse.com"
+vendor: "Altinity Inc."
+homepage: "https://altinity.com/"
license: "Apache"
section: "database"
priority: "optional"
diff --git a/src/Core/Settings.h b/src/Core/Settings.h
index 0c69b864d528..b40d88f769f0 100644
--- a/src/Core/Settings.h
+++ b/src/Core/Settings.h
@@ -396,6 +396,7 @@ class IColumn;
M(Bool, parallel_view_processing, false, "Enables pushing to attached views concurrently instead of sequentially.", 0) \
M(Bool, enable_unaligned_array_join, false, "Allow ARRAY JOIN with multiple arrays that have different sizes. When this settings is enabled, arrays will be resized to the longest one.", 0) \
M(Bool, optimize_read_in_order, true, "Enable ORDER BY optimization for reading data in corresponding order in MergeTree tables.", 0) \
+ M(Bool, optimize_read_in_window_order, true, "Enable ORDER BY optimization in window clause for reading data in corresponding order in MergeTree tables.", 0) \
M(Bool, optimize_aggregation_in_order, false, "Enable GROUP BY optimization for aggregating data in corresponding order in MergeTree tables.", 0) \
M(UInt64, aggregation_in_order_max_block_bytes, 50000000, "Maximal size of block in bytes accumulated during aggregation in order of primary key. Lower block size allows to parallelize more final merge stage of aggregation.", 0) \
M(UInt64, read_in_order_two_level_merge_threshold, 100, "Minimal number of parts to read to run preliminary merge step during multithread reading in order of primary key.", 0) \
diff --git a/src/Disks/IDiskRemote.h b/src/Disks/IDiskRemote.h
index 82e76b8f68d4..4c91400c94c5 100644
--- a/src/Disks/IDiskRemote.h
+++ b/src/Disks/IDiskRemote.h
@@ -165,9 +165,10 @@ friend class DiskRemoteReservation;
DiskPtr metadata_disk;
FileCachePtr cache;
-private:
+public:
void removeMetadata(const String & path, RemoteFSPathKeeperPtr fs_paths_keeper);
+private:
void removeMetadataRecursive(const String & path, RemoteFSPathKeeperPtr fs_paths_keeper);
bool tryReserve(UInt64 bytes);
diff --git a/src/Disks/S3/DiskS3.cpp b/src/Disks/S3/DiskS3.cpp
index e46620d9d1f0..723b1e7373c1 100644
--- a/src/Disks/S3/DiskS3.cpp
+++ b/src/Disks/S3/DiskS3.cpp
@@ -11,7 +11,6 @@
#include
#include
-#include
#include
#include
@@ -35,6 +34,7 @@
#include
#include
+#include
#include
#include
#include
@@ -57,52 +57,6 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
}
-/// Helper class to collect keys into chunks of maximum size (to prepare batch requests to AWS API)
-/// see https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
-class S3PathKeeper : public RemoteFSPathKeeper
-{
-public:
- using Chunk = Aws::Vector<Aws::S3::Model::ObjectIdentifier>;
- using Chunks = std::list<Chunk>;
-
- explicit S3PathKeeper(size_t chunk_limit_) : RemoteFSPathKeeper(chunk_limit_) {}
-
- void addPath(const String & path) override
- {
- if (chunks.empty() || chunks.back().size() >= chunk_limit)
- {
- /// add one more chunk
- chunks.push_back(Chunks::value_type());
- chunks.back().reserve(chunk_limit);
- }
- Aws::S3::Model::ObjectIdentifier obj;
- obj.SetKey(path);
- chunks.back().push_back(obj);
- }
-
- void removePaths(Fn auto && remove_chunk_func)
- {
- for (auto & chunk : chunks)
- remove_chunk_func(std::move(chunk));
- }
-
- static String getChunkKeys(const Chunk & chunk)
- {
- String res;
- for (const auto & obj : chunk)
- {
- const auto & key = obj.GetKey();
- if (!res.empty())
- res.append(", ");
- res.append(key.c_str(), key.size());
- }
- return res;
- }
-
-private:
- Chunks chunks;
-};
-
template
void throwIfError(Aws::Utils::Outcome & response)
{
@@ -155,12 +109,14 @@ DiskS3::DiskS3(
DiskPtr metadata_disk_,
FileCachePtr cache_,
ContextPtr context_,
+ const S3Capabilities & s3_capabilities_,
SettingsPtr settings_,
GetDiskSettings settings_getter_)
: IDiskRemote(name_, s3_root_path_, metadata_disk_, std::move(cache_), "DiskS3", settings_->thread_pool_size)
, bucket(std::move(bucket_))
, current_settings(std::move(settings_))
, settings_getter(settings_getter_)
+ , s3_capabilities(s3_capabilities_)
, context(context_)
{
}
@@ -180,15 +136,31 @@ void DiskS3::removeFromRemoteFS(RemoteFSPathKeeperPtr fs_paths_keeper)
s3_paths_keeper->removePaths([&](S3PathKeeper::Chunk && chunk)
{
String keys = S3PathKeeper::getChunkKeys(chunk);
- LOG_TRACE(log, "Remove AWS keys {}", keys);
- Aws::S3::Model::Delete delkeys;
- delkeys.SetObjects(chunk);
- Aws::S3::Model::DeleteObjectsRequest request;
- request.SetBucket(bucket);
- request.SetDelete(delkeys);
- auto outcome = settings->client->DeleteObjects(request);
- // Do not throw here, continue deleting other chunks
- logIfError(outcome, [&](){return "Can't remove AWS keys: " + keys;});
+ if (!s3_capabilities.support_batch_delete)
+ {
+ LOG_TRACE(log, "Remove AWS keys {} one by one", keys);
+ for (const auto & obj : chunk)
+ {
+ Aws::S3::Model::DeleteObjectRequest request;
+ request.SetBucket(bucket);
+ request.SetKey(obj.GetKey());
+ auto outcome = settings->client->DeleteObject(request);
+ // Do not throw here, continue deleting other keys and chunks
+ logIfError(outcome, [&](){return "Can't remove AWS key: " + obj.GetKey();});
+ }
+ }
+ else
+ {
+ LOG_TRACE(log, "Remove AWS keys {}", keys);
+ Aws::S3::Model::Delete delkeys;
+ delkeys.SetObjects(chunk);
+ Aws::S3::Model::DeleteObjectsRequest request;
+ request.SetBucket(bucket);
+ request.SetDelete(delkeys);
+ auto outcome = settings->client->DeleteObjects(request);
+ // Do not throw here, continue deleting other chunks
+ logIfError(outcome, [&](){return "Can't remove AWS keys: " + keys;});
+ }
});
}
diff --git a/src/Disks/S3/DiskS3.h b/src/Disks/S3/DiskS3.h
index 2de1600d906f..473dc6a7a751 100644
--- a/src/Disks/S3/DiskS3.h
+++ b/src/Disks/S3/DiskS3.h
@@ -7,12 +7,15 @@
#include
#include
#include
+#include
#include "Disks/DiskFactory.h"
#include "Disks/Executor.h"
+#include
#include
#include
#include
+#include
#include
#include
@@ -76,6 +79,7 @@ class DiskS3 final : public IDiskRemote
DiskPtr metadata_disk_,
FileCachePtr cache_,
ContextPtr context_,
+ const S3Capabilities & s3_capabilities_,
SettingsPtr settings_,
GetDiskSettings settings_getter_);
@@ -119,6 +123,8 @@ class DiskS3 final : public IDiskRemote
void applyNewSettings(const Poco::Util::AbstractConfiguration & config, ContextPtr context, const String &, const DisksMap &) override;
+ void setCapabilitiesSupportBatchDelete(bool value) { s3_capabilities.support_batch_delete = value; }
+
private:
void createFileOperationObject(const String & operation_name, UInt64 revision, const ObjectMetadata & metadata);
/// Converts revision to binary string with leading zeroes (64 bit).
@@ -166,6 +172,7 @@ class DiskS3 final : public IDiskRemote
MultiVersion current_settings;
/// Gets disk settings from context.
GetDiskSettings settings_getter;
+ S3Capabilities s3_capabilities;
std::atomic<UInt64> revision_counter = 0;
static constexpr UInt64 LATEST_REVISION = std::numeric_limits<UInt64>::max();
@@ -187,6 +194,57 @@ class DiskS3 final : public IDiskRemote
ContextPtr context;
};
+/// Helper class to collect keys into chunks of maximum size (to prepare batch requests to AWS API)
+/// see https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
+class S3PathKeeper : public RemoteFSPathKeeper
+{
+public:
+ using Chunk = Aws::Vector<Aws::S3::Model::ObjectIdentifier>;
+ using Chunks = std::list<Chunk>;
+
+ explicit S3PathKeeper(size_t chunk_limit_) : RemoteFSPathKeeper(chunk_limit_) {}
+
+ void addPath(const String & path) override
+ {
+ if (chunks.empty() || chunks.back().size() >= chunk_limit)
+ {
+ /// add one more chunk
+ chunks.push_back(Chunks::value_type());
+ chunks.back().reserve(chunk_limit);
+ }
+ Aws::S3::Model::ObjectIdentifier obj;
+ obj.SetKey(path);
+ chunks.back().push_back(obj);
+ }
+
+ void removePaths(Fn auto && remove_chunk_func)
+ {
+ for (auto & chunk : chunks)
+ remove_chunk_func(std::move(chunk));
+ }
+
+ Chunks getChunks() const
+ {
+ return chunks;
+ }
+
+ static String getChunkKeys(const Chunk & chunk)
+ {
+ String res;
+ for (const auto & obj : chunk)
+ {
+ const auto & key = obj.GetKey();
+ if (!res.empty())
+ res.append(", ");
+ res.append(key.c_str(), key.size());
+ }
+ return res;
+ }
+
+private:
+ Chunks chunks;
+};
+
}
#endif
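The S3PathKeeper moved into this header collects object keys into chunks of bounded size so that each batch delete request to the AWS API stays within the per-request object limit. Below is a minimal standalone sketch of the same chunking pattern, using plain strings instead of the AWS SDK types; the class and function names are illustrative only and are not part of the patch.

```cpp
#include <iostream>
#include <list>
#include <string>
#include <utility>
#include <vector>

// Illustrative stand-in for S3PathKeeper: keys are appended into chunks of at
// most `chunk_limit` entries, and removePaths() hands each chunk to a callback
// (in the real code, one DeleteObjects request per chunk).
class PathChunker
{
public:
    explicit PathChunker(size_t chunk_limit_) : chunk_limit(chunk_limit_) {}

    void addPath(const std::string & path)
    {
        if (chunks.empty() || chunks.back().size() >= chunk_limit)
            chunks.emplace_back(); /// start a new chunk
        chunks.back().push_back(path);
    }

    template <typename F>
    void removePaths(F && remove_chunk_func)
    {
        for (auto & chunk : chunks)
            remove_chunk_func(std::move(chunk));
    }

private:
    size_t chunk_limit;
    std::list<std::vector<std::string>> chunks;
};

int main()
{
    PathChunker keeper(3); /// AWS allows up to 1000 keys per DeleteObjects request; 3 keeps the demo short
    for (int i = 0; i < 7; ++i)
        keeper.addPath("data/key_" + std::to_string(i));

    keeper.removePaths([](std::vector<std::string> && chunk)
    {
        std::cout << "batch of " << chunk.size() << ':';
        for (const auto & key : chunk)
            std::cout << ' ' << key;
        std::cout << '\n';
    });
}
```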
diff --git a/src/Disks/S3/S3Capabilities.cpp b/src/Disks/S3/S3Capabilities.cpp
new file mode 100644
index 000000000000..f96f0b5539a6
--- /dev/null
+++ b/src/Disks/S3/S3Capabilities.cpp
@@ -0,0 +1,15 @@
+#include
+
+namespace DB
+{
+
+S3Capabilities getCapabilitiesFromConfig(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix)
+{
+ return S3Capabilities
+ {
+ .support_batch_delete = config.getBool(config_prefix + ".support_batch_delete", true),
+ .support_proxy = config.getBool(config_prefix + ".support_proxy", config.has(config_prefix + ".proxy")),
+ };
+}
+
+}
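getCapabilitiesFromConfig reads two optional booleans from the disk's configuration prefix: support_batch_delete defaults to true, while support_proxy defaults to true only when a proxy section is present under the same prefix. The following standalone sketch mirrors that lookup semantics with a flat string map standing in for Poco::Util::AbstractConfiguration; the "disks.s3" prefix and the map-based config are assumptions made for the example, not the real ClickHouse API.

```cpp
#include <iostream>
#include <map>
#include <string>

// Same defaults as the patch: batch delete is assumed supported unless the
// disk config says otherwise; proxy support defaults to "a proxy is configured".
struct S3Capabilities
{
    bool support_batch_delete{true};
    bool support_proxy{false};
};

using Config = std::map<std::string, std::string>;

static bool getBool(const Config & config, const std::string & key, bool default_value)
{
    auto it = config.find(key);
    return it == config.end() ? default_value : it->second == "true";
}

S3Capabilities getCapabilitiesFromConfig(const Config & config, const std::string & prefix)
{
    return S3Capabilities{
        .support_batch_delete = getBool(config, prefix + ".support_batch_delete", true),
        .support_proxy = getBool(config, prefix + ".support_proxy", config.count(prefix + ".proxy") > 0),
    };
}

int main()
{
    /// A disk that explicitly disables batch delete (e.g. a GCS-backed "S3" disk).
    Config config{{"disks.s3.support_batch_delete", "false"}};
    auto caps = getCapabilitiesFromConfig(config, "disks.s3");
    std::cout << std::boolalpha
              << "batch delete: " << caps.support_batch_delete
              << ", proxy: " << caps.support_proxy << '\n'; /// prints: batch delete: false, proxy: false
}
```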
diff --git a/src/Disks/S3/S3Capabilities.h b/src/Disks/S3/S3Capabilities.h
new file mode 100644
index 000000000000..46e647da89e5
--- /dev/null
+++ b/src/Disks/S3/S3Capabilities.h
@@ -0,0 +1,27 @@
+#pragma once
+
+#include
+#include
+
+namespace DB
+{
+
+/// Supported/unsupported features by different S3 implementations
+/// Useful only for implementations that are almost, but not fully, compatible with AWS S3.
+struct S3Capabilities
+{
+ /// Google S3 implementation doesn't support batch delete
+ /// TODO: possibly we have to use Google SDK https://github.com/googleapis/google-cloud-cpp/tree/main/google/cloud/storage
+ /// because it looks like it is missing a lot of features, such as:
+ /// 1) batch delete
+ /// 2) list_v2
+ /// 3) multipart upload works differently
+ bool support_batch_delete{true};
+
+ /// Y.Cloud S3 implementation supports proxy for connections
+ bool support_proxy{false};
+};
+
+S3Capabilities getCapabilitiesFromConfig(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix);
+
+}
diff --git a/src/Disks/S3/registerDiskS3.cpp b/src/Disks/S3/registerDiskS3.cpp
index 2b5fe3c5a81b..474378f4f3e3 100644
--- a/src/Disks/S3/registerDiskS3.cpp
+++ b/src/Disks/S3/registerDiskS3.cpp
@@ -9,6 +9,8 @@
#if USE_AWS_S3
#include
+#include
+#include
#include
#include "DiskS3.h"
#include "Disks/DiskCacheWrapper.h"
@@ -21,6 +23,7 @@
#include "Disks/RemoteDisksCommon.h"
#include
+
namespace DB
{
namespace ErrorCodes
@@ -46,7 +49,79 @@ void checkReadAccess(const String & disk_name, IDisk & disk)
throw Exception("No read access to S3 bucket in disk " + disk_name, ErrorCodes::PATH_ACCESS_DENIED);
}
-void checkRemoveAccess(IDisk & disk) { disk.removeFile("test_acl"); }
+void checkRemoveAccess(IDisk & disk)
+{
+ disk.removeFile("test_acl");
+}
+
+bool checkBatchRemoveIsMissing(DiskS3 & disk, std::unique_ptr settings, const String & bucket)
+{
+ const String path = "_test_remove_objects_capability";
+ try
+ {
+ auto file = disk.writeFile(path, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite);
+ file->write("test", 4);
+ file->finalize();
+ }
+ catch (...)
+ {
+ try
+ {
+ disk.removeFile(path);
+ }
+ catch (...)
+ {
+ }
+ return false; /// We don't have write access, therefore no information about batch remove.
+ }
+
+ /// See `IDiskRemote::removeSharedFile`.
+ auto fs_paths_keeper = std::dynamic_pointer_cast<S3PathKeeper>(disk.createFSPathKeeper());
+ disk.removeMetadata(path, fs_paths_keeper);
+
+ auto fs_paths_keeper_copy = std::dynamic_pointer_cast<S3PathKeeper>(disk.createFSPathKeeper());
+ for (const auto & chunk : fs_paths_keeper->getChunks())
+ for (const auto & obj : chunk)
+ fs_paths_keeper_copy->addPath(obj.GetKey());
+
+ try
+ {
+ /// See `DiskS3::removeFromRemoteFS`.
+ fs_paths_keeper->removePaths([&](S3PathKeeper::Chunk && chunk)
+ {
+ String keys = S3PathKeeper::getChunkKeys(chunk);
+ LOG_TRACE(&Poco::Logger::get("registerDiskS3"), "Remove AWS keys {}", keys);
+ Aws::S3::Model::Delete delkeys;
+ delkeys.SetObjects(chunk);
+ Aws::S3::Model::DeleteObjectsRequest request;
+ request.SetBucket(bucket);
+ request.SetDelete(delkeys);
+ auto outcome = settings->client->DeleteObjects(request);
+ if (!outcome.IsSuccess())
+ {
+ const auto & err = outcome.GetError();
+ throw Exception(err.GetMessage(), static_cast(err.GetErrorType()));
+ }
+ });
+ return false;
+ }
+ catch (const Exception &)
+ {
+ fs_paths_keeper_copy->removePaths([&](S3PathKeeper::Chunk && chunk)
+ {
+ String keys = S3PathKeeper::getChunkKeys(chunk);
+ LOG_TRACE(&Poco::Logger::get("registerDiskS3"), "Remove AWS keys {} one by one", keys);
+ for (const auto & obj : chunk)
+ {
+ Aws::S3::Model::DeleteObjectRequest request;
+ request.SetBucket(bucket);
+ request.SetKey(obj.GetKey());
+ settings->client->DeleteObject(request);
+ }
+ });
+ return true;
+ }
+}
std::shared_ptr getProxyResolverConfiguration(
const String & prefix, const Poco::Util::AbstractConfiguration & proxy_resolver_config)
@@ -187,6 +262,7 @@ void registerDiskS3(DiskFactory & factory)
auto [metadata_path, metadata_disk] = prepareForLocalMetadata(name, config, config_prefix, context);
FileCachePtr cache = getCachePtrForDisk(name, config, config_prefix, context);
+ S3Capabilities s3_capabilities = getCapabilitiesFromConfig(config, config_prefix);
std::shared_ptr s3disk = std::make_shared(
name,
@@ -195,11 +271,30 @@ void registerDiskS3(DiskFactory & factory)
metadata_disk,
std::move(cache),
context,
+ s3_capabilities,
getSettings(config, config_prefix, context),
getSettings);
+ bool skip_access_check = config.getBool(config_prefix + ".skip_access_check", false);
+
+ if (!skip_access_check)
+ {
+ /// If `support_batch_delete` is turned on (default), check and possibly switch it off.
+ if (s3_capabilities.support_batch_delete && checkBatchRemoveIsMissing(*std::dynamic_pointer_cast<DiskS3>(s3disk), getSettings(config, config_prefix, context), uri.bucket))
+ {
+ LOG_WARNING(
+ &Poco::Logger::get("registerDiskS3"),
+ "Storage for disk {} does not support batch delete operations, "
+ "so `s3_capabilities.support_batch_delete` was automatically turned off during the access check. "
+ "To remove this message set `s3_capabilities.support_batch_delete` for the disk to `false`.",
+ name
+ );
+ std::dynamic_pointer_cast<DiskS3>(s3disk)->setCapabilitiesSupportBatchDelete(false);
+ }
+ }
+
/// This code is used only to check access to the corresponding disk.
- if (!config.getBool(config_prefix + ".skip_access_check", false))
+ if (!skip_access_check)
{
checkWriteAccess(*s3disk);
checkReadAccess(name, *s3disk);
diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp
index e3191fefa004..61fd9c0baf20 100644
--- a/src/Interpreters/ExpressionAnalyzer.cpp
+++ b/src/Interpreters/ExpressionAnalyzer.cpp
@@ -607,7 +607,7 @@ void ExpressionAnalyzer::makeAggregateDescriptions(ActionsDAGPtr & actions, Aggr
}
}
-void makeWindowDescriptionFromAST(const Context & context,
+void ExpressionAnalyzer::makeWindowDescriptionFromAST(const Context & context_,
const WindowDescriptions & existing_descriptions,
WindowDescription & desc, const IAST * ast)
{
@@ -676,6 +676,10 @@ void makeWindowDescriptionFromAST(const Context & context,
desc.partition_by.push_back(SortColumnDescription(
with_alias->getColumnName(), 1 /* direction */,
1 /* nulls_direction */));
+
+ auto actions_dag = std::make_shared<ActionsDAG>(columns_after_join);
+ getRootActions(column_ast, false, actions_dag);
+ desc.partition_by_actions.push_back(std::move(actions_dag));
}
}
@@ -693,6 +697,10 @@ void makeWindowDescriptionFromAST(const Context & context,
order_by_element.children.front()->getColumnName(),
order_by_element.direction,
order_by_element.nulls_direction));
+
+ auto actions_dag = std::make_shared<ActionsDAG>(columns_after_join);
+ getRootActions(column_ast, false, actions_dag);
+ desc.order_by_actions.push_back(std::move(actions_dag));
}
}
@@ -719,14 +727,14 @@ void makeWindowDescriptionFromAST(const Context & context,
if (definition.frame_end_type == WindowFrame::BoundaryType::Offset)
{
auto [value, _] = evaluateConstantExpression(definition.frame_end_offset,
- context.shared_from_this());
+ context_.shared_from_this());
desc.frame.end_offset = value;
}
if (definition.frame_begin_type == WindowFrame::BoundaryType::Offset)
{
auto [value, _] = evaluateConstantExpression(definition.frame_begin_offset,
- context.shared_from_this());
+ context_.shared_from_this());
desc.frame.begin_offset = value;
}
}
diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h
index a034dd573931..6e3bc3f81b47 100644
--- a/src/Interpreters/ExpressionAnalyzer.h
+++ b/src/Interpreters/ExpressionAnalyzer.h
@@ -132,6 +132,7 @@ class ExpressionAnalyzer : protected ExpressionAnalyzerData, private boost::nonc
/// A list of windows for window functions.
const WindowDescriptions & windowDescriptions() const { return window_descriptions; }
+ void makeWindowDescriptionFromAST(const Context & context, const WindowDescriptions & existing_descriptions, WindowDescription & desc, const IAST * ast);
void makeWindowDescriptions(ActionsDAGPtr actions);
/**
diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp
index 35d20be46553..ffa094a82ae5 100644
--- a/src/Interpreters/InterpreterSelectQuery.cpp
+++ b/src/Interpreters/InterpreterSelectQuery.cpp
@@ -811,7 +811,7 @@ static FillColumnDescription getWithFillDescription(const ASTOrderByElement & or
return descr;
}
-static SortDescription getSortDescription(const ASTSelectQuery & query, ContextPtr context)
+SortDescription InterpreterSelectQuery::getSortDescription(const ASTSelectQuery & query, ContextPtr context_)
{
SortDescription order_descr;
order_descr.reserve(query.orderBy()->children.size());
@@ -826,7 +826,7 @@ static SortDescription getSortDescription(const ASTSelectQuery & query, ContextP
if (order_by_elem.with_fill)
{
- FillColumnDescription fill_desc = getWithFillDescription(order_by_elem, context);
+ FillColumnDescription fill_desc = getWithFillDescription(order_by_elem, context_);
order_descr.emplace_back(name, order_by_elem.direction, order_by_elem.nulls_direction, collator, true, fill_desc);
}
else
@@ -885,12 +885,12 @@ static std::pair getLimitLengthAndOffset(const ASTSelectQuery &
}
-static UInt64 getLimitForSorting(const ASTSelectQuery & query, ContextPtr context)
+UInt64 InterpreterSelectQuery::getLimitForSorting(const ASTSelectQuery & query, ContextPtr context_)
{
/// Partial sort can be done if there is LIMIT but no DISTINCT or LIMIT BY, neither ARRAY JOIN.
if (!query.distinct && !query.limitBy() && !query.limit_with_ties && !query.arrayJoinExpressionList().first && query.limitLength())
{
- auto [limit_length, limit_offset] = getLimitLengthAndOffset(query, context);
+ auto [limit_length, limit_offset] = getLimitLengthAndOffset(query, context_);
if (limit_length > std::numeric_limits<UInt64>::max() - limit_offset)
return 0;
diff --git a/src/Interpreters/InterpreterSelectQuery.h b/src/Interpreters/InterpreterSelectQuery.h
index aa41d8376013..0eb6ae06e82f 100644
--- a/src/Interpreters/InterpreterSelectQuery.h
+++ b/src/Interpreters/InterpreterSelectQuery.h
@@ -106,6 +106,9 @@ class InterpreterSelectQuery : public IInterpreterUnionOrSelectQuery
Names getRequiredColumns() { return required_columns; }
+ static SortDescription getSortDescription(const ASTSelectQuery & query, ContextPtr context);
+ static UInt64 getLimitForSorting(const ASTSelectQuery & query, ContextPtr context);
+
private:
InterpreterSelectQuery(
const ASTPtr & query_ptr_,
diff --git a/src/Interpreters/WindowDescription.h b/src/Interpreters/WindowDescription.h
index bb0130b4d4ee..65c8cb9423c9 100644
--- a/src/Interpreters/WindowDescription.h
+++ b/src/Interpreters/WindowDescription.h
@@ -7,6 +7,7 @@
#include
#include
#include
+#include
namespace DB
{
@@ -90,6 +91,9 @@ struct WindowDescription
// then by ORDER BY. This field holds this combined sort order.
SortDescription full_sort_description;
+ std::vector<ActionsDAGPtr> partition_by_actions;
+ std::vector<ActionsDAGPtr> order_by_actions;
+
WindowFrame frame;
// The window functions that are calculated for this window.
diff --git a/src/Processors/QueryPlan/Optimizations/Optimizations.h b/src/Processors/QueryPlan/Optimizations/Optimizations.h
index 10bc62935371..11bdb1c95e50 100644
--- a/src/Processors/QueryPlan/Optimizations/Optimizations.h
+++ b/src/Processors/QueryPlan/Optimizations/Optimizations.h
@@ -44,16 +44,21 @@ size_t tryMergeExpressions(QueryPlan::Node * parent_node, QueryPlan::Nodes &);
/// May split FilterStep and push down only part of it.
size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes);
+/// Utilize storage sorting when sorting for window functions.
+/// Update information about prefix sort description in SortingStep.
+size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes);
+
inline const auto & getOptimizations()
{
- static const std::array<Optimization, 5> optimizations =
+ static const std::array<Optimization, 6> optimizations =
{{
{tryLiftUpArrayJoin, "liftUpArrayJoin", &QueryPlanOptimizationSettings::optimize_plan},
{tryPushDownLimit, "pushDownLimit", &QueryPlanOptimizationSettings::optimize_plan},
{trySplitFilter, "splitFilter", &QueryPlanOptimizationSettings::optimize_plan},
{tryMergeExpressions, "mergeExpressions", &QueryPlanOptimizationSettings::optimize_plan},
{tryPushDownFilter, "pushDownFilter", &QueryPlanOptimizationSettings::filter_push_down},
- }};
+ {tryReuseStorageOrderingForWindowFunctions, "reuseStorageOrderingForWindowFunctions", &QueryPlanOptimizationSettings::optimize_plan}
+ }};
return optimizations;
}
diff --git a/src/Processors/QueryPlan/Optimizations/reuseStorageOrderingForWindowFunctions.cpp b/src/Processors/QueryPlan/Optimizations/reuseStorageOrderingForWindowFunctions.cpp
new file mode 100644
index 000000000000..c68ec47edff0
--- /dev/null
+++ b/src/Processors/QueryPlan/Optimizations/reuseStorageOrderingForWindowFunctions.cpp
@@ -0,0 +1,122 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace DB::QueryPlanOptimizations
+{
+
+size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node, QueryPlan::Nodes & /*nodes*/)
+{
+ /// Find the following sequence of steps, add InputOrderInfo and apply prefix sort description to
+ /// SortingStep:
+ /// WindowStep <- SortingStep <- [Expression] <- [SettingQuotaAndLimits] <- ReadFromMergeTree
+
+ auto * window_node = parent_node;
+ auto * window = typeid_cast<WindowStep *>(window_node->step.get());
+ if (!window)
+ return 0;
+ if (window_node->children.size() != 1)
+ return 0;
+
+ auto * sorting_node = window_node->children.front();
+ auto * sorting = typeid_cast<SortingStep *>(sorting_node->step.get());
+ if (!sorting)
+ return 0;
+ if (sorting_node->children.size() != 1)
+ return 0;
+
+ auto * possible_read_from_merge_tree_node = sorting_node->children.front();
+
+ if (typeid_cast<ExpressionStep *>(possible_read_from_merge_tree_node->step.get()))
+ {
+ if (possible_read_from_merge_tree_node->children.size() != 1)
+ return 0;
+
+ possible_read_from_merge_tree_node = possible_read_from_merge_tree_node->children.front();
+ }
+
+ if (typeid_cast<SettingQuotaAndLimitsStep *>(possible_read_from_merge_tree_node->step.get()))
+ {
+ if (possible_read_from_merge_tree_node->children.size() != 1)
+ return 0;
+
+ possible_read_from_merge_tree_node = possible_read_from_merge_tree_node->children.front();
+ }
+
+ auto * read_from_merge_tree = typeid_cast<ReadFromMergeTree *>(possible_read_from_merge_tree_node->step.get());
+ if (!read_from_merge_tree)
+ {
+ return 0;
+ }
+
+ auto context = read_from_merge_tree->getContext();
+ if (!context->getSettings().optimize_read_in_window_order)
+ {
+ return 0;
+ }
+
+ const auto & query_info = read_from_merge_tree->getQueryInfo();
+ const auto * select_query = query_info.query->as<ASTSelectQuery>();
+
+ ManyExpressionActions order_by_elements_actions;
+ const auto & window_desc = window->getWindowDescription();
+
+ for (const auto & actions_dag : window_desc.partition_by_actions)
+ {
+ order_by_elements_actions.emplace_back(
+ std::make_shared<ExpressionActions>(actions_dag, ExpressionActionsSettings::fromContext(context, CompileExpressions::yes)));
+ }
+
+ for (const auto & actions_dag : window_desc.order_by_actions)
+ {
+ order_by_elements_actions.emplace_back(
+ std::make_shared<ExpressionActions>(actions_dag, ExpressionActionsSettings::fromContext(context, CompileExpressions::yes)));
+ }
+
+ auto order_optimizer = std::make_shared<ReadInOrderOptimizer>(
+ *select_query,
+ order_by_elements_actions,
+ window->getWindowDescription().full_sort_description,
+ query_info.syntax_analyzer_result);
+
+ read_from_merge_tree->setQueryInfoOrderOptimizer(order_optimizer);
+
+ /// If there is no filtration, we can push the limit down to the reading stage as an optimization.
+ UInt64 limit = (select_query->hasFiltration() || select_query->groupBy()) ? 0 : InterpreterSelectQuery::getLimitForSorting(*select_query, context);
+
+ auto order_info = order_optimizer->getInputOrder(
+ query_info.projection ? query_info.projection->desc->metadata : read_from_merge_tree->getStorageMetadata(),
+ context,
+ limit);
+
+ if (order_info)
+ {
+ read_from_merge_tree->setQueryInfoInputOrderInfo(order_info);
+ sorting->convertToFinishSorting(order_info->order_key_prefix_descr);
+ }
+
+ return 0;
+}
+
+}
diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp
index 1bfc1ec7306f..abc753c5fa7b 100644
--- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp
+++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp
@@ -977,6 +977,30 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead(
return std::make_shared(MergeTreeDataSelectAnalysisResult{.result = std::move(result)});
}
+void ReadFromMergeTree::setQueryInfoOrderOptimizer(std::shared_ptr<ReadInOrderOptimizer> order_optimizer)
+{
+ if (query_info.projection)
+ {
+ query_info.projection->order_optimizer = order_optimizer;
+ }
+ else
+ {
+ query_info.order_optimizer = order_optimizer;
+ }
+}
+
+void ReadFromMergeTree::setQueryInfoInputOrderInfo(InputOrderInfoPtr order_info)
+{
+ if (query_info.projection)
+ {
+ query_info.projection->input_order_info = order_info;
+ }
+ else
+ {
+ query_info.input_order_info = order_info;
+ }
+}
+
ReadFromMergeTree::AnalysisResult ReadFromMergeTree::getAnalysisResult() const
{
auto result_ptr = analyzed_result_ptr ? analyzed_result_ptr : selectRangesToRead(prepared_parts);
@@ -1060,7 +1084,7 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons
column_names_to_read,
result_projection);
}
- else if ((settings.optimize_read_in_order || settings.optimize_aggregation_in_order) && input_order_info)
+ else if ((settings.optimize_read_in_order || settings.optimize_aggregation_in_order || settings.optimize_read_in_window_order) && input_order_info)
{
pipe = spreadMarkRangesAmongStreamsWithOrder(
std::move(result.parts_with_ranges),
diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h
index 685b99a7bdcb..16333edcaf3b 100644
--- a/src/Processors/QueryPlan/ReadFromMergeTree.h
+++ b/src/Processors/QueryPlan/ReadFromMergeTree.h
@@ -128,6 +128,13 @@ class ReadFromMergeTree final : public ISourceStep
bool sample_factor_column_queried,
Poco::Logger * log);
+ ContextPtr getContext() const { return context; }
+ const SelectQueryInfo & getQueryInfo() const { return query_info; }
+ StorageMetadataPtr getStorageMetadata() const { return metadata_for_reading; }
+
+ void setQueryInfoOrderOptimizer(std::shared_ptr<ReadInOrderOptimizer> read_in_order_optimizer);
+ void setQueryInfoInputOrderInfo(InputOrderInfoPtr order_info);
+
private:
const MergeTreeReaderSettings reader_settings;
diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp
index 32b314b1c505..602680e17182 100644
--- a/src/Processors/QueryPlan/SortingStep.cpp
+++ b/src/Processors/QueryPlan/SortingStep.cpp
@@ -97,6 +97,12 @@ void SortingStep::updateLimit(size_t limit_)
}
}
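+/// Used by the read-in-window-order optimization: when rows already arrive sorted by
+/// a prefix of the required description, the full sort is downgraded to a finish sort.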
+void SortingStep::convertToFinishSorting(SortDescription prefix_description_)
+{
+ type = Type::FinishSorting;
+ prefix_description = std::move(prefix_description_);
+}
+
void SortingStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &)
{
if (type == Type::FinishSorting)
diff --git a/src/Processors/QueryPlan/SortingStep.h b/src/Processors/QueryPlan/SortingStep.h
index 8e253e71f441..4da98a15f65d 100644
--- a/src/Processors/QueryPlan/SortingStep.h
+++ b/src/Processors/QueryPlan/SortingStep.h
@@ -49,6 +49,8 @@ class SortingStep : public ITransformingStep
/// Add limit or change it to lower value.
void updateLimit(size_t limit_);
+ void convertToFinishSorting(SortDescription prefix_description);
+
private:
enum class Type
diff --git a/src/Processors/QueryPlan/WindowStep.cpp b/src/Processors/QueryPlan/WindowStep.cpp
index cd4bb5f67307..0916b43b29a5 100644
--- a/src/Processors/QueryPlan/WindowStep.cpp
+++ b/src/Processors/QueryPlan/WindowStep.cpp
@@ -138,4 +138,9 @@ void WindowStep::describeActions(JSONBuilder::JSONMap & map) const
map.add("Functions", std::move(functions_array));
}
+const WindowDescription & WindowStep::getWindowDescription() const
+{
+ return window_description;
+}
+
}
diff --git a/src/Processors/QueryPlan/WindowStep.h b/src/Processors/QueryPlan/WindowStep.h
index a65b157f4817..9b58cceb972b 100644
--- a/src/Processors/QueryPlan/WindowStep.h
+++ b/src/Processors/QueryPlan/WindowStep.h
@@ -25,6 +25,8 @@ class WindowStep : public ITransformingStep
void describeActions(JSONBuilder::JSONMap & map) const override;
void describeActions(FormatSettings & settings) const override;
+ const WindowDescription & getWindowDescription() const;
+
private:
WindowDescription window_description;
std::vector<WindowFunctionDescription> window_functions;
diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp
index b81ed099915f..2a2fed1cc078 100644
--- a/src/Processors/Transforms/WindowTransform.cpp
+++ b/src/Processors/Transforms/WindowTransform.cpp
@@ -1,5 +1,7 @@
#include
+#include
+
#include
#include
#include
@@ -14,6 +16,7 @@
#include
#include
+
namespace DB
{
@@ -1538,65 +1541,21 @@ struct WindowFunctionDenseRank final : public WindowFunction
namespace recurrent_detail
{
- template<typename T> T getLastValueFromInputColumn(const WindowTransform * /*transform*/, size_t /*function_index*/, size_t /*column_index*/)
+ template<typename T> T getValue(const WindowTransform * /*transform*/, size_t /*function_index*/, size_t /*column_index*/, RowNumber /*row*/)
{
- throw Exception(ErrorCodes::NOT_IMPLEMENTED, "getLastValueFromInputColumn() is not implemented for {} type", typeid(T).name());
+ throw Exception(ErrorCodes::NOT_IMPLEMENTED, "recurrent_detail::getValue() is not implemented for {} type", typeid(T).name());
}
- template<> Float64 getLastValueFromInputColumn<Float64>(const WindowTransform * transform, size_t function_index, size_t column_index)
+ template<> Float64 getValue<Float64>(const WindowTransform * transform, size_t function_index, size_t column_index, RowNumber row)
{
const auto & workspace = transform->workspaces[function_index];
- auto current_row = transform->current_row;
-
- if (current_row.row == 0)
- {
- if (current_row.block > 0)
- {
- const auto & column = transform->blockAt(current_row.block - 1).input_columns[workspace.argument_column_indices[column_index]];
- return column->getFloat64(column->size() - 1);
- }
- }
- else
- {
- const auto & column = transform->blockAt(current_row.block).input_columns[workspace.argument_column_indices[column_index]];
- return column->getFloat64(current_row.row - 1);
- }
-
- return 0;
- }
-
- template<typename T> T getLastValueFromState(const WindowTransform * /*transform*/, size_t /*function_index*/, size_t /*data_index*/)
- {
- throw Exception(ErrorCodes::NOT_IMPLEMENTED, "getLastValueFromInputColumn() is not implemented for {} type", typeid(T).name());
- }
-
- template<> Float64 getLastValueFromState<Float64>(const WindowTransform * transform, size_t function_index, size_t data_index)
- {
- const auto & workspace = transform->workspaces[function_index];
- if (workspace.aggregate_function_state.data() == nullptr)
- {
- return 0.0;
- }
- else
- {
- return static_cast<const Float64 *>(static_cast<const void *>(workspace.aggregate_function_state.data()))[data_index];
- }
- }
-
- template<typename T> void setValueToState(const WindowTransform * /*transform*/, size_t /*function_index*/, T /*value*/, size_t /*data_index*/)
- {
- throw Exception(ErrorCodes::NOT_IMPLEMENTED, "setValueToState() is not implemented for {} type", typeid(T).name());
- }
-
- template<> void setValueToState(const WindowTransform * transform, size_t function_index, Float64 value, size_t data_index)
- {
- const auto & workspace = transform->workspaces[function_index];
- static_cast<Float64 *>(static_cast<void *>(workspace.aggregate_function_state.data()))[data_index] = value;
+ const auto & column = transform->blockAt(row.block).input_columns[workspace.argument_column_indices[column_index]];
+ return column->getFloat64(row.row);
}
template<typename T> void setValueToOutputColumn(const WindowTransform * /*transform*/, size_t /*function_index*/, T /*value*/)
{
- throw Exception(ErrorCodes::NOT_IMPLEMENTED, "setValueToOutputColumn() is not implemented for {} type", typeid(T).name());
+ throw Exception(ErrorCodes::NOT_IMPLEMENTED, "recurrent_detail::setValueToOutputColumn() is not implemented for {} type", typeid(T).name());
}
template<> void setValueToOutputColumn(const WindowTransform * transform, size_t function_index, Float64 value)
@@ -1607,82 +1566,77 @@ namespace recurrent_detail
assert_cast<ColumnFloat64 &>(to).getData().push_back(value);
}
+}
- template<typename T> T getCurrentValueFromInputColumn(const WindowTransform * /*transform*/, size_t /*function_index*/, size_t /*column_index*/)
+struct WindowFunctionHelpers
+{
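+ /// Thin type-dispatching wrappers over recurrent_detail, letting window functions
+ /// read argument values and write results for arbitrary rows.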
+ template<typename T>
+ static T getValue(const WindowTransform * transform, size_t function_index, size_t column_index, RowNumber row)
{
- throw Exception(ErrorCodes::NOT_IMPLEMENTED, "getCurrentValueFromInputColumn() is not implemented for {} type", typeid(T).name());
+ return recurrent_detail::getValue<T>(transform, function_index, column_index, row);
}
- template<> Float64 getCurrentValueFromInputColumn<Float64>(const WindowTransform * transform, size_t function_index, size_t column_index)
+ template<typename T>
+ static void setValueToOutputColumn(const WindowTransform * transform, size_t function_index, T value)
{
- const auto & workspace = transform->workspaces[function_index];
- auto current_row = transform->current_row;
- const auto & current_block = transform->blockAt(current_row);
-
- return (*current_block.input_columns[workspace.argument_column_indices[column_index]]).getFloat64(transform->current_row.row);
+ recurrent_detail::setValueToOutputColumn<T>(transform, function_index, value);
}
-}
+};
-template<size_t state_size>
-struct RecurrentWindowFunction : public WindowFunction
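+/// Replacement for RecurrentWindowFunction: keeps an arbitrary State object per
+/// function instance, placement-constructed in the aggregate data and destroyed in destroy().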
+template<typename State>
+struct StatefulWindowFunction : public WindowFunction
{
- RecurrentWindowFunction(const std::string & name_,
+ StatefulWindowFunction(const std::string & name_,
const DataTypes & argument_types_, const Array & parameters_)
: WindowFunction(name_, argument_types_, parameters_)
{
}
- size_t sizeOfData() const override { return sizeof(Float64)*state_size; }
+ size_t sizeOfData() const override { return sizeof(State); }
size_t alignOfData() const override { return 1; }
void create(AggregateDataPtr __restrict place) const override
{
- auto * const state = static_cast<Float64 *>(static_cast<void *>(place));
- for (size_t i = 0; i < state_size; ++i)
- state[i] = 0.0;
+ new (place) State();
}
- template<typename T>
- static T getLastValueFromInputColumn(const WindowTransform * transform, size_t function_index, size_t column_index)
+ void destroy(AggregateDataPtr __restrict place) const noexcept override
{
- return recurrent_detail::getLastValueFromInputColumn<T>(transform, function_index, column_index);
+ auto * const state = static_cast<State *>(static_cast<void *>(place));
+ state->~State();
}
- template<typename T>
- static T getLastValueFromState(const WindowTransform * transform, size_t function_index, size_t data_index)
+ State & getState(const WindowFunctionWorkspace & workspace)
{
- return recurrent_detail::getLastValueFromState<T>(transform, function_index, data_index);
- }
-
- template<typename T>
- static void setValueToState(const WindowTransform * transform, size_t function_index, T value, size_t data_index)
- {
- recurrent_detail::setValueToState(transform, function_index, value, data_index);
+ return *static_cast<State *>(static_cast<void *>(workspace.aggregate_function_state.data()));
}
+};
- template<typename T>
- static void setValueToOutputColumn(const WindowTransform * transform, size_t function_index, T value)
- {
- recurrent_detail::setValueToOutputColumn(transform, function_index, value);
- }
+struct ExponentialTimeDecayedSumState
+{
+ RowNumber previous_frame_start;
+ RowNumber previous_frame_end;
+ Float64 previous_time;
+ Float64 previous_sum;
+};
- template<typename T>
- static T getCurrentValueFromInputColumn(const WindowTransform * transform, size_t function_index, size_t column_index)
- {
- return recurrent_detail::getCurrentValueFromInputColumn<T>(transform, function_index, column_index);
- }
+struct ExponentialTimeDecayedAvgState
+{
+ RowNumber previous_frame_start;
+ RowNumber previous_frame_end;
+ Float64 previous_time;
+ Float64 previous_sum;
+ Float64 previous_count;
};
-struct WindowFunctionExponentialTimeDecayedSum final : public RecurrentWindowFunction<1>
+struct WindowFunctionExponentialTimeDecayedSum final : public StatefulWindowFunction<ExponentialTimeDecayedSumState>
{
static constexpr size_t ARGUMENT_VALUE = 0;
static constexpr size_t ARGUMENT_TIME = 1;
- static constexpr size_t STATE_SUM = 0;
-
WindowFunctionExponentialTimeDecayedSum(const std::string & name_,
const DataTypes & argument_types_, const Array & parameters_)
- : RecurrentWindowFunction(name_, argument_types_, parameters_)
+ : StatefulWindowFunction(name_, argument_types_, parameters_)
{
if (parameters_.size() != 1)
{
@@ -1724,33 +1678,60 @@ struct WindowFunctionExponentialTimeDecayedSum final : public RecurrentWindowFun
void windowInsertResultInto(const WindowTransform * transform,
size_t function_index) override
{
- Float64 last_sum = getLastValueFromState<Float64>(transform, function_index, STATE_SUM);
- Float64 last_t = getLastValueFromInputColumn<Float64>(transform, function_index, ARGUMENT_TIME);
+ const auto & workspace = transform->workspaces[function_index];
+ auto & state = getState(workspace);
+
+ Float64 result = 0;
+ Float64 curr_t = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_TIME, transform->current_row);
- Float64 x = getCurrentValueFromInputColumn<Float64>(transform, function_index, ARGUMENT_VALUE);
- Float64 t = getCurrentValueFromInputColumn<Float64>(transform, function_index, ARGUMENT_TIME);
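+ /// When the new frame overlaps the previous one, update the cached sum incrementally:
+ /// drop rows that left the frame, rescale the cached sum to the new reference time,
+ /// and add rows that entered the frame. Otherwise recompute the sum from scratch.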
+ if (state.previous_frame_start <= transform->frame_start
+ && transform->frame_start < state.previous_frame_end
+ && state.previous_frame_end <= transform->frame_end)
+ {
+ for (RowNumber i = state.previous_frame_start; i < transform->frame_start; transform->advanceRowNumber(i))
+ {
+ Float64 prev_val = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_VALUE, i);
+ Float64 prev_t = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_TIME, i);
+ result -= std::exp((prev_t - curr_t) / decay_length) * prev_val;
+ }
+ result += std::exp((state.previous_time - curr_t) / decay_length) * state.previous_sum;
+ for (RowNumber i = state.previous_frame_end; i < transform->frame_end; transform->advanceRowNumber(i))
+ {
+ Float64 prev_val = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_VALUE, i);
+ Float64 prev_t = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_TIME, i);
+ result += std::exp((prev_t - curr_t) / decay_length) * prev_val;
+ }
+ }
+ else
+ {
+ for (RowNumber i = transform->frame_start; i < transform->frame_end; transform->advanceRowNumber(i))
+ {
+ Float64 prev_val = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_VALUE, i);
+ Float64 prev_t = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_TIME, i);
+ result += std::exp((prev_t - curr_t) / decay_length) * prev_val;
+ }
+ }
- Float64 c = exp((last_t - t) / decay_length);
- Float64 result = x + c * last_sum;
+ state.previous_sum = result;
+ state.previous_time = curr_t;
+ state.previous_frame_start = transform->frame_start;
+ state.previous_frame_end = transform->frame_end;
- setValueToOutputColumn(transform, function_index, result);
- setValueToState(transform, function_index, result, STATE_SUM);
+ WindowFunctionHelpers::setValueToOutputColumn(transform, function_index, result);
}
private:
Float64 decay_length;
};
-struct WindowFunctionExponentialTimeDecayedMax final : public RecurrentWindowFunction<1>
+struct WindowFunctionExponentialTimeDecayedMax final : public WindowFunction
{
static constexpr size_t ARGUMENT_VALUE = 0;
static constexpr size_t ARGUMENT_TIME = 1;
- static constexpr size_t STATE_MAX = 0;
-
WindowFunctionExponentialTimeDecayedMax(const std::string & name_,
const DataTypes & argument_types_, const Array & parameters_)
- : RecurrentWindowFunction(name_, argument_types_, parameters_)
+ : WindowFunction(name_, argument_types_, parameters_)
{
if (parameters_.size() != 1)
{
@@ -1792,32 +1773,35 @@ struct WindowFunctionExponentialTimeDecayedMax final : public RecurrentWindowFun
void windowInsertResultInto(const WindowTransform * transform,
size_t function_index) override
{
- Float64 last_max = getLastValueFromState<Float64>(transform, function_index, STATE_MAX);
- Float64 last_t = getLastValueFromInputColumn<Float64>(transform, function_index, ARGUMENT_TIME);
+ Float64 result = std::numeric_limits<Float64>::lowest();
+ Float64 curr_t = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_TIME, transform->current_row);
- Float64 x = getCurrentValueFromInputColumn<Float64>(transform, function_index, ARGUMENT_VALUE);
- Float64 t = getCurrentValueFromInputColumn<Float64>(transform, function_index, ARGUMENT_TIME);
+ for (RowNumber i = transform->frame_start; i < transform->frame_end; transform->advanceRowNumber(i))
+ {
+ Float64 value = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_VALUE, i);
+ Float64 t = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_TIME, i);
- Float64 c = exp((last_t - t) / decay_length);
- Float64 result = std::max(x, c * last_max);
+ /// Avoiding extra calls to `exp` and multiplications.
+ if (value > result || t > curr_t || result < 0)
+ {
+ result = std::max(std::exp((t - curr_t) / decay_length) * value, result);
+ }
+ }
- setValueToOutputColumn(transform, function_index, result);
- setValueToState(transform, function_index, result, STATE_MAX);
+ WindowFunctionHelpers::setValueToOutputColumn(transform, function_index, result);
}
private:
Float64 decay_length;
};
-struct WindowFunctionExponentialTimeDecayedCount final : public RecurrentWindowFunction<1>
+struct WindowFunctionExponentialTimeDecayedCount final : public StatefulWindowFunction<ExponentialTimeDecayedSumState>
{
static constexpr size_t ARGUMENT_TIME = 0;
- static constexpr size_t STATE_COUNT = 0;
-
WindowFunctionExponentialTimeDecayedCount(const std::string & name_,
const DataTypes & argument_types_, const Array & parameters_)
- : RecurrentWindowFunction(name_, argument_types_, parameters_)
+ : StatefulWindowFunction(name_, argument_types_, parameters_)
{
if (parameters_.size() != 1)
{
@@ -1851,33 +1835,57 @@ struct WindowFunctionExponentialTimeDecayedCount final : public RecurrentWindowF
void windowInsertResultInto(const WindowTransform * transform,
size_t function_index) override
{
- Float64 last_count = getLastValueFromState<Float64>(transform, function_index, STATE_COUNT);
- Float64 last_t = getLastValueFromInputColumn<Float64>(transform, function_index, ARGUMENT_TIME);
+ const auto & workspace = transform->workspaces[function_index];
+ auto & state = getState(workspace);
+
+ Float64 result = 0;
+ Float64 curr_t = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_TIME, transform->current_row);
- Float64 t = getCurrentValueFromInputColumn<Float64>(transform, function_index, ARGUMENT_TIME);
+ if (state.previous_frame_start <= transform->frame_start
+ && transform->frame_start < state.previous_frame_end
+ && state.previous_frame_end <= transform->frame_end)
+ {
+ for (RowNumber i = state.previous_frame_start; i < transform->frame_start; transform->advanceRowNumber(i))
+ {
+ Float64 prev_t = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_TIME, i);
+ result -= std::exp((prev_t - curr_t) / decay_length);
+ }
+ result += std::exp((state.previous_time - curr_t) / decay_length) * state.previous_sum;
+ for (RowNumber i = state.previous_frame_end; i < transform->frame_end; transform->advanceRowNumber(i))
+ {
+ Float64 prev_t = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_TIME, i);
+ result += std::exp((prev_t - curr_t) / decay_length);
+ }
+ }
+ else
+ {
+ for (RowNumber i = transform->frame_start; i < transform->frame_end; transform->advanceRowNumber(i))
+ {
+ Float64 prev_t = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_TIME, i);
+ result += std::exp((prev_t - curr_t) / decay_length);
+ }
+ }
- Float64 c = exp((last_t - t) / decay_length);
- Float64 result = c * last_count + 1.0;
+ state.previous_sum = result;
+ state.previous_time = curr_t;
+ state.previous_frame_start = transform->frame_start;
+ state.previous_frame_end = transform->frame_end;
- setValueToOutputColumn(transform, function_index, result);
- setValueToState(transform, function_index, result, STATE_COUNT);
+ WindowFunctionHelpers::setValueToOutputColumn(transform, function_index, result);
}
private:
Float64 decay_length;
};
-struct WindowFunctionExponentialTimeDecayedAvg final : public RecurrentWindowFunction<2>
+struct WindowFunctionExponentialTimeDecayedAvg final : public StatefulWindowFunction<ExponentialTimeDecayedAvgState>
{
static constexpr size_t ARGUMENT_VALUE = 0;
static constexpr size_t ARGUMENT_TIME = 1;
- static constexpr size_t STATE_SUM = 0;
- static constexpr size_t STATE_COUNT = 1;
-
WindowFunctionExponentialTimeDecayedAvg(const std::string & name_,
const DataTypes & argument_types_, const Array & parameters_)
- : RecurrentWindowFunction(name_, argument_types_, parameters_)
+ : StatefulWindowFunction(name_, argument_types_, parameters_)
{
if (parameters_.size() != 1)
{
@@ -1919,21 +1927,60 @@ struct WindowFunctionExponentialTimeDecayedAvg final : public RecurrentWindowFun
void windowInsertResultInto(const WindowTransform * transform,
size_t function_index) override
{
- Float64 last_sum = getLastValueFromState<Float64>(transform, function_index, STATE_SUM);
- Float64 last_count = getLastValueFromState<Float64>(transform, function_index, STATE_COUNT);
- Float64 last_t = getLastValueFromInputColumn<Float64>(transform, function_index, ARGUMENT_TIME);
+ const auto & workspace = transform->workspaces[function_index];
+ auto & state = getState(workspace);
+
+ Float64 count = 0;
+ Float64 sum = 0;
+ Float64 curr_t = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_TIME, transform->current_row);
+
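+ /// Same incremental scheme as the exponentially decayed sum, but both the weighted
+ /// sum and the weighted count are cached; the result is their ratio.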
+ if (state.previous_frame_start <= transform->frame_start
+ && transform->frame_start < state.previous_frame_end
+ && state.previous_frame_end <= transform->frame_end)
+ {
+ for (RowNumber i = state.previous_frame_start; i < transform->frame_start; transform->advanceRowNumber(i))
+ {
+ Float64 prev_val = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_VALUE, i);
+ Float64 prev_t = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_TIME, i);
+ Float64 decay = std::exp((prev_t - curr_t) / decay_length);
+ sum -= decay * prev_val;
+ count -= decay;
+ }
- Float64 x = getCurrentValueFromInputColumn<Float64>(transform, function_index, ARGUMENT_VALUE);
- Float64 t = getCurrentValueFromInputColumn<Float64>(transform, function_index, ARGUMENT_TIME);
+ {
+ Float64 decay = std::exp((state.previous_time - curr_t) / decay_length);
+ sum += decay * state.previous_sum;
+ count += decay * state.previous_count;
+ }
+
+ for (RowNumber i = state.previous_frame_end; i < transform->frame_end; transform->advanceRowNumber(i))
+ {
+ Float64 prev_val = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_VALUE, i);
+ Float64 prev_t = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_TIME, i);
+ Float64 decay = std::exp((prev_t - curr_t) / decay_length);
+ sum += decay * prev_val;
+ count += decay;
+ }
+ }
+ else
+ {
+ for (RowNumber i = transform->frame_start; i < transform->frame_end; transform->advanceRowNumber(i))
+ {
+ Float64 prev_val = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_VALUE, i);
+ Float64 prev_t = WindowFunctionHelpers::getValue<Float64>(transform, function_index, ARGUMENT_TIME, i);
+ Float64 decay = std::exp((prev_t - curr_t) / decay_length);
+ sum += decay * prev_val;
+ count += decay;
+ }
+ }
- Float64 c = exp((last_t - t) / decay_length);
- Float64 new_sum = c * last_sum + x;
- Float64 new_count = c * last_count + 1.0;
- Float64 result = new_sum / new_count;
+ state.previous_sum = sum;
+ state.previous_count = count;
+ state.previous_time = curr_t;
+ state.previous_frame_start = transform->frame_start;
+ state.previous_frame_end = transform->frame_end;
- setValueToOutputColumn(transform, function_index, result);
- setValueToState(transform, function_index, new_sum, STATE_SUM);
- setValueToState(transform, function_index, new_count, STATE_COUNT);
+ WindowFunctionHelpers::setValueToOutputColumn(transform, function_index, sum/count);
}
private:
diff --git a/tests/ci/ast_fuzzer_check.py b/tests/ci/ast_fuzzer_check.py
index 94f5eff51d7e..d3c871207896 100644
--- a/tests/ci/ast_fuzzer_check.py
+++ b/tests/ci/ast_fuzzer_check.py
@@ -24,7 +24,7 @@
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
-IMAGE_NAME = "clickhouse/fuzzer"
+IMAGE_NAME = "altinityinfra/fuzzer"
def get_run_command(pr_number, sha, download_url, workspace_path, image):
diff --git a/tests/ci/build_check.py b/tests/ci/build_check.py
index 4c77f77a7021..cb8d03a83553 100644
--- a/tests/ci/build_check.py
+++ b/tests/ci/build_check.py
@@ -8,7 +8,7 @@
import time
from typing import List, Optional, Tuple
-from env_helper import REPO_COPY, TEMP_PATH, CACHES_PATH, IMAGES_PATH, GITHUB_JOB
+from env_helper import REPO_COPY, TEMP_PATH, CACHES_PATH, IMAGES_PATH, S3_BUILDS_BUCKET, GITHUB_JOB, CLICKHOUSE_STABLE_VERSION_SUFFIX
from s3_helper import S3Helper
from pr_info import PRInfo
from version_helper import (
@@ -21,7 +21,7 @@
from docker_pull_helper import get_image_with_version
from tee_popen import TeePopen
-IMAGE_NAME = "clickhouse/binary-builder"
+IMAGE_NAME = "altinityinfra/binary-builder"
def get_build_config(build_check_name: str, build_name: str) -> BuildConfig:
@@ -230,12 +230,12 @@ def main():
log_url = ""
for url in build_results:
if "build_log.log" in url:
- log_url = "https://s3.amazonaws.com/clickhouse-builds/" + url.replace(
+ log_url = f"https://s3.amazonaws.com/{S3_BUILDS_BUCKET}/" + url.replace(
"+", "%2B"
).replace(" ", "%20")
else:
build_urls.append(
- "https://s3.amazonaws.com/clickhouse-builds/"
+ f"https://s3.amazonaws.com/{S3_BUILDS_BUCKET}/"
+ url.replace("+", "%2B").replace(" ", "%20")
)
success = len(build_urls) > 0
@@ -259,15 +259,19 @@ def main():
logging.info("Got version from repo %s", version.string)
- official_flag = pr_info.number == 0
- version_type = "testing"
- if "release" in pr_info.labels or "release-lts" in pr_info.labels:
- version_type = "stable"
- official_flag = True
+ official_flag = True
+ version._flavour = version_type = CLICKHOUSE_STABLE_VERSION_SUFFIX
+ # TODO (vnemkov): right now we'll use simplified version management:
+ # only update git hash and explicitly set stable version suffix.
+ # official_flag = pr_info.number == 0
+ # version_type = "testing"
+ # if "release" in pr_info.labels or "release-lts" in pr_info.labels:
+ # version_type = CLICKHOUSE_STABLE_VERSION_SUFFIX
+ # official_flag = True
update_version_local(version, version_type)
- logging.info("Updated local files with version")
+ logging.info(f"Updated local files with version: {version.string} / {version.describe}")
logging.info("Build short name %s", build_name)
diff --git a/tests/ci/ccache_utils.py b/tests/ci/ccache_utils.py
index 7b0b0f01aa3b..734818f6ec6e 100644
--- a/tests/ci/ccache_utils.py
+++ b/tests/ci/ccache_utils.py
@@ -5,6 +5,7 @@
import sys
import os
import shutil
+from env_helper import S3_BUILDS_BUCKET
from pathlib import Path
import requests
@@ -71,7 +72,7 @@ def get_ccache_if_not_exists(
for obj in objects:
if ccache_name in obj:
logging.info("Found ccache on path %s", obj)
- url = "https://s3.amazonaws.com/clickhouse-builds/" + obj
+ url = f"https://s3.amazonaws.com/{S3_BUILDS_BUCKET}/" + obj
compressed_cache = os.path.join(temp_path, os.path.basename(obj))
dowload_file_with_progress(url, compressed_cache)
diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py
index 74dbe65911c8..08a608570a4a 100644
--- a/tests/ci/ci_config.py
+++ b/tests/ci/ci_config.py
@@ -188,15 +188,7 @@
},
"builds_report_config": {
"ClickHouse build check (actions)": [
- "package_release",
- "performance",
- "package_aarch64",
- "package_asan",
- "package_ubsan",
- "package_tsan",
- "package_msan",
- "package_debug",
- "binary_release",
+ "package_release"
],
"ClickHouse special build check (actions)": [
"binary_tidy",
diff --git a/tests/ci/clickhouse_helper.py b/tests/ci/clickhouse_helper.py
index 7ccbcb4a47e1..bfe2ed0e72b6 100644
--- a/tests/ci/clickhouse_helper.py
+++ b/tests/ci/clickhouse_helper.py
@@ -15,7 +15,9 @@ def __init__(self, url=None):
"X-ClickHouse-User": get_parameter_from_ssm(
"clickhouse-test-stat-login2"
),
- "X-ClickHouse-Key": "",
+ "X-ClickHouse-Key": get_parameter_from_ssm(
+ "clickhouse-test-stat-password"
+ ),
}
@staticmethod
@@ -117,7 +119,7 @@ def prepare_tests_results_for_clickhouse(
check_name,
):
- pull_request_url = "https://github.com/ClickHouse/ClickHouse/commits/master"
+ pull_request_url = "https://github.com/Altinity/ClickHouse/commits/master"
base_ref = "master"
head_ref = "master"
base_repo = pr_info.repo_full_name
diff --git a/tests/ci/codebrowser_check.py b/tests/ci/codebrowser_check.py
index 48c92e9f6acc..3a245005cb40 100644
--- a/tests/ci/codebrowser_check.py
+++ b/tests/ci/codebrowser_check.py
@@ -40,7 +40,7 @@ def get_run_command(repo_path, output_path, image):
if not os.path.exists(temp_path):
os.makedirs(temp_path)
- docker_image = get_image_with_version(IMAGES_PATH, "clickhouse/codebrowser")
+ docker_image = get_image_with_version(IMAGES_PATH, "altinityinfra/codebrowser")
s3_helper = S3Helper("https://s3.amazonaws.com")
result_path = os.path.join(temp_path, "result_path")
diff --git a/tests/ci/compatibility_check.py b/tests/ci/compatibility_check.py
index d546fabf2316..2351ef0c60a1 100644
--- a/tests/ci/compatibility_check.py
+++ b/tests/ci/compatibility_check.py
@@ -24,8 +24,8 @@
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
-IMAGE_UBUNTU = "clickhouse/test-old-ubuntu"
-IMAGE_CENTOS = "clickhouse/test-old-centos"
+IMAGE_UBUNTU = "altinityinfra/test-old-ubuntu"
+IMAGE_CENTOS = "altinityinfra/test-old-centos"
MAX_GLIBC_VERSION = "2.4"
DOWNLOAD_RETRIES_COUNT = 5
CHECK_NAME = "Compatibility check (actions)"
@@ -197,4 +197,8 @@ def url_filter(url):
report_url,
CHECK_NAME,
)
+
ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events)
+
+ if state == "error":
+ sys.exit(1)
diff --git a/tests/ci/docker_images_check.py b/tests/ci/docker_images_check.py
index 8185229dfd69..9d5173648e35 100644
--- a/tests/ci/docker_images_check.py
+++ b/tests/ci/docker_images_check.py
@@ -106,22 +106,23 @@ def get_changed_docker_images(
str(files_changed),
)
- changed_images = []
-
- for dockerfile_dir, image_description in images_dict.items():
- for f in files_changed:
- if f.startswith(dockerfile_dir):
- name = image_description["name"]
- only_amd64 = image_description.get("only_amd64", False)
- logging.info(
- "Found changed file '%s' which affects "
- "docker image '%s' with path '%s'",
- f,
- name,
- dockerfile_dir,
- )
- changed_images.append(DockerImage(dockerfile_dir, name, only_amd64))
- break
+ # Rebuild all images
+ changed_images = [
+     DockerImage(dockerfile_dir, image_description["name"], image_description.get("only_amd64", False))
+     for dockerfile_dir, image_description in images_dict.items()
+ ]
+
+ # for dockerfile_dir, image_description in images_dict.items():
+ # for f in files_changed:
+ # if f.startswith(dockerfile_dir):
+ # name = image_description["name"]
+ # only_amd64 = image_description.get("only_amd64", False)
+ # logging.info(
+ # "Found changed file '%s' which affects "
+ # "docker image '%s' with path '%s'",
+ # f,
+ # name,
+ # dockerfile_dir,
+ # )
+ # changed_images.append(DockerImage(dockerfile_dir, name, only_amd64))
+ # break
# The order is important: dependents should go later than bases, so that
# they are built with updated base versions.
@@ -253,6 +254,19 @@ def build_and_push_one_image(
f"--tag {image.repo}:{version_string} "
f"{cache_from} "
f"--cache-to type=inline,mode=max "
+ # FIXME: many tests utilize packages without specifying version, hence docker pulls :latest
+ # this will fail if multiple jobs are executed on different machines and
+ # push different images as latest.
+ # To fix it we may:
+ # - require jobs to be executed on the same machine the images were built on (no parallelism)
+ # - change all the test's code (mostly docker-compose files in integration tests)
+ # that depend on said images and push version somehow into docker-compose.
+ # (and that is lots of work and many potential conflicts with upstream)
+ # - tag and push all images as :latest and then just pray that collisions are infrequent.
+ # and even if a collision happens, the image is not that different and would still work properly.
+ # (^^^ CURRENT SOLUTION ^^^) But this is just a numbers game, it will blow up at some point.
+ # - do something crazy
+ f"--tag {image.repo}:latest "
f"{push_arg}"
f"--progress plain {image.full_path}"
)
@@ -261,6 +275,7 @@ def build_and_push_one_image(
retcode = proc.wait()
if retcode != 0:
+ logging.error("Building image {} failed with error: {}\n{}".format(image, retcode, ''.join(list(open(build_log, 'rt')))))
return False, build_log
logging.info("Processing of %s successfully finished", image.repo)
@@ -407,8 +422,8 @@ def main():
if args.push:
subprocess.check_output( # pylint: disable=unexpected-keyword-arg
- "docker login --username 'robotclickhouse' --password-stdin",
- input=get_parameter_from_ssm("dockerhub_robot_password"),
+ "docker login --username 'altinityinfra' --password-stdin",
+ input=get_parameter_from_ssm("dockerhub-password"),
encoding="utf-8",
shell=True,
)
diff --git a/tests/ci/docker_manifests_merge.py b/tests/ci/docker_manifests_merge.py
index 8bd50819877c..0d061bb0db33 100644
--- a/tests/ci/docker_manifests_merge.py
+++ b/tests/ci/docker_manifests_merge.py
@@ -173,8 +173,8 @@ def main():
args = parse_args()
if args.push:
subprocess.check_output( # pylint: disable=unexpected-keyword-arg
- "docker login --username 'robotclickhouse' --password-stdin",
- input=get_parameter_from_ssm("dockerhub_robot_password"),
+ "docker login --username 'altinityinfra' --password-stdin",
+ input=get_parameter_from_ssm("dockerhub-password"),
encoding="utf-8",
shell=True,
)
diff --git a/tests/ci/docker_pull_helper.py b/tests/ci/docker_pull_helper.py
index 54d48c588050..c1c0637411d2 100644
--- a/tests/ci/docker_pull_helper.py
+++ b/tests/ci/docker_pull_helper.py
@@ -5,6 +5,7 @@
import time
import subprocess
import logging
+import traceback
class DockerImage:
@@ -48,6 +49,7 @@ def get_images_with_versions(reports_path, required_image, pull=True):
docker_images.append(docker_image)
if pull:
+ latest_error = None
for docker_image in docker_images:
for i in range(10):
try:
@@ -60,7 +62,8 @@ def get_images_with_versions(reports_path, required_image, pull=True):
break
except Exception as ex:
time.sleep(i * 3)
- logging.info("Got execption pulling docker %s", ex)
+ logging.info("Got exception pulling docker %s", ex)
+ latest_error = traceback.format_exc()
else:
raise Exception(
f"Cannot pull dockerhub for image docker pull {docker_image} because of {latest_error}"
diff --git a/tests/ci/docker_server.py b/tests/ci/docker_server.py
index 4fbb1faee04a..4a03b49c5aa6 100644
--- a/tests/ci/docker_server.py
+++ b/tests/ci/docker_server.py
@@ -71,7 +71,7 @@ def parse_args() -> argparse.Namespace:
parser.add_argument(
"--image-repo",
type=str,
- default="clickhouse/clickhouse-server",
+ default="altinityinfra/clickhouse-server",
help="image name on docker hub",
)
parser.add_argument(
@@ -308,8 +308,8 @@ def main():
if args.push:
subprocess.check_output( # pylint: disable=unexpected-keyword-arg
- "docker login --username 'robotclickhouse' --password-stdin",
- input=get_parameter_from_ssm("dockerhub_robot_password"),
+ "docker login --username 'altinityinfra' --password-stdin",
+ input=get_parameter_from_ssm("dockerhub-password"),
encoding="utf-8",
shell=True,
)
diff --git a/tests/ci/docker_test.py b/tests/ci/docker_test.py
index 32df6d5f1d07..95ac61a0c1ee 100644
--- a/tests/ci/docker_test.py
+++ b/tests/ci/docker_test.py
@@ -37,61 +37,61 @@ def test_get_changed_docker_images(self):
self.maxDiff = None
expected = sorted(
[
- di.DockerImage("docker/test/base", "clickhouse/test-base", False),
- di.DockerImage("docker/docs/builder", "clickhouse/docs-builder", True),
+ di.DockerImage("docker/test/base", "altinityinfra/test-base", False),
+ di.DockerImage("docker/docs/builder", "altinityinfra/docs-builder", True),
di.DockerImage(
"docker/test/stateless",
- "clickhouse/stateless-test",
+ "altinityinfra/stateless-test",
False,
- "clickhouse/test-base",
+ "altinityinfra/test-base",
),
di.DockerImage(
"docker/test/integration/base",
- "clickhouse/integration-test",
+ "altinityinfra/integration-test",
False,
- "clickhouse/test-base",
- ),
- di.DockerImage(
- "docker/test/fuzzer",
- "clickhouse/fuzzer",
- False,
- "clickhouse/test-base",
+ "altinityinfra/test-base",
),
+ # di.DockerImage(
+ # "docker/test/fuzzer",
+ # "altinityinfra/fuzzer",
+ # False,
+ # "altinityinfra/test-base",
+ # ),
di.DockerImage(
"docker/test/keeper-jepsen",
- "clickhouse/keeper-jepsen-test",
- False,
- "clickhouse/test-base",
- ),
- di.DockerImage(
- "docker/docs/check",
- "clickhouse/docs-check",
- False,
- "clickhouse/docs-builder",
- ),
- di.DockerImage(
- "docker/docs/release",
- "clickhouse/docs-release",
+ "altinityinfra/keeper-jepsen-test",
False,
- "clickhouse/docs-builder",
+ "altinityinfra/test-base",
),
+ # di.DockerImage(
+ # "docker/docs/check",
+ # "altinityinfra/docs-check",
+ # False,
+ # "altinityinfra/docs-builder",
+ # ),
+ # di.DockerImage(
+ # "docker/docs/release",
+ # "altinityinfra/docs-release",
+ # False,
+ # "altinityinfra/docs-builder",
+ # ),
di.DockerImage(
"docker/test/stateful",
- "clickhouse/stateful-test",
+ "altinityinfra/stateful-test",
False,
- "clickhouse/stateless-test",
+ "altinityinfra/stateless-test",
),
di.DockerImage(
"docker/test/unit",
- "clickhouse/unit-test",
+ "altinityinfra/unit-test",
False,
- "clickhouse/stateless-test",
+ "altinityinfra/stateless-test",
),
di.DockerImage(
"docker/test/stress",
- "clickhouse/stress-test",
+ "altinityinfra/stress-test",
False,
- "clickhouse/stateful-test",
+ "altinityinfra/stateful-test",
),
]
)
diff --git a/tests/ci/docs_check.py b/tests/ci/docs_check.py
index 58678b160a4f..10168c9a77e8 100644
--- a/tests/ci/docs_check.py
+++ b/tests/ci/docs_check.py
@@ -50,7 +50,7 @@
if not os.path.exists(temp_path):
os.makedirs(temp_path)
- docker_image = get_image_with_version(temp_path, "clickhouse/docs-check")
+ docker_image = get_image_with_version(temp_path, "altinityinfra/docs-check")
test_output = os.path.join(temp_path, "docs_check_log")
if not os.path.exists(test_output):
@@ -114,4 +114,7 @@
report_url,
NAME,
)
+
ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events)
+ if status == "error":
+ sys.exit(1)
diff --git a/tests/ci/docs_release.py b/tests/ci/docs_release.py
index b6d47326f9b8..99ee9e0910d7 100644
--- a/tests/ci/docs_release.py
+++ b/tests/ci/docs_release.py
@@ -34,7 +34,7 @@
if not os.path.exists(temp_path):
os.makedirs(temp_path)
- docker_image = get_image_with_version(temp_path, "clickhouse/docs-release")
+ docker_image = get_image_with_version(temp_path, "altinityinfra/docs-release")
test_output = os.path.join(temp_path, "docs_release_log")
if not os.path.exists(test_output):
diff --git a/tests/ci/env_helper.py b/tests/ci/env_helper.py
index dd081523db11..d2ef7f397a27 100644
--- a/tests/ci/env_helper.py
+++ b/tests/ci/env_helper.py
@@ -11,7 +11,7 @@
CLOUDFLARE_TOKEN = os.getenv("CLOUDFLARE_TOKEN")
GITHUB_EVENT_PATH = os.getenv("GITHUB_EVENT_PATH", "")
GITHUB_JOB = os.getenv("GITHUB_JOB", "local")
-GITHUB_REPOSITORY = os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")
+GITHUB_REPOSITORY = os.getenv("GITHUB_REPOSITORY", "Altinity/ClickHouse")
GITHUB_RUN_ID = os.getenv("GITHUB_RUN_ID", "0")
GITHUB_SERVER_URL = os.getenv("GITHUB_SERVER_URL", "https://github.com")
GITHUB_WORKSPACE = os.getenv("GITHUB_WORKSPACE", git_root)
@@ -20,5 +20,6 @@
REPORTS_PATH = os.getenv("REPORTS_PATH", p.abspath(p.join(module_dir, "./reports")))
REPO_COPY = os.getenv("REPO_COPY", git_root)
RUNNER_TEMP = os.getenv("RUNNER_TEMP", p.abspath(p.join(module_dir, "./tmp")))
-S3_BUILDS_BUCKET = os.getenv("S3_BUILDS_BUCKET", "clickhouse-builds")
-S3_TEST_REPORTS_BUCKET = os.getenv("S3_TEST_REPORTS_BUCKET", "clickhouse-test-reports")
+S3_BUILDS_BUCKET = os.getenv("S3_BUILDS_BUCKET", "altinity-build-artifacts")
+S3_TEST_REPORTS_BUCKET = os.getenv("S3_TEST_REPORTS_BUCKET", "altinity-build-artifacts")
+CLICKHOUSE_STABLE_VERSION_SUFFIX = os.getenv("CLICKHOUSE_STABLE_VERSION_SUFFIX", "stable")
diff --git a/tests/ci/fast_test_check.py b/tests/ci/fast_test_check.py
index 64e045947864..df5464a7dace 100644
--- a/tests/ci/fast_test_check.py
+++ b/tests/ci/fast_test_check.py
@@ -98,7 +98,7 @@ def process_results(result_folder):
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
- docker_image = get_image_with_version(temp_path, "clickhouse/fasttest")
+ docker_image = get_image_with_version(temp_path, "altinityinfra/fasttest")
s3_helper = S3Helper("https://s3.amazonaws.com")
@@ -208,7 +208,7 @@ def process_results(result_folder):
# Refuse other checks to run if fast test failed
if state != "success":
- if "force-tests" in pr_info.labels:
+ if "force-tests" in pr_info.labels and state != "error":
print("'force-tests' enabled, will report success")
else:
sys.exit(1)
diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py
index 6113bfdf0cdf..2b8a52b4db43 100644
--- a/tests/ci/functional_test_check.py
+++ b/tests/ci/functional_test_check.py
@@ -48,9 +48,9 @@ def get_additional_envs(check_name, run_by_hash_num, run_by_hash_total):
def get_image_name(check_name):
if "stateless" in check_name.lower():
- return "clickhouse/stateless-test"
+ return "altinityinfra/stateless-test"
if "stateful" in check_name.lower():
- return "clickhouse/stateful-test"
+ return "altinityinfra/stateful-test"
else:
raise Exception(f"Cannot deduce image name based on check name {check_name}")
@@ -190,10 +190,12 @@ def process_results(result_folder, server_log_path):
run_by_hash_total = 0
check_name_with_group = check_name
- rerun_helper = RerunHelper(gh, pr_info, check_name_with_group)
- if rerun_helper.is_already_finished_by_status():
- logging.info("Check is already finished according to github status, exiting")
- sys.exit(0)
+ # Always re-run, even if it finished in previous run.
+ # gh = Github(get_best_robot_token())
+ # rerun_helper = RerunHelper(gh, pr_info, check_name_with_group)
+ # if rerun_helper.is_already_finished_by_status():
+ # logging.info("Check is already finished according to github status, exiting")
+ # sys.exit(0)
if not os.path.exists(temp_path):
os.makedirs(temp_path)
diff --git a/tests/ci/get_robot_token.py b/tests/ci/get_robot_token.py
index cb79d9ae01ac..10d742083f56 100644
--- a/tests/ci/get_robot_token.py
+++ b/tests/ci/get_robot_token.py
@@ -9,7 +9,14 @@ def get_parameter_from_ssm(name, decrypt=True, client=None):
return client.get_parameter(Name=name, WithDecryption=decrypt)["Parameter"]["Value"]
-def get_best_robot_token(token_prefix_env_name="github_robot_token_", total_tokens=4):
+# Original CI code uses the "_original" version of this method. Each robot token is rate limited
+# and the original implementation selects the "best one". To make it simpler and iterate faster,
+# we are using only one robot and keeping the method signature. In the future we might reconsider
+# having multiple robot tokens
+def get_best_robot_token(token_prefix_env_name="github_robot_token", total_tokens=4):
+ return get_parameter_from_ssm(token_prefix_env_name)
+
+def get_best_robot_token_original(token_prefix_env_name="github_robot_token_", total_tokens=4):
client = boto3.client("ssm", region_name="us-east-1")
tokens = {}
for i in range(1, total_tokens + 1):
diff --git a/tests/ci/git_helper.py b/tests/ci/git_helper.py
index 50414ffb470d..18d0cbf3840c 100644
--- a/tests/ci/git_helper.py
+++ b/tests/ci/git_helper.py
@@ -9,7 +9,7 @@
# \A and \Z match only start and end of the whole string
RELEASE_BRANCH_REGEXP = r"\A\d+[.]\d+\Z"
TAG_REGEXP = (
- r"\Av\d{2}[.][1-9]\d*[.][1-9]\d*[.][1-9]\d*-(testing|prestable|stable|lts)\Z"
+ r"\Av\d{2}[.][1-9]\d*[.][1-9]\d*[.][1-9]\d*-(testing|prestable|stable|lts|altinitystable)\Z"
)
SHA_REGEXP = r"\A([0-9]|[a-f]){40}\Z"
diff --git a/tests/ci/git_test.py b/tests/ci/git_test.py
index 785c9b62ccef..69371af40b34 100644
--- a/tests/ci/git_test.py
+++ b/tests/ci/git_test.py
@@ -57,6 +57,9 @@ def test_tags(self):
with self.assertRaises(Exception):
setattr(self.git, tag_attr, tag)
+ def test_check_tag(self):
+ self.git.check_tag("v21.12.333.4567-altinitystable")
+
def test_tweak(self):
self.git.commits_since_tag = 0
self.assertEqual(self.git.tweak, 1)
@@ -66,3 +69,6 @@ def test_tweak(self):
self.assertEqual(self.git.tweak, 22224)
self.git.commits_since_tag = 0
self.assertEqual(self.git.tweak, 22222)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/ci/integration_test_check.py b/tests/ci/integration_test_check.py
index 4ee0c8349746..24d5eda5477c 100644
--- a/tests/ci/integration_test_check.py
+++ b/tests/ci/integration_test_check.py
@@ -30,17 +30,17 @@
# When update, update
# integration/ci-runner.py:ClickhouseIntegrationTestsRunner.get_images_names too
IMAGES = [
- "clickhouse/integration-tests-runner",
- "clickhouse/mysql-golang-client",
- "clickhouse/mysql-java-client",
- "clickhouse/mysql-js-client",
- "clickhouse/mysql-php-client",
- "clickhouse/postgresql-java-client",
- "clickhouse/integration-test",
- "clickhouse/kerberos-kdc",
- "clickhouse/kerberized-hadoop",
- "clickhouse/integration-helper",
- "clickhouse/dotnet-client",
+ "altinityinfra/integration-tests-runner",
+ "altinityinfra/mysql-golang-client",
+ "altinityinfra/mysql-java-client",
+ "altinityinfra/mysql-js-client",
+ "altinityinfra/mysql-php-client",
+ "altinityinfra/postgresql-java-client",
+ "altinityinfra/integration-test",
+ "altinityinfra/kerberos-kdc",
+ "altinityinfra/kerberized-hadoop",
+ "altinityinfra/integration-helper",
+ "altinityinfra/dotnet-client",
]
@@ -146,10 +146,12 @@ def process_results(result_folder):
gh = Github(get_best_robot_token())
- rerun_helper = RerunHelper(gh, pr_info, check_name_with_group)
- if rerun_helper.is_already_finished_by_status():
- logging.info("Check is already finished according to github status, exiting")
- sys.exit(0)
+ # Always re-run, even if it finished in previous run.
+ # gh = Github(get_best_robot_token())
+ # rerun_helper = RerunHelper(gh, pr_info, check_name_with_group)
+ # if rerun_helper.is_already_finished_by_status():
+ # logging.info("Check is already finished according to github status, exiting")
+ # sys.exit(0)
images = get_images_with_versions(reports_path, IMAGES)
images_with_versions = {i.name: i.version for i in images}
@@ -233,4 +235,8 @@ def process_results(result_folder):
report_url,
check_name_with_group,
)
+
ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events)
+
+ if state == "error":
+ sys.exit(1)
diff --git a/tests/ci/keeper_jepsen_check.py b/tests/ci/keeper_jepsen_check.py
index b0ec1e7ba8bf..98af355ab6d1 100644
--- a/tests/ci/keeper_jepsen_check.py
+++ b/tests/ci/keeper_jepsen_check.py
@@ -26,7 +26,7 @@
JEPSEN_GROUP_NAME = "jepsen_group"
DESIRED_INSTANCE_COUNT = 3
-IMAGE_NAME = "clickhouse/keeper-jepsen-test"
+IMAGE_NAME = "altinityinfra/keeper-jepsen-test"
CHECK_NAME = "ClickHouse Keeper Jepsen (actions)"
diff --git a/tests/ci/performance_comparison_check.py b/tests/ci/performance_comparison_check.py
index c6ce86b2ce10..bd0ca267ddd8 100644
--- a/tests/ci/performance_comparison_check.py
+++ b/tests/ci/performance_comparison_check.py
@@ -20,7 +20,7 @@
from tee_popen import TeePopen
from rerun_helper import RerunHelper
-IMAGE_NAME = "clickhouse/performance-comparison"
+IMAGE_NAME = "altinityinfra/performance-comparison"
def get_run_command(
@@ -217,3 +217,6 @@ def __exit__(self, exc_type, exc_val, exc_tb):
post_commit_status(
gh, pr_info.sha, check_name_with_group, message, status, report_url
)
+
+ if status == "error":
+ sys.exit(1)
diff --git a/tests/ci/release.py b/tests/ci/release.py
index 89182dc7428e..2fdd4c8b1742 100755
--- a/tests/ci/release.py
+++ b/tests/ci/release.py
@@ -400,7 +400,7 @@ def parse_args() -> argparse.Namespace:
)
parser.add_argument(
"--repo",
- default="ClickHouse/ClickHouse",
+ default="Altinity/ClickHouse",
help="repository to create the release",
)
parser.add_argument(
diff --git a/tests/ci/run_check.py b/tests/ci/run_check.py
index 9c7ba13f8e4d..ef96dc03e18d 100644
--- a/tests/ci/run_check.py
+++ b/tests/ci/run_check.py
@@ -78,6 +78,7 @@
"ilejn", # Arenadata, responsible for Kerberized Kafka
"thomoco", # ClickHouse
"BoloniniD", # Seasoned contributor, HSE
+ "arthurpassos" # Altinity
]
}
diff --git a/tests/ci/split_build_smoke_check.py b/tests/ci/split_build_smoke_check.py
index 41ba6c2fedb3..39561a311875 100644
--- a/tests/ci/split_build_smoke_check.py
+++ b/tests/ci/split_build_smoke_check.py
@@ -20,7 +20,7 @@
from rerun_helper import RerunHelper
-DOCKER_IMAGE = "clickhouse/split-build-smoke-test"
+DOCKER_IMAGE = "altinityinfra/split-build-smoke-test"
DOWNLOAD_RETRIES_COUNT = 5
RESULT_LOG_NAME = "run.log"
CHECK_NAME = "Split build smoke test (actions)"
@@ -147,4 +147,8 @@ def get_run_command(build_path, result_folder, server_log_folder, docker_image):
report_url,
CHECK_NAME,
)
+
ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events)
+
+ if state == "error":
+ sys.exit(1)
diff --git a/tests/ci/stress_check.py b/tests/ci/stress_check.py
index b95bf4b8aba0..7c110daf03e4 100644
--- a/tests/ci/stress_check.py
+++ b/tests/ci/stress_check.py
@@ -114,7 +114,7 @@ def process_results(result_folder, server_log_path, run_log_path):
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
- docker_image = get_image_with_version(reports_path, "clickhouse/stress-test")
+ docker_image = get_image_with_version(reports_path, "altinityinfra/stress-test")
packages_path = os.path.join(temp_path, "packages")
if not os.path.exists(packages_path):
@@ -175,3 +175,6 @@ def process_results(result_folder, server_log_path, run_log_path):
check_name,
)
ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events)
+
+ if state == "error":
+ sys.exit(1)
diff --git a/tests/ci/style_check.py b/tests/ci/style_check.py
index 1b3037217c83..2260c4d7f924 100644
--- a/tests/ci/style_check.py
+++ b/tests/ci/style_check.py
@@ -83,7 +83,7 @@ def process_result(result_folder):
if not os.path.exists(temp_path):
os.makedirs(temp_path)
- docker_image = get_image_with_version(temp_path, "clickhouse/style-test")
+ docker_image = get_image_with_version(temp_path, "altinityinfra/style-test")
s3_helper = S3Helper("https://s3.amazonaws.com")
cmd = (
@@ -118,3 +118,6 @@ def process_result(result_folder):
NAME,
)
ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events)
+
+ if state == "error":
+ sys.exit(1)
diff --git a/tests/ci/tests/docker_images.json b/tests/ci/tests/docker_images.json
index ca5c516bccba..53ad258f6ec9 100644
--- a/tests/ci/tests/docker_images.json
+++ b/tests/ci/tests/docker_images.json
@@ -1,10 +1,10 @@
{
"docker/packager/deb": {
- "name": "clickhouse/deb-builder",
+ "name": "altinityinfra/deb-builder",
"dependent": []
},
"docker/packager/binary": {
- "name": "clickhouse/binary-builder",
+ "name": "altinityinfra/binary-builder",
"dependent": [
"docker/test/split_build_smoke_test",
"docker/test/pvs",
@@ -12,156 +12,112 @@
]
},
"docker/test/compatibility/centos": {
- "name": "clickhouse/test-old-centos",
+ "name": "altinityinfra/test-old-centos",
"dependent": []
},
"docker/test/compatibility/ubuntu": {
- "name": "clickhouse/test-old-ubuntu",
+ "name": "altinityinfra/test-old-ubuntu",
"dependent": []
},
"docker/test/integration/base": {
- "name": "clickhouse/integration-test",
- "dependent": []
- },
- "docker/test/fuzzer": {
- "name": "clickhouse/fuzzer",
- "dependent": []
- },
- "docker/test/performance-comparison": {
- "name": "clickhouse/performance-comparison",
- "dependent": []
- },
- "docker/test/pvs": {
- "name": "clickhouse/pvs-test",
+ "name": "altinityinfra/integration-test",
"dependent": []
},
"docker/test/util": {
- "name": "clickhouse/test-util",
+ "name": "altinityinfra/test-util",
"dependent": [
"docker/test/base",
"docker/test/fasttest"
]
},
"docker/test/stateless": {
- "name": "clickhouse/stateless-test",
+ "name": "altinityinfra/stateless-test",
"dependent": [
"docker/test/stateful",
"docker/test/unit"
]
},
"docker/test/stateful": {
- "name": "clickhouse/stateful-test",
+ "name": "altinityinfra/stateful-test",
"dependent": [
"docker/test/stress"
]
},
"docker/test/unit": {
- "name": "clickhouse/unit-test",
- "dependent": []
- },
- "docker/test/stress": {
- "name": "clickhouse/stress-test",
- "dependent": []
- },
- "docker/test/split_build_smoke_test": {
- "name": "clickhouse/split-build-smoke-test",
- "dependent": []
- },
- "docker/test/codebrowser": {
- "name": "clickhouse/codebrowser",
+ "name": "altinityinfra/unit-test",
"dependent": []
},
"docker/test/integration/runner": {
- "name": "clickhouse/integration-tests-runner",
+ "name": "altinityinfra/integration-tests-runner",
"dependent": []
},
"docker/test/testflows/runner": {
- "name": "clickhouse/testflows-runner",
+ "name": "altinityinfra/testflows-runner",
"dependent": []
},
"docker/test/fasttest": {
- "name": "clickhouse/fasttest",
- "dependent": []
- },
- "docker/test/style": {
- "name": "clickhouse/style-test",
+ "name": "altinityinfra/fasttest",
"dependent": []
},
"docker/test/integration/s3_proxy": {
- "name": "clickhouse/s3-proxy",
+ "name": "altinityinfra/s3-proxy",
"dependent": []
},
"docker/test/integration/resolver": {
- "name": "clickhouse/python-bottle",
+ "name": "altinityinfra/python-bottle",
"dependent": []
},
"docker/test/integration/helper_container": {
- "name": "clickhouse/integration-helper",
+ "name": "altinityinfra/integration-helper",
"dependent": []
},
"docker/test/integration/mysql_golang_client": {
- "name": "clickhouse/mysql-golang-client",
+ "name": "altinityinfra/mysql-golang-client",
"dependent": []
},
"docker/test/integration/dotnet_client": {
- "name": "clickhouse/dotnet-client",
+ "name": "altinityinfra/dotnet-client",
"dependent": []
},
"docker/test/integration/mysql_java_client": {
- "name": "clickhouse/mysql-java-client",
+ "name": "altinityinfra/mysql-java-client",
"dependent": []
},
"docker/test/integration/mysql_js_client": {
- "name": "clickhouse/mysql-js-client",
+ "name": "altinityinfra/mysql-js-client",
"dependent": []
},
"docker/test/integration/mysql_php_client": {
- "name": "clickhouse/mysql-php-client",
+ "name": "altinityinfra/mysql-php-client",
"dependent": []
},
"docker/test/integration/postgresql_java_client": {
- "name": "clickhouse/postgresql-java-client",
+ "name": "altinityinfra/postgresql-java-client",
"dependent": []
},
"docker/test/integration/kerberos_kdc": {
- "name": "clickhouse/kerberos-kdc",
+ "name": "altinityinfra/kerberos-kdc",
"dependent": []
},
"docker/test/base": {
- "name": "clickhouse/test-base",
- "dependent": [
+ "name": "altinityinfra/test-base",
+ "dependent": [
"docker/test/stateless",
"docker/test/integration/base",
"docker/test/fuzzer",
"docker/test/keeper-jepsen"
- ]
+ ]
},
"docker/test/integration/kerberized_hadoop": {
- "name": "clickhouse/kerberized-hadoop",
+ "name": "altinityinfra/kerberized-hadoop",
"dependent": []
},
"docker/test/sqlancer": {
- "name": "clickhouse/sqlancer-test",
+ "name": "altinityinfra/sqlancer-test",
"dependent": []
},
"docker/test/keeper-jepsen": {
- "name": "clickhouse/keeper-jepsen-test",
- "dependent": []
- },
- "docker/docs/builder": {
- "name": "clickhouse/docs-builder",
- "only_amd64": true,
- "dependent": [
- "docker/docs/check",
- "docker/docs/release"
- ]
- },
- "docker/docs/check": {
- "name": "clickhouse/docs-check",
- "dependent": []
- },
- "docker/docs/release": {
- "name": "clickhouse/docs-release",
+ "name": "altinityinfra/keeper-jepsen-test",
"dependent": []
}
}
diff --git a/tests/ci/unit_tests_check.py b/tests/ci/unit_tests_check.py
index 84c4faa822db..b2a1f837faa7 100644
--- a/tests/ci/unit_tests_check.py
+++ b/tests/ci/unit_tests_check.py
@@ -25,7 +25,7 @@
from tee_popen import TeePopen
-IMAGE_NAME = "clickhouse/unit-test"
+IMAGE_NAME = "altinityinfra/unit-test"
def get_test_name(line):
@@ -173,4 +173,8 @@ def process_result(result_folder):
report_url,
check_name,
)
+
ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events)
+
+ if state == "error":
+ sys.exit(1)
diff --git a/tests/ci/version_helper.py b/tests/ci/version_helper.py
index 9c67191e4c3c..a82fcd97ea76 100755
--- a/tests/ci/version_helper.py
+++ b/tests/ci/version_helper.py
@@ -48,6 +48,7 @@ def __init__(
revision: Union[int, str],
git: Git,
tweak: str = None,
+ flavour: str = None,
):
self._major = int(major)
self._minor = int(minor)
@@ -58,6 +59,7 @@ def __init__(
if tweak is not None:
self._tweak = int(tweak)
self._describe = ""
+ self._flavour = flavour
def update(self, part: str) -> "ClickHouseVersion":
"""If part is valid, returns a new version"""
@@ -107,9 +109,12 @@ def describe(self):
@property
def string(self):
- return ".".join(
+ version_as_string = ".".join(
(str(self.major), str(self.minor), str(self.patch), str(self.tweak))
)
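+        # With a flavour set, the string gets a suffix, e.g. "21.12.333.4567.altinitystable".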
+ if self._flavour:
+ version_as_string = f"{version_as_string}.{self._flavour}"
+ return version_as_string
def as_dict(self) -> VERSIONS:
return {
@@ -129,7 +134,10 @@ def as_tuple(self) -> Tuple[int, int, int, int]:
def with_description(self, version_type):
if version_type not in VersionType.VALID:
raise ValueError(f"version type {version_type} not in {VersionType.VALID}")
- self._describe = f"v{self.string}-{version_type}"
+ if version_type == self._flavour:
+ self._describe = f"v{self.string}"
+ else:
+ self._describe = f"v{self.string}-{version_type}"
def __eq__(self, other) -> bool:
if not isinstance(self, type(other)):
@@ -157,7 +165,7 @@ def __le__(self, other: "ClickHouseVersion") -> bool:
class VersionType:
LTS = "lts"
PRESTABLE = "prestable"
- STABLE = "stable"
+ STABLE = "altinitystable"
TESTING = "testing"
VALID = (TESTING, PRESTABLE, STABLE, LTS)
@@ -205,6 +213,9 @@ def get_version_from_repo(
versions["patch"],
versions["revision"],
git,
+ # Explicitly use tweak value from version file
+ tweak=versions.get("tweak", versions["revision"]),
+ flavour=versions["flavour"]
)
@@ -278,7 +289,7 @@ def update_contributors(
cfd.write(content)
-def update_version_local(version, version_type="testing"):
+def update_version_local(version: ClickHouseVersion, version_type="testing"):
update_contributors()
version.with_description(version_type)
update_cmake_version(version)
diff --git a/tests/integration/ci-runner.py b/tests/integration/ci-runner.py
index a301869319df..da8346f55165 100755
--- a/tests/integration/ci-runner.py
+++ b/tests/integration/ci-runner.py
@@ -98,6 +98,7 @@ def get_counters(fname):
# Lines like:
# [gw0] [ 7%] ERROR test_mysql_protocol/test.py::test_golang_client
+ # [gw3] [ 40%] PASSED test_replicated_users/test.py::test_rename_replicated[QUOTA]
state = line_arr[-2]
test_name = line_arr[-1]
@@ -255,17 +256,17 @@ def shuffle_test_groups(self):
@staticmethod
def get_images_names():
return [
- "clickhouse/dotnet-client",
- "clickhouse/integration-helper",
- "clickhouse/integration-test",
- "clickhouse/integration-tests-runner",
- "clickhouse/kerberized-hadoop",
- "clickhouse/kerberos-kdc",
- "clickhouse/mysql-golang-client",
- "clickhouse/mysql-java-client",
- "clickhouse/mysql-js-client",
- "clickhouse/mysql-php-client",
- "clickhouse/postgresql-java-client",
+ "altinityinfra/dotnet-client",
+ "altinityinfra/integration-helper",
+ "altinityinfra/integration-test",
+ "altinityinfra/integration-tests-runner",
+ "altinityinfra/kerberized-hadoop",
+ "altinityinfra/kerberos-kdc",
+ "altinityinfra/mysql-golang-client",
+ "altinityinfra/mysql-java-client",
+ "altinityinfra/mysql-js-client",
+ "altinityinfra/mysql-php-client",
+ "altinityinfra/postgresql-java-client",
]
def _can_run_with(self, path, opt):
@@ -462,7 +463,7 @@ def _get_runner_image_cmd(self, repo_path):
"--docker-image-version",
):
for img in self.get_images_names():
- if img == "clickhouse/integration-tests-runner":
+ if img == "altinityinfra/integration-tests-runner":
runner_version = self.get_image_version(img)
logging.info(
"Can run with custom docker image version %s", runner_version
@@ -905,6 +906,16 @@ def run_impl(self, repo_path, build_path):
if "(memory)" in self.params["context_name"]:
result_state = "success"
+ for res in test_result:
+ # It's not easy to parse the output of pytest,
+ # especially when test names may contain spaces.
+ # Do not allow spaces in test names to avoid obscure failures.
+ if " " not in res[0]:
+ continue
+ logging.warning("Found invalid test name with space: %s", res[0])
+ status_text = "Found test with invalid name, see main log"
+ result_state = "failure"
+
return result_state, status_text, test_result, []
diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py
index d0b5e892f5b7..a32321c8dc20 100644
--- a/tests/integration/helpers/cluster.py
+++ b/tests/integration/helpers/cluster.py
@@ -16,21 +16,29 @@
import urllib.parse
import shlex
import urllib3
-
-from cassandra.policies import RoundRobinPolicy
-import cassandra.cluster
-import psycopg2
-import pymongo
-import pymysql
import requests
-from confluent_kafka.avro.cached_schema_registry_client import (
- CachedSchemaRegistryClient,
-)
+
+try:
+ # Please add modules that are required only for specific tests here,
+ # so contributors will be able to run most tests locally
+ # without installing tons of unneeded packages that may not be so easy to install.
+ from cassandra.policies import RoundRobinPolicy
+ import cassandra.cluster
+ import psycopg2
+ from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
+ import pymongo
+ import pymysql
+ from confluent_kafka.avro.cached_schema_registry_client import (
+ CachedSchemaRegistryClient,
+ )
+ import meilisearch
+except Exception as e:
+ logging.warning(f"Cannot import some modules, some tests may not work: {e}")
+
from dict2xml import dict2xml
from kazoo.client import KazooClient
from kazoo.exceptions import KazooException
from minio import Minio
-from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from helpers.test_tools import assert_eq_with_retry, exec_query_with_retry
from helpers import pytest_xdist_logging_to_separate_files
@@ -689,7 +697,7 @@ def setup_keeper_cmd(self, instance, env_variables, docker_compose_yml_dir):
binary_path = binary_path[: -len("-server")]
env_variables["keeper_binary"] = binary_path
- env_variables["image"] = "clickhouse/integration-test:" + self.docker_base_tag
+ env_variables["image"] = "altinityinfra/integration-test:" + self.docker_base_tag
env_variables["user"] = str(os.getuid())
env_variables["keeper_fs"] = "bind"
for i in range(1, 4):
@@ -1169,7 +1177,7 @@ def add_instance(
with_hive=False,
hostname=None,
env_variables=None,
- image="clickhouse/integration-test",
+ image="altinityinfra/integration-test",
tag=None,
stay_alive=False,
ipv4_address=None,
@@ -2643,7 +2651,7 @@ def __init__(
copy_common_configs=True,
hostname=None,
env_variables=None,
- image="clickhouse/integration-test",
+ image="altinityinfra/integration-test",
tag="latest",
stay_alive=False,
ipv4_address=None,
diff --git a/tests/integration/helpers/network.py b/tests/integration/helpers/network.py
index 63fb2065f9df..c3829e160e2b 100644
--- a/tests/integration/helpers/network.py
+++ b/tests/integration/helpers/network.py
@@ -248,7 +248,7 @@ def _ensure_container(self):
time.sleep(i)
image = subprocess.check_output(
- "docker images -q clickhouse/integration-helper 2>/dev/null", shell=True
+ "docker images -q altinityinfra/integration-helper 2>/dev/null", shell=True
)
if not image.strip():
print("No network image helper, will try download")
@@ -257,16 +257,16 @@ def _ensure_container(self):
for i in range(5):
try:
subprocess.check_call( # STYLE_CHECK_ALLOW_SUBPROCESS_CHECK_CALL
- "docker pull clickhouse/integration-helper", shell=True
+ "docker pull altinityinfra/integration-helper", shell=True
)
break
except:
time.sleep(i)
else:
- raise Exception("Cannot pull clickhouse/integration-helper image")
+ raise Exception("Cannot pull altinityinfra/integration-helper image")
self._container = self._docker_client.containers.run(
- "clickhouse/integration-helper",
+ "altinityinfra/integration-helper",
auto_remove=True,
command=("sleep %s" % self.container_exit_timeout),
# /run/xtables.lock passed inside for correct iptables --wait
diff --git a/tests/integration/runner b/tests/integration/runner
index 5a168eeea250..8666258d4851 100755
--- a/tests/integration/runner
+++ b/tests/integration/runner
@@ -19,7 +19,7 @@ CONFIG_DIR_IN_REPO = "programs/server"
INTEGRATION_DIR_IN_REPO = "tests/integration"
SRC_DIR_IN_REPO = "src"
-DIND_INTEGRATION_TESTS_IMAGE_NAME = "clickhouse/integration-tests-runner"
+DIND_INTEGRATION_TESTS_IMAGE_NAME = "altinityinfra/integration-tests-runner"
def check_args_and_update_paths(args):
if args.clickhouse_root:
@@ -226,23 +226,23 @@ if __name__ == "__main__":
if args.docker_compose_images_tags is not None:
for img_tag in args.docker_compose_images_tags:
[image, tag] = img_tag.split(":")
- if image == "clickhouse/mysql-golang-client":
+ if image == "altinityinfra/mysql-golang-client":
env_tags += "-e {}={} ".format("DOCKER_MYSQL_GOLANG_CLIENT_TAG", tag)
- elif image == "clickhouse/dotnet-client":
+ elif image == "altinityinfra/dotnet-client":
env_tags += "-e {}={} ".format("DOCKER_DOTNET_CLIENT_TAG", tag)
- elif image == "clickhouse/mysql-java-client":
+ elif image == "altinityinfra/mysql-java-client":
env_tags += "-e {}={} ".format("DOCKER_MYSQL_JAVA_CLIENT_TAG", tag)
- elif image == "clickhouse/mysql-js-client":
+ elif image == "altinityinfra/mysql-js-client":
env_tags += "-e {}={} ".format("DOCKER_MYSQL_JS_CLIENT_TAG", tag)
- elif image == "clickhouse/mysql-php-client":
+ elif image == "altinityinfra/mysql-php-client":
env_tags += "-e {}={} ".format("DOCKER_MYSQL_PHP_CLIENT_TAG", tag)
- elif image == "clickhouse/postgresql-java-client":
+ elif image == "altinityinfra/postgresql-java-client":
env_tags += "-e {}={} ".format("DOCKER_POSTGRESQL_JAVA_CLIENT_TAG", tag)
- elif image == "clickhouse/integration-test":
+ elif image == "altinityinfra/integration-test":
env_tags += "-e {}={} ".format("DOCKER_BASE_TAG", tag)
- elif image == "clickhouse/kerberized-hadoop":
+ elif image == "altinityinfra/kerberized-hadoop":
env_tags += "-e {}={} ".format("DOCKER_KERBERIZED_HADOOP_TAG", tag)
- elif image == "clickhouse/kerberos-kdc":
+ elif image == "altinityinfra/kerberos-kdc":
env_tags += "-e {}={} ".format("DOCKER_KERBEROS_KDC_TAG", tag)
else:
logging.info("Unknown image %s" % (image))
diff --git a/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml b/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml
index 2f1b8275a0bb..a6e2d29c5d57 100644
--- a/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml
+++ b/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml
@@ -15,6 +15,13 @@
            <secret_access_key>minio123</secret_access_key>
            <s3_max_single_read_retries>10</s3_max_single_read_retries>
+        <no_delete_objects_s3>
+            <type>s3</type>
+            <endpoint>http://resolver:8082/root/data/</endpoint>
+            <access_key_id>minio</access_key_id>
+            <secret_access_key>minio123</secret_access_key>
+            <s3_max_single_read_retries>10</s3_max_single_read_retries>
+        </no_delete_objects_s3>
            <type>local</type>
            <path>/</path>
@@ -46,6 +53,13 @@
+        <no_delete_objects_s3>
+            <volumes>
+                <main>
+                    <disk>no_delete_objects_s3</disk>
+                </main>
+            </volumes>
+        </no_delete_objects_s3>
diff --git a/tests/integration/test_merge_tree_s3/s3_mocks/no_delete_objects.py b/tests/integration/test_merge_tree_s3/s3_mocks/no_delete_objects.py
new file mode 100644
index 000000000000..111f3a490c2b
--- /dev/null
+++ b/tests/integration/test_merge_tree_s3/s3_mocks/no_delete_objects.py
@@ -0,0 +1,92 @@
+import http.client
+import http.server
+import random
+import socketserver
+import sys
+import urllib.parse
+
+
+UPSTREAM_HOST = "minio1:9001"
+random.seed("No delete objects/1.0")
+
+
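+# The mock proxies every request to the upstream minio instance, but answers the
+# multi-object delete POST ("?delete") with 501 NotImplemented, emulating GCS,
+# which does not support the DeleteObjects request.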
+def request(command, url, headers={}, data=None):
+ """Mini-requests."""
+
+ class Dummy:
+ pass
+
+ parts = urllib.parse.urlparse(url)
+ c = http.client.HTTPConnection(parts.hostname, parts.port)
+ c.request(
+ command,
+ urllib.parse.urlunparse(parts._replace(scheme="", netloc="")),
+ headers=headers,
+ body=data,
+ )
+ r = c.getresponse()
+ result = Dummy()
+ result.status_code = r.status
+ result.headers = r.headers
+ result.content = r.read()
+ return result
+
+
+class RequestHandler(http.server.BaseHTTPRequestHandler):
+ def do_GET(self):
+ if self.path == "/":
+ self.send_response(200)
+ self.send_header("Content-Type", "text/plain")
+ self.end_headers()
+ self.wfile.write(b"OK")
+ else:
+ self.do_HEAD()
+
+ def do_PUT(self):
+ self.do_HEAD()
+
+ def do_DELETE(self):
+ self.do_HEAD()
+
+ def do_POST(self):
+ query = urllib.parse.urlparse(self.path).query
+ params = urllib.parse.parse_qs(query, keep_blank_values=True)
+ if "delete" in params:
+ self.send_response(501)
+ self.send_header("Content-Type", "application/xml")
+ self.end_headers()
+ self.wfile.write(
+ b"""
+
+ NotImplemented
+ Ima GCP and I can't do `DeleteObjects` request for ya. See https://issuetracker.google.com/issues/162653700 .
+ RESOURCE
+ REQUEST_ID
+"""
+ )
+ else:
+ self.do_HEAD()
+
+ def do_HEAD(self):
+ content_length = self.headers.get("Content-Length")
+ data = self.rfile.read(int(content_length)) if content_length else None
+ r = request(
+ self.command,
+ f"http://{UPSTREAM_HOST}{self.path}",
+ headers=self.headers,
+ data=data,
+ )
+ self.send_response(r.status_code)
+ for k, v in r.headers.items():
+ self.send_header(k, v)
+ self.end_headers()
+ self.wfile.write(r.content)
+ self.wfile.close()
+
+
+class ThreadedHTTPServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
+ """Handle requests in a separate thread."""
+
+
+httpd = ThreadedHTTPServer(("0.0.0.0", int(sys.argv[1])), RequestHandler)
+httpd.serve_forever()
diff --git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py
index b7ef3ce3ef2e..b04b02246132 100644
--- a/tests/integration/test_merge_tree_s3/test.py
+++ b/tests/integration/test_merge_tree_s3/test.py
@@ -67,7 +67,10 @@ def create_table(node, table_name, **additional_settings):
def run_s3_mocks(cluster):
logging.info("Starting s3 mocks")
- mocks = (("unstable_proxy.py", "resolver", "8081"),)
+ mocks = (
+ ("unstable_proxy.py", "resolver", "8081"),
+ ("no_delete_objects.py", "resolver", "8082"),
+ )
for mock_filename, container, port in mocks:
container_id = cluster.get_container_id(container)
current_dir = os.path.dirname(__file__)
@@ -602,6 +605,15 @@ def restart_disk():
thread.join()
+@pytest.mark.parametrize("node_name", ["node"])
+def test_s3_no_delete_objects(cluster, node_name):
+ node = cluster.instances[node_name]
+ create_table(
+ node, "s3_test_no_delete_objects", storage_policy="no_delete_objects_s3"
+ )
+ node.query("DROP TABLE s3_test_no_delete_objects SYNC")
+
+
@pytest.mark.parametrize("node_name", ["node"])
def test_s3_disk_reads_on_unstable_connection(cluster, node_name):
node = cluster.instances[node_name]
diff --git a/tests/integration/test_replicated_users/test.py b/tests/integration/test_replicated_users/test.py
index add45d262e63..56383f0d2dfb 100644
--- a/tests/integration/test_replicated_users/test.py
+++ b/tests/integration/test_replicated_users/test.py
@@ -41,7 +41,7 @@ class Entity:
def get_entity_id(entity):
- return entity.keyword
+ return entity.keyword.replace(" ", "_")
@pytest.mark.parametrize("entity", entities, ids=get_entity_id)
diff --git a/tests/integration/test_s3_zero_copy_replication/test.py b/tests/integration/test_s3_zero_copy_replication/test.py
index d7aa4feb1d2f..1ce1047ebec9 100644
--- a/tests/integration/test_s3_zero_copy_replication/test.py
+++ b/tests/integration/test_s3_zero_copy_replication/test.py
@@ -361,6 +361,8 @@ def test_s3_zero_copy_with_ttl_delete(cluster, large_data, iterations):
)
node1.query("OPTIMIZE TABLE ttl_delete_test FINAL")
+
+ node1.query("SYSTEM SYNC REPLICA ttl_delete_test")
node2.query("SYSTEM SYNC REPLICA ttl_delete_test")
if large_data:
diff --git a/tests/integration/test_s3_zero_copy_ttl/test.py b/tests/integration/test_s3_zero_copy_ttl/test.py
index 14b4664fcc14..9a782aacef6b 100644
--- a/tests/integration/test_s3_zero_copy_ttl/test.py
+++ b/tests/integration/test_s3_zero_copy_ttl/test.py
@@ -68,19 +68,27 @@ def test_ttl_move_and_s3(started_cluster):
assert node1.query("SELECT COUNT() FROM s3_test_with_ttl") == "30\n"
assert node2.query("SELECT COUNT() FROM s3_test_with_ttl") == "30\n"
- time.sleep(5)
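+    # The parts are moved to S3 by TTL in the background; poll for up to ~25 seconds
+    # until all 300 objects appear in the bucket instead of sleeping only once.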
+ for attempt in reversed(range(5)):
+ time.sleep(5)
- print(
- node1.query(
- "SELECT * FROM system.parts WHERE table = 's3_test_with_ttl' FORMAT Vertical"
+ print(
+ node1.query(
+ "SELECT * FROM system.parts WHERE table = 's3_test_with_ttl' FORMAT Vertical"
+ )
)
- )
-
- minio = cluster.minio_client
- objects = minio.list_objects(cluster.minio_bucket, "data/", recursive=True)
- counter = 0
- for obj in objects:
- print("Objectname:", obj.object_name, "metadata:", obj.metadata)
- counter += 1
- print("Total objects", counter)
+
+ minio = cluster.minio_client
+ objects = minio.list_objects(cluster.minio_bucket, "data/", recursive=True)
+ counter = 0
+ for obj in objects:
+ print(f"Objectname: {obj.object_name}, metadata: {obj.metadata}")
+ counter += 1
+
+ print(f"Total objects: {counter}")
+
+ if counter == 300:
+ break
+
+ print(f"Attempts remaining: {attempt}")
+
assert counter == 300
diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py
index a27b5a134e49..45a944b8d93b 100644
--- a/tests/integration/test_storage_kafka/test.py
+++ b/tests/integration/test_storage_kafka/test.py
@@ -30,12 +30,24 @@
from kafka.protocol.group import MemberAssignment
from kafka.admin import NewTopic
+from pathlib import Path
+from helpers.cluster import run_and_check
# protoc --version
# libprotoc 3.0.0
# # to create kafka_pb2.py
# protoc --python_out=. kafka.proto
+# Regenerate the _pb2 files on each run to make sure the test doesn't depend on the installed protobuf version
+proto_dir = Path(__file__).parent / "clickhouse_path/format_schemas"
+gen_dir = Path(__file__).parent
+gen_dir.mkdir(exist_ok=True)
+run_and_check(
+ f"python3 -m grpc_tools.protoc -I{proto_dir!s} --python_out={gen_dir!s} --grpc_python_out={gen_dir!s} \
+ {proto_dir!s}/kafka.proto",
+ shell=True,
+)
+
from . import kafka_pb2
from . import social_pb2
from . import message_with_repeated_pb2
diff --git a/tests/queries/0_stateless/00900_orc_load.sh b/tests/queries/0_stateless/00900_orc_load.sh
index b3f2c39e5d2c..62149fa554e1 100755
--- a/tests/queries/0_stateless/00900_orc_load.sh
+++ b/tests/queries/0_stateless/00900_orc_load.sh
@@ -5,16 +5,13 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
-DATA_FILE=$CUR_DIR/data_orc/test.orc
-
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS orc_load"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE orc_load (int Int32, smallint Int8, bigint Int64, float Float32, double Float64, date Date, y String, datetime64 DateTime64(3)) ENGINE = Memory"
${CLICKHOUSE_CLIENT} --query="insert into orc_load values (0, 0, 0, 0, 0, '2019-01-01', 'test1', toDateTime64('2019-01-01 02:03:04.567', 3)), (2147483647, -1, 9223372036854775806, 123.345345, 345345.3453451212, '2019-01-01', 'test2', toDateTime64('2019-01-01 02:03:04.567', 3))"
-${CLICKHOUSE_CLIENT} --query="select * from orc_load FORMAT ORC" > $DATA_FILE
+${CLICKHOUSE_CLIENT} --query="select * from orc_load FORMAT ORC" > "${CLICKHOUSE_TMP}"/test.orc
${CLICKHOUSE_CLIENT} --query="truncate table orc_load"
-cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "insert into orc_load format ORC"
-timeout 3 ${CLICKHOUSE_CLIENT} -q "insert into orc_load format ORC" < $DATA_FILE
+cat "${CLICKHOUSE_TMP}"/test.orc | ${CLICKHOUSE_CLIENT} -q "insert into orc_load format ORC"
+timeout 3 ${CLICKHOUSE_CLIENT} -q "insert into orc_load format ORC" < "${CLICKHOUSE_TMP}"/test.orc
${CLICKHOUSE_CLIENT} --query="select * from orc_load"
${CLICKHOUSE_CLIENT} --query="drop table orc_load"
-rm -rf "$DATA_FILE"
diff --git a/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order.reference b/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order.reference
new file mode 100644
index 000000000000..7fcd29b5faf9
--- /dev/null
+++ b/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order.reference
@@ -0,0 +1,12 @@
+Partial sorting plan
+ optimize_read_in_window_order=0
+ Sort description: n ASC, x ASC
+ optimize_read_in_window_order=1
+ Prefix sort description: n ASC
+ Result sort description: n ASC, x ASC
+No sorting plan
+ optimize_read_in_window_order=0
+ Sort description: n ASC, x ASC
+ optimize_read_in_window_order=1
+ Prefix sort description: n ASC, x ASC
+ Result sort description: n ASC, x ASC
diff --git a/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order.sh b/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order.sh
new file mode 100755
index 000000000000..418baea81136
--- /dev/null
+++ b/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+name=test_01655_plan_optimizations_optimize_read_in_window_order
+
+$CLICKHOUSE_CLIENT -q "drop table if exists ${name}"
+$CLICKHOUSE_CLIENT -q "drop table if exists ${name}_n"
+$CLICKHOUSE_CLIENT -q "drop table if exists ${name}_n_x"
+
+$CLICKHOUSE_CLIENT -q "create table ${name} engine=MergeTree order by tuple() as select toInt64((sin(number)+2)*65535)%10 as n, number as x from numbers_mt(100000)"
+$CLICKHOUSE_CLIENT -q "create table ${name}_n engine=MergeTree order by n as select * from ${name} order by n"
+$CLICKHOUSE_CLIENT -q "create table ${name}_n_x engine=MergeTree order by (n, x) as select * from ${name} order by n, x"
+
+$CLICKHOUSE_CLIENT -q "optimize table ${name}_n final"
+$CLICKHOUSE_CLIENT -q "optimize table ${name}_n_x final"
+
+echo 'Partial sorting plan'
+echo ' optimize_read_in_window_order=0'
+$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n SETTINGS optimize_read_in_window_order=0" | grep -i "sort description"
+
+echo ' optimize_read_in_window_order=1'
+$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n SETTINGS optimize_read_in_window_order=1" | grep -i "sort description"
+
+echo 'No sorting plan'
+echo ' optimize_read_in_window_order=0'
+$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n_x SETTINGS optimize_read_in_window_order=0" | grep -i "sort description"
+
+echo ' optimize_read_in_window_order=1'
+$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n_x SETTINGS optimize_read_in_window_order=1" | grep -i "sort description"
+
+$CLICKHOUSE_CLIENT -q "drop table ${name}"
+$CLICKHOUSE_CLIENT -q "drop table ${name}_n"
+$CLICKHOUSE_CLIENT -q "drop table ${name}_n_x"
diff --git a/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order_long.reference b/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order_long.reference
new file mode 100644
index 000000000000..b462a5a7baa4
--- /dev/null
+++ b/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order_long.reference
@@ -0,0 +1,4 @@
+OK
+OK
+OK
+OK
diff --git a/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order_long.sh b/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order_long.sh
new file mode 100755
index 000000000000..297688a29c32
--- /dev/null
+++ b/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order_long.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+# Tags: long
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+name=test_01655_plan_optimizations_optimize_read_in_window_order_long
+max_memory_usage=20000000
+
+$CLICKHOUSE_CLIENT -q "drop table if exists ${name}"
+$CLICKHOUSE_CLIENT -q "drop table if exists ${name}_n"
+$CLICKHOUSE_CLIENT -q "drop table if exists ${name}_n_x"
+
+$CLICKHOUSE_CLIENT -q "create table ${name} engine=MergeTree order by tuple() as select toInt64((sin(number)+2)*65535)%500 as n, number as x from numbers_mt(5000000)"
+$CLICKHOUSE_CLIENT -q "create table ${name}_n engine=MergeTree order by n as select * from ${name} order by n"
+$CLICKHOUSE_CLIENT -q "create table ${name}_n_x engine=MergeTree order by (n, x) as select * from ${name} order by n, x"
+
+$CLICKHOUSE_CLIENT -q "optimize table ${name}_n final"
+$CLICKHOUSE_CLIENT -q "optimize table ${name}_n_x final"
+
+$CLICKHOUSE_CLIENT -q "select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n SETTINGS optimize_read_in_window_order=0, max_memory_usage=$max_memory_usage, max_threads=1 format Null" 2>&1 | grep -F -q "MEMORY_LIMIT_EXCEEDED" && echo 'OK' || echo 'FAIL'
+$CLICKHOUSE_CLIENT -q "select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n SETTINGS optimize_read_in_window_order=1, max_memory_usage=$max_memory_usage, max_threads=1 format Null"
+
+$CLICKHOUSE_CLIENT -q "select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n_x SETTINGS optimize_read_in_window_order=0, max_memory_usage=$max_memory_usage, max_threads=1 format Null" 2>&1 | grep -F -q "MEMORY_LIMIT_EXCEEDED" && echo 'OK' || echo 'FAIL'
+$CLICKHOUSE_CLIENT -q "select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n_x SETTINGS optimize_read_in_window_order=1, max_memory_usage=$max_memory_usage, max_threads=1 format Null"
+
+$CLICKHOUSE_CLIENT -q "select n, sum(x) OVER (PARTITION BY n ORDER BY x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n_x SETTINGS optimize_read_in_window_order=0, max_memory_usage=$max_memory_usage, max_threads=1 format Null" 2>&1 | grep -F -q "MEMORY_LIMIT_EXCEEDED" && echo 'OK' || echo 'FAIL'
+$CLICKHOUSE_CLIENT -q "select n, sum(x) OVER (PARTITION BY n ORDER BY x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n_x SETTINGS optimize_read_in_window_order=1, max_memory_usage=$max_memory_usage, max_threads=1 format Null"
+
+$CLICKHOUSE_CLIENT -q "select n, sum(x) OVER (PARTITION BY n+x%2 ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n_x SETTINGS optimize_read_in_window_order=1, max_memory_usage=$max_memory_usage, max_threads=1 format Null" 2>&1 | grep -F -q "MEMORY_LIMIT_EXCEEDED" && echo 'OK' || echo 'FAIL'
+
+$CLICKHOUSE_CLIENT -q "drop table ${name}"
+$CLICKHOUSE_CLIENT -q "drop table ${name}_n"
+$CLICKHOUSE_CLIENT -q "drop table ${name}_n_x"
diff --git a/tests/queries/0_stateless/02020_exponential_smoothing.reference b/tests/queries/0_stateless/02020_exponential_smoothing.reference
index b3c234206783..334d32e1c163 100644
--- a/tests/queries/0_stateless/02020_exponential_smoothing.reference
+++ b/tests/queries/0_stateless/02020_exponential_smoothing.reference
@@ -1,13 +1,14 @@
+exponentialMovingAverage
1 0 0.5
0 1 0.25
0 2 0.125
-0 3 0.0625
-0 4 0.03125
-0 5 0.015625
-0 6 0.0078125
-0 7 0.00390625
-0 8 0.001953125
-0 9 0.0009765625
+0 3 0.062
+0 4 0.031
+0 5 0.016
+0 6 0.008
+0 7 0.004
+0 8 0.002
+0 9 0.001
1 0 0.067
0 1 0.062
0 2 0.058
@@ -128,16 +129,17 @@
0 47 0.129 ██████▍
0 48 0.065 ███▏
0 49 0.032 █▌
+exponentialTimeDecayedSum
1 0 1
-0 1 0.36787944117144233
-0 2 0.1353352832366127
-0 3 0.04978706836786395
-0 4 0.018315638888734186
-0 5 0.00673794699908547
-0 6 0.0024787521766663594
-0 7 0.0009118819655545166
-0 8 0.00033546262790251196
-0 9 0.0001234098040866796
+0 1 0.368
+0 2 0.135
+0 3 0.05
+0 4 0.018
+0 5 0.007
+0 6 0.002
+0 7 0.001
+0 8 0
+0 9 0
1 0 1
0 1 0.905
0 2 0.819
@@ -258,16 +260,17 @@
0 47 0.136 ██████▋
0 48 0.05 ██▌
0 49 0.018 ▊
+exponentialTimeDecayedMax
1 0 1
-0 1 0.36787944117144233
-0 2 0.1353352832366127
-0 3 0.04978706836786395
-0 4 0.018315638888734186
-0 5 0.00673794699908547
-0 6 0.0024787521766663594
-0 7 0.0009118819655545166
-0 8 0.00033546262790251196
-0 9 0.0001234098040866796
+0 1 0.368
+0 2 0.135
+0 3 0.05
+0 4 0.018
+0 5 0.007
+0 6 0.002
+0 7 0.001
+0 8 0
+0 9 0
1 0 1
0 1 0.905
0 2 0.819
@@ -388,16 +391,17 @@
0 47 0.135 ██████▋
0 48 0.05 ██▍
0 49 0.018 ▊
+exponentialTimeDecayedCount
1 0 1
-0 1 1.3678794411714423
-0 2 1.5032147244080551
-0 3 1.553001792775919
-0 4 1.5713174316646532
-0 5 1.5780553786637386
-0 6 1.5805341308404048
-0 7 1.5814460128059595
-0 8 1.581781475433862
-0 9 1.5819048852379487
+0 1 1.368
+0 2 1.503
+0 3 1.553
+0 4 1.571
+0 5 1.578
+0 6 1.581
+0 7 1.581
+0 8 1.582
+0 9 1.582
1 0 1
0 1 1.905
0 2 2.724
@@ -518,16 +522,17 @@
0 47 10.422 ██████████████████████████
0 48 10.43 ██████████████████████████
0 49 10.438 ██████████████████████████
+exponentialTimeDecayedAvg
1 0 1
-0 1 0.2689414213699951
-0 2 0.09003057317038046
-0 3 0.032058603280084995
-0 4 0.01165623095603961
-0 5 0.004269778545282112
-0 6 0.0015683003158864733
-0 7 0.000576612769687006
-0 8 0.00021207899644323433
-0 9 0.00007801341612780745
+0 1 0.269
+0 2 0.09
+0 3 0.032
+0 4 0.012
+0 5 0.004
+0 6 0.002
+0 7 0.001
+0 8 0
+0 9 0
1 0 1
0 1 0.475
0 2 0.301
@@ -648,3 +653,24 @@
0 47 0.206 ████████████████████▋
0 48 0.201 ████████████████████
0 49 0.196 ███████████████████▌
+Check `exponentialTimeDecayed.*` supports sliding windows
+2 1 3.010050167084 2 3.030251507111 0.993333444442
+1 2 7.060905027605 4.080805360107 4.02030134086 1.756312382816
+0 3 12.091654548833 5.101006700134 5.000500014167 2.418089094006
+4 4 11.050650848754 5.050250835421 5.000500014167 2.209909172572
+5 5 9.970249502081 5 5.000500014167 1.993850509716
+1 6 20.07305726224 10.202013400268 5.000500014167 4.014210020072
+0 7 15.991544871125 10.100501670842 3.98029867414 4.017674596889
+10 8 10.980198673307 10 2.970248507056 3.696727276261
+Check `exponentialTimeDecayedMax` works with negative values
+2 1 -1.010050167084
+1 2 -1
+10 3 -0.990049833749
+4 4 -0.980198673307
+5 5 -1.010050167084
+1 6 -1
+10 7 -0.990049833749
+10 8 -0.980198673307
+10 9 -9.801986733068
+9.81 10 -9.801986733068
+9.9 11 -9.712388869079
diff --git a/tests/queries/0_stateless/02020_exponential_smoothing.sql b/tests/queries/0_stateless/02020_exponential_smoothing.sql
index a39b09a883da..462081b12d6c 100644
--- a/tests/queries/0_stateless/02020_exponential_smoothing.sql
+++ b/tests/queries/0_stateless/02020_exponential_smoothing.sql
@@ -1,5 +1,6 @@
--- exponentialMovingAverage
-SELECT number = 0 AS value, number AS time, exponentialMovingAverage(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10);
+SELECT 'exponentialMovingAverage';
+
+SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialMovingAverage(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10));
SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialMovingAverage(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10));
SELECT number AS value, number AS time, exponentialMovingAverage(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10);
@@ -32,8 +33,9 @@ FROM
FROM numbers(50)
);
--- exponentialTimeDecayedSum
-SELECT number = 0 AS value, number AS time, exponentialTimeDecayedSum(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10);
+SELECT 'exponentialTimeDecayedSum';
+
+SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedSum(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10));
SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedSum(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10));
SELECT number AS value, number AS time, exponentialTimeDecayedSum(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10);
@@ -66,8 +68,9 @@ FROM
FROM numbers(50)
);
--- exponentialTimeDecayedMax
-SELECT number = 0 AS value, number AS time, exponentialTimeDecayedMax(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10);
+SELECT 'exponentialTimeDecayedMax';
+
+SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedMax(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10));
SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedMax(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10));
SELECT number AS value, number AS time, exponentialTimeDecayedMax(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10);
@@ -100,8 +103,9 @@ FROM
FROM numbers(50)
);
--- exponentialTimeDecayedCount
-SELECT number = 0 AS value, number AS time, exponentialTimeDecayedCount(1)(time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10);
+SELECT 'exponentialTimeDecayedCount';
+
+SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedCount(1)(time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10));
SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedCount(10)(time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10));
SELECT number AS value, number AS time, exponentialTimeDecayedCount(1)(time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10);
@@ -134,8 +138,9 @@ FROM
FROM numbers(50)
);
--- exponentialTimeDecayedAvg
-SELECT number = 0 AS value, number AS time, exponentialTimeDecayedAvg(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10);
+SELECT 'exponentialTimeDecayedAvg';
+
+SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedAvg(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10));
SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedAvg(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10));
SELECT number AS value, number AS time, exponentialTimeDecayedAvg(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10);
@@ -167,3 +172,70 @@ FROM
exponentialTimeDecayedAvg(100)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth
FROM numbers(50)
);
+
+SELECT 'Check `exponentialTimeDecayed.*` supports sliding windows';
+
+SELECT
+ x,
+ t,
+ round(sum, 12),
+ round(max, 12),
+ round(count, 12),
+ round(avg, 12)
+FROM
+(
+ SELECT
+ d[1] AS x,
+ d[2] AS t,
+ exponentialTimeDecayedSum(100)(x, t) OVER w AS sum,
+ exponentialTimeDecayedMax(100)(x, t) OVER w AS max,
+ exponentialTimeDecayedCount(100)(t) OVER w AS count,
+ exponentialTimeDecayedAvg(100)(x, t) OVER w AS avg
+ FROM
+ (
+ SELECT [[2, 1], [1, 2], [0, 3], [4, 4], [5, 5], [1, 6], [0, 7], [10, 8]] AS d
+ )
+ ARRAY JOIN d
+ WINDOW w AS (ORDER BY 1 ASC Rows BETWEEN 2 PRECEDING AND 2 FOLLOWING)
+);
+
+SELECT
+ x,
+ t,
+ round(sum, 12),
+ round(max, 12),
+ round(count, 12),
+ round(avg, 12)
+FROM
+(
+ SELECT
+ sin(number) AS x,
+ number AS t,
+ exponentialTimeDecayedSum(100)(x, t) OVER w AS sum,
+ exponentialTimeDecayedMax(100)(x, t) OVER w AS max,
+ exponentialTimeDecayedCount(100)(t) OVER w AS count,
+ exponentialTimeDecayedAvg(100)(x, t) OVER w AS avg
+ FROM numbers(1000000)
+ WINDOW w AS (ORDER BY 1 ASC Rows BETWEEN 2 PRECEDING AND 2 FOLLOWING)
+)
+FORMAT `Null`;
+
+SELECT 'Check `exponentialTimeDecayedMax` works with negative values';
+
+SELECT
+ x,
+ t,
+ round(max, 12)
+FROM
+(
+ SELECT
+ d[1] AS x,
+ d[2] AS t,
+ exponentialTimeDecayedMax(100)(-x, t) OVER w AS max
+ FROM
+ (
+ SELECT [[2, 1], [1, 2], [10, 3], [4, 4], [5, 5], [1, 6], [10, 7], [10, 8], [10, 9], [9.81, 10], [9.9, 11]] AS d
+ )
+ ARRAY JOIN d
+ WINDOW w AS (ORDER BY 1 ASC Rows BETWEEN 2 PRECEDING AND 2 FOLLOWING)
+);
diff --git a/tests/testflows/aes_encryption/aes_encryption_env/clickhouse-service.yml b/tests/testflows/aes_encryption/aes_encryption_env/clickhouse-service.yml
index 0c9352dbc0b6..74a56b63aabc 100644
--- a/tests/testflows/aes_encryption/aes_encryption_env/clickhouse-service.yml
+++ b/tests/testflows/aes_encryption/aes_encryption_env/clickhouse-service.yml
@@ -2,7 +2,7 @@ version: '2.3'
services:
clickhouse:
- image: clickhouse/integration-test
+ image: altinityinfra/integration-test
expose:
- "9000"
- "9009"
diff --git a/tests/testflows/datetime64_extended_range/datetime64_extended_range_env/clickhouse-service.yml b/tests/testflows/datetime64_extended_range/datetime64_extended_range_env/clickhouse-service.yml
index 0c9352dbc0b6..74a56b63aabc 100644
--- a/tests/testflows/datetime64_extended_range/datetime64_extended_range_env/clickhouse-service.yml
+++ b/tests/testflows/datetime64_extended_range/datetime64_extended_range_env/clickhouse-service.yml
@@ -2,7 +2,7 @@ version: '2.3'
services:
clickhouse:
- image: clickhouse/integration-test
+ image: altinityinfra/integration-test
expose:
- "9000"
- "9009"
diff --git a/tests/testflows/example/example_env/clickhouse-service.yml b/tests/testflows/example/example_env/clickhouse-service.yml
index 0c9352dbc0b6..74a56b63aabc 100644
--- a/tests/testflows/example/example_env/clickhouse-service.yml
+++ b/tests/testflows/example/example_env/clickhouse-service.yml
@@ -2,7 +2,7 @@ version: '2.3'
services:
clickhouse:
- image: clickhouse/integration-test
+ image: altinityinfra/integration-test
expose:
- "9000"
- "9009"
diff --git a/tests/testflows/extended_precision_data_types/extended-precision-data-type_env/clickhouse-service.yml b/tests/testflows/extended_precision_data_types/extended-precision-data-type_env/clickhouse-service.yml
index afb31f77c94c..9162d06bf27d 100644
--- a/tests/testflows/extended_precision_data_types/extended-precision-data-type_env/clickhouse-service.yml
+++ b/tests/testflows/extended_precision_data_types/extended-precision-data-type_env/clickhouse-service.yml
@@ -2,7 +2,7 @@ version: '2.3'
services:
clickhouse:
- image: clickhouse/integration-test
+ image: altinityinfra/integration-test
expose:
- "9000"
- "9009"
diff --git a/tests/testflows/kerberos/kerberos_env/clickhouse-service.yml b/tests/testflows/kerberos/kerberos_env/clickhouse-service.yml
index 45b975db00d7..7671684f6ee0 100644
--- a/tests/testflows/kerberos/kerberos_env/clickhouse-service.yml
+++ b/tests/testflows/kerberos/kerberos_env/clickhouse-service.yml
@@ -2,7 +2,7 @@ version: '2.3'
services:
clickhouse:
- image: clickhouse/integration-test
+ image: altinityinfra/integration-test
expose:
- "9000"
- "9009"
diff --git a/tests/testflows/ldap/authentication/authentication_env/clickhouse-service.yml b/tests/testflows/ldap/authentication/authentication_env/clickhouse-service.yml
index 74661f6fa04b..f8cc0a62c67c 100644
--- a/tests/testflows/ldap/authentication/authentication_env/clickhouse-service.yml
+++ b/tests/testflows/ldap/authentication/authentication_env/clickhouse-service.yml
@@ -2,7 +2,7 @@ version: '2.3'
services:
clickhouse:
- image: clickhouse/integration-test
+ image: altinityinfra/integration-test
init: true
expose:
- "9000"
diff --git a/tests/testflows/ldap/authentication/ldap_authentication_env/clickhouse-service.yml b/tests/testflows/ldap/authentication/ldap_authentication_env/clickhouse-service.yml
index 0c9352dbc0b6..74a56b63aabc 100644
--- a/tests/testflows/ldap/authentication/ldap_authentication_env/clickhouse-service.yml
+++ b/tests/testflows/ldap/authentication/ldap_authentication_env/clickhouse-service.yml
@@ -2,7 +2,7 @@ version: '2.3'
services:
clickhouse:
- image: clickhouse/integration-test
+ image: altinityinfra/integration-test
expose:
- "9000"
- "9009"
diff --git a/tests/testflows/ldap/external_user_directory/external_user_directory_env/clickhouse-service.yml b/tests/testflows/ldap/external_user_directory/external_user_directory_env/clickhouse-service.yml
index 74661f6fa04b..f8cc0a62c67c 100644
--- a/tests/testflows/ldap/external_user_directory/external_user_directory_env/clickhouse-service.yml
+++ b/tests/testflows/ldap/external_user_directory/external_user_directory_env/clickhouse-service.yml
@@ -2,7 +2,7 @@ version: '2.3'
services:
clickhouse:
- image: clickhouse/integration-test
+ image: altinityinfra/integration-test
init: true
expose:
- "9000"
diff --git a/tests/testflows/ldap/external_user_directory/ldap_external_user_directory_env/clickhouse-service.yml b/tests/testflows/ldap/external_user_directory/ldap_external_user_directory_env/clickhouse-service.yml
index 0c9352dbc0b6..74a56b63aabc 100644
--- a/tests/testflows/ldap/external_user_directory/ldap_external_user_directory_env/clickhouse-service.yml
+++ b/tests/testflows/ldap/external_user_directory/ldap_external_user_directory_env/clickhouse-service.yml
@@ -2,7 +2,7 @@ version: '2.3'
services:
clickhouse:
- image: clickhouse/integration-test
+ image: altinityinfra/integration-test
expose:
- "9000"
- "9009"
diff --git a/tests/testflows/ldap/role_mapping/ldap_role_mapping_env/clickhouse-service.yml b/tests/testflows/ldap/role_mapping/ldap_role_mapping_env/clickhouse-service.yml
index 0c9352dbc0b6..74a56b63aabc 100644
--- a/tests/testflows/ldap/role_mapping/ldap_role_mapping_env/clickhouse-service.yml
+++ b/tests/testflows/ldap/role_mapping/ldap_role_mapping_env/clickhouse-service.yml
@@ -2,7 +2,7 @@ version: '2.3'
services:
clickhouse:
- image: clickhouse/integration-test
+ image: altinityinfra/integration-test
expose:
- "9000"
- "9009"
diff --git a/tests/testflows/ldap/role_mapping/role_mapping_env/clickhouse-service.yml b/tests/testflows/ldap/role_mapping/role_mapping_env/clickhouse-service.yml
index 7ff0139ab9be..3fe80bfce343 100644
--- a/tests/testflows/ldap/role_mapping/role_mapping_env/clickhouse-service.yml
+++ b/tests/testflows/ldap/role_mapping/role_mapping_env/clickhouse-service.yml
@@ -2,7 +2,7 @@ version: '2.3'
services:
clickhouse:
- image: clickhouse/integration-test
+ image: altinityinfra/integration-test
init: true
expose:
- "9000"
diff --git a/tests/testflows/map_type/map_type_env/clickhouse-service.yml b/tests/testflows/map_type/map_type_env/clickhouse-service.yml
index afb31f77c94c..9162d06bf27d 100755
--- a/tests/testflows/map_type/map_type_env/clickhouse-service.yml
+++ b/tests/testflows/map_type/map_type_env/clickhouse-service.yml
@@ -2,7 +2,7 @@ version: '2.3'
services:
clickhouse:
- image: clickhouse/integration-test
+ image: altinityinfra/integration-test
expose:
- "9000"
- "9009"
diff --git a/tests/testflows/rbac/rbac_env/clickhouse-service.yml b/tests/testflows/rbac/rbac_env/clickhouse-service.yml
index c808372d7e94..4634f3b8721f 100755
--- a/tests/testflows/rbac/rbac_env/clickhouse-service.yml
+++ b/tests/testflows/rbac/rbac_env/clickhouse-service.yml
@@ -2,7 +2,7 @@ version: '2.3'
services:
clickhouse:
- image: clickhouse/integration-test
+ image: altinityinfra/integration-test
init: true
expose:
- "9000"
diff --git a/tests/testflows/runner b/tests/testflows/runner
index 0208512762ce..1cf2a784ca0e 100755
--- a/tests/testflows/runner
+++ b/tests/testflows/runner
@@ -14,7 +14,7 @@ DEFAULT_CLICKHOUSE_ROOT = os.path.abspath(os.path.join(CUR_FILE_DIR, "../../"))
CURRENT_WORK_DIR = os.getcwd()
CONTAINER_NAME = "clickhouse_testflows_tests"
-DIND_TESTFLOWS_TESTS_IMAGE_NAME = "clickhouse/testflows-runner"
+DIND_TESTFLOWS_TESTS_IMAGE_NAME = "altinityinfra/testflows-runner"
def check_args_and_update_paths(args):
if not os.path.isabs(args.binary):
diff --git a/tests/testflows/window_functions/window_functions_env/clickhouse-service.yml b/tests/testflows/window_functions/window_functions_env/clickhouse-service.yml
index afb31f77c94c..9162d06bf27d 100755
--- a/tests/testflows/window_functions/window_functions_env/clickhouse-service.yml
+++ b/tests/testflows/window_functions/window_functions_env/clickhouse-service.yml
@@ -2,7 +2,7 @@ version: '2.3'
services:
clickhouse:
- image: clickhouse/integration-test
+ image: altinityinfra/integration-test
expose:
- "9000"
- "9009"
diff --git a/utils/clickhouse-docker b/utils/clickhouse-docker
index cfe515f1de54..34b637f0eaad 100755
--- a/utils/clickhouse-docker
+++ b/utils/clickhouse-docker
@@ -26,11 +26,11 @@ then
# https://stackoverflow.com/a/39454426/1555175
wget -nv https://registry.hub.docker.com/v1/repositories/clickhouse/clickhouse-server/tags -O - | sed -e 's/[][]//g' -e 's/"//g' -e 's/ //g' | tr '}' '\n' | awk -F: '{print $3}'
else
- docker pull clickhouse/clickhouse-server:${param}
+ docker pull altinityinfra/clickhouse-server:${param}
tmp_dir=$(mktemp -d -t ci-XXXXXXXXXX) # older version require /nonexistent folder to exist to run clickhouse client :D
chmod 777 ${tmp_dir}
set -e
- containerid=`docker run -v${tmp_dir}:/nonexistent -d clickhouse/clickhouse-server:${param}`
+ containerid=`docker run -v${tmp_dir}:/nonexistent -d altinityinfra/clickhouse-server:${param}`
set +e
while :
do