Merged
9 changes: 8 additions & 1 deletion .github/actions/test_sqllogic_standalone_linux/action.yml
@@ -15,8 +15,13 @@ inputs:
default: ""
handlers:
description: "logic test handlers, mysql,http,clickhouse"
required: false
required: true
default: ""
storage-format:
description: "storage format for databend query to test"
required: true
default: all

runs:
using: "composite"
steps:
@@ -27,11 +32,13 @@ runs:
sha: ${{ github.sha }}
target: ${{ inputs.target }}
- name: Run sqllogic Tests with Standalone mode
if: inputs.storage-format == 'all' || inputs.storage-format == 'parquet'
shell: bash
env:
TEST_HANDLERS: ${{ inputs.handlers }}
run: bash ./scripts/ci/ci-run-sqllogic-tests.sh ${{ inputs.dirs }}
- name: Run native sqllogic Tests with Standalone mode
if: inputs.storage-format == 'all' || inputs.storage-format == 'native'
shell: bash
env:
TEST_HANDLERS: ${{ inputs.handlers }}
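
The new `storage-format` input decides which of the two steps above actually run: `parquet` keeps only the existing suite, `native` keeps only the native one, and the default `all` runs both. A rough shell sketch of that gating follows; `STORAGE_FORMAT` and `DIRS` stand in for the action's inputs and are assumptions of this example, and the native step's script name is not visible in this hunk, so it is left as a placeholder.

```bash
# Sketch of the storage-format gating added above (illustrative, not part of the PR).
STORAGE_FORMAT="${STORAGE_FORMAT:-all}"
DIRS="${DIRS:-base}"

if [ "$STORAGE_FORMAT" = "all" ] || [ "$STORAGE_FORMAT" = "parquet" ]; then
    bash ./scripts/ci/ci-run-sqllogic-tests.sh "$DIRS"
fi
if [ "$STORAGE_FORMAT" = "all" ] || [ "$STORAGE_FORMAT" = "native" ]; then
    : # run the native sqllogic variant here (script name not shown in this diff)
fi
```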
34 changes: 31 additions & 3 deletions .github/workflows/dev-linux.yml
@@ -157,10 +157,8 @@ jobs:
strategy:
matrix:
dirs:
- "base"
- "query"
- "standalone"
- "ydb"
- "crdb"
- "duckdb"
handlers:
@@ -172,15 +170,45 @@
with:
dirs: ${{ matrix.dirs }}
handlers: ${{ matrix.handlers }}
storage-format: all
- name: Upload failure
if: failure() || cancelled()
uses: ./.github/actions/artifact_failure
with:
name: test-sqllogic-standalone-${{ matrix.handlers }}-${{ matrix.dirs }}

test_sqllogic_standalone_with_native:
name: test_sqllogic_standalone_${{ matrix.dirs }}_${{ matrix.format }}
runs-on: [self-hosted, X64, Linux, 4c8g]
needs: build
strategy:
matrix:
dirs:
- "base"
- "ydb"
format:
- "parquet"
- "native"
handlers:
- "mysql,http,clickhouse"
steps:
- uses: actions/checkout@v3
- uses: ./.github/actions/test_sqllogic_standalone_linux
timeout-minutes: 30
with:
dirs: ${{ matrix.dirs }}
handlers: ${{ matrix.handlers }}
storage-format: ${{ matrix.format }}
- name: Upload failure
if: failure() || cancelled()
uses: ./.github/actions/artifact_failure
with:
name: test-sqllogic-standalone-${{ matrix.handlers }}-${{ matrix.dirs }}_${{ matrix.format }}


test_sqllogic_management_mode:
timeout-minutes: 30
name: test_sqllogic_management
name: test_sqllogic_${{ matrix.dirs }}
runs-on: [self-hosted, X64, Linux, 4c8g]
needs: build
strategy:
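
For orientation, the new `test_sqllogic_standalone_with_native` job fans out over dirs × format, so `base` and `ydb` each run once with `parquet` and once with `native`. An illustrative loop (not part of the workflow) that enumerates those combinations:

```bash
# Illustrative only: the four matrix combinations the new job produces.
for dirs in base ydb; do
    for format in parquet native; do
        echo "sqllogic standalone: dirs=$dirs storage-format=$format handlers=mysql,http,clickhouse"
    done
done
```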
2 changes: 1 addition & 1 deletion scripts/ci/deploy/config/databend-meta-node-share-1.toml
@@ -14,7 +14,7 @@ raft_dir = "./.databend/share_meta1"
raft_api_port = 28103

# Assign raft_{listen|advertise}_host in test config.
# This allows you to catch a bug in unit tests when something goes wrong in raft meta nodes communication.
# This allows you to catch a bug in unit tests when something goes wrong in raft meta nodes communication.
raft_listen_host = "127.0.0.1"
raft_advertise_host = "localhost"

6 changes: 3 additions & 3 deletions scripts/ci/deploy/config/databend-meta-node-share-2.toml
@@ -2,7 +2,7 @@
# databend-meta -c databend-meta-node-1.toml

log_dir = "./.databend/logs2"
admin_api_address = "0.0.0.0:38101"
admin_api_address = "0.0.0.0:29101"
grpc_api_address = "0.0.0.0:19191"
# databend-query fetch this address to update its databend-meta endpoints list,
# in case databend-meta cluster changes.
@@ -11,10 +11,10 @@ grpc_api_advertise_host = "127.0.0.1"
[raft_config]
id = 1
raft_dir = "./.databend/share_meta2"
raft_api_port = 38103
raft_api_port = 29103

# Assign raft_{listen|advertise}_host in test config.
# This allows you to catch a bug in unit tests when something goes wrong in raft meta nodes communication.
# This allows you to catch a bug in unit tests when something goes wrong in raft meta nodes communication.
raft_listen_host = "127.0.0.1"
raft_advertise_host = "localhost"

14 changes: 7 additions & 7 deletions scripts/ci/deploy/config/databend-query-node-share-1.toml
@@ -6,29 +6,29 @@ max_active_sessions = 256
wait_timeout_mills = 5000

# For flight rpc.
flight_api_address = "0.0.0.0:59091"
flight_api_address = "0.0.0.0:19091"

# Databend Query http address.
# For admin REST API.
admin_api_address = "0.0.0.0:58080"
admin_api_address = "0.0.0.0:18080"

# Databend Query metrics REST API.
metric_api_address = "0.0.0.0:57070"
metric_api_address = "0.0.0.0:17070"

# Databend Query MySQL Handler.
mysql_handler_host = "0.0.0.0"
mysql_handler_port = 53307
mysql_handler_port = 13307

# Databend Query ClickHouse Handler.
clickhouse_http_handler_host = "0.0.0.0"
clickhouse_http_handler_port = 58124
clickhouse_http_handler_port = 18124

# Databend Query HTTP Handler.
http_handler_host = "0.0.0.0"
http_handler_port = 58000
http_handler_port = 18000

flight_sql_handler_host = "0.0.0.0"
flight_sql_handler_port = 58900
flight_sql_handler_port = 18900

tenant_id = "shared_tenant"
cluster_id = "test_cluster"
16 changes: 8 additions & 8 deletions scripts/ci/deploy/config/databend-query-node-share-2.toml
@@ -6,29 +6,29 @@ max_active_sessions = 256
wait_timeout_mills = 5000

# For flight rpc.
flight_api_address = "0.0.0.0:49091"
flight_api_address = "0.0.0.0:29091"

# Databend Query http address.
# For admin REST API.
admin_api_address = "0.0.0.0:48080"
admin_api_address = "0.0.0.0:28080"

# Databend Query metrics REST API.
metric_api_address = "0.0.0.0:47070"
metric_api_address = "0.0.0.0:27070"

# Databend Query MySQL Handler.
mysql_handler_host = "0.0.0.0"
mysql_handler_port = 43307
mysql_handler_port = 23307

# Databend Query ClickHouse Handler.
clickhouse_http_handler_host = "0.0.0.0"
clickhouse_http_handler_port = 48124
clickhouse_http_handler_port = 28124

# Databend Query HTTP Handler.
http_handler_host = "0.0.0.0"
http_handler_port = 48000
http_handler_port = 28000

flight_sql_handler_host = "0.0.0.0"
flight_sql_handler_port = 48901
flight_sql_handler_port = 28901

tenant_id = "to_tenant"
cluster_id = "test_cluster"
@@ -53,7 +53,7 @@ default_compression = 'zstd'
# # echo -n "datafuselabs" | sha256sum
# auth_string = "6db1a2f5da402b43c066fcadcbf78f04260b3236d9035e44dd463f21e29e6f3b"

share_endpoint_address = "127.0.0.1:33003"
share_endpoint_address = "127.0.0.1:23003"

[log]

8 changes: 4 additions & 4 deletions scripts/ci/deploy/databend-query-sharing.sh
@@ -18,16 +18,16 @@ python3 scripts/ci/wait_tcp.py --timeout 10 --port 9191

echo "Start query node1 for sharding data"
nohup target/${BUILD_PROFILE}/databend-query -c scripts/ci/deploy/config/databend-query-node-share-1.toml &
python3 scripts/ci/wait_tcp.py --timeout 30 --port 53307
python3 scripts/ci/wait_tcp.py --timeout 30 --port 13307

echo 'Start query node2 databend-meta...'
nohup target/${BUILD_PROFILE}/databend-meta -c scripts/ci/deploy/config/databend-meta-node-share-2.toml &
python3 scripts/ci/wait_tcp.py --timeout 10 --port 19191

echo 'Start query node2 open-sharing...'
nohup target/${BUILD_PROFILE}/open-sharing --tenant=shared_tenant --storage-type=s3 --storage-s3-bucket=testbucket --storage-s3-endpoint-url=http://127.0.0.1:9900 --storage-s3-access-key-id=minioadmin --storage-s3-secret-access-key=minioadmin --storage-allow-insecure --share-endpoint-address=127.0.0.1:33003 &
python3 scripts/ci/wait_tcp.py --timeout 10 --port 33003
nohup target/${BUILD_PROFILE}/open-sharing --tenant=shared_tenant --storage-type=s3 --storage-s3-bucket=testbucket --storage-s3-endpoint-url=http://127.0.0.1:9900 --storage-s3-access-key-id=minioadmin --storage-s3-secret-access-key=minioadmin --storage-allow-insecure --share-endpoint-address=127.0.0.1:23003 &
python3 scripts/ci/wait_tcp.py --timeout 10 --port 23003

echo "Start query node2 for sharding data"
nohup target/${BUILD_PROFILE}/databend-query -c scripts/ci/deploy/config/databend-query-node-share-2.toml &
python3 scripts/ci/wait_tcp.py --timeout 30 --port 43307
python3 scripts/ci/wait_tcp.py --timeout 30 --port 23307
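
A small sanity check, assuming the deploy script above has been run from the repository root: every remapped endpoint introduced by this PR should accept a TCP connection. The ports are taken from the updated configs (the two meta nodes, the two MySQL handlers, and the open-sharing endpoint).

```bash
# Ports from this PR's configs: 9191/19191 meta, 13307/23307 MySQL handlers,
# 23003 open-sharing share endpoint.
for port in 9191 19191 13307 23307 23003; do
    python3 scripts/ci/wait_tcp.py --timeout 5 --port "$port"
done
```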
3 changes: 1 addition & 2 deletions src/query/sharing-endpoint/README.md
@@ -48,7 +48,7 @@ add `share_endpoint_address` field on your databend query config file
# databend-query -c databend_query_config_spec.toml
[query]
...
share_endpoint_address = "127.0.0.1:33003" # receive shared information from open sharing
share_endpoint_address = "127.0.0.1:13003" # receive shared information from open sharing
...
```

@@ -78,4 +78,3 @@ For **API** changes, please follow the following steps:
1. provide an RFC to explain the reason why we need the additional API or why we need to change the existing API.
2. update the protocol.md to reflect the changes.
3. update the implementation to reflect the changes.

2 changes: 1 addition & 1 deletion src/query/sharing-endpoint/src/configs/outer_v0.rs
@@ -28,7 +28,7 @@ use super::inner::Config as InnerConfig;
pub struct Config {
#[clap(long, default_value = "")]
pub tenant: String,
#[clap(long, default_value = "127.0.0.1:33003")]
#[clap(long, default_value = "127.0.0.1:13003")]
pub share_endpoint_address: String,
// Storage backend config.
#[clap(flatten)]
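
Because the field is a `clap` long option, the new default only applies when `--share-endpoint-address` is omitted; the CI deploy script shown earlier still passes it explicitly. An abbreviated invocation, with the storage flags elided, looks like this:

```bash
# Abbreviated from scripts/ci/deploy/databend-query-sharing.sh; the real
# command also passes the S3 storage flags shown earlier in this PR.
target/${BUILD_PROFILE}/open-sharing \
    --tenant=shared_tenant \
    --share-endpoint-address=127.0.0.1:23003
```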
6 changes: 3 additions & 3 deletions tests/shell_env.sh
@@ -9,10 +9,10 @@ export QUERY_MYSQL_HANDLER_HOST=${QUERY_MYSQL_HANDLER_HOST:="127.0.0.1"}
export QUERY_MYSQL_HANDLER_PORT=${QUERY_MYSQL_HANDLER_PORT:="3307"}
export QUERY_HTTP_HANDLER_PORT=${QUERY_HTTP_HANDLER_PORT:="8000"}
export QUERY_CLICKHOUSE_HTTP_HANDLER_PORT=${QUERY_CLICKHOUSE_HTTP_HANDLER_PORT:="8124"}
export QUERY_MYSQL_HANDLER_SHARE_1_PORT="53307"
export QUERY_MYSQL_HANDLER_SHARE_2_PORT="43307"
export QUERY_MYSQL_HANDLER_SHARE_1_PORT="13307"
export QUERY_MYSQL_HANDLER_SHARE_2_PORT="23307"

export MYSQL_CLIENT_CONNECT="mysql -uroot --host ${QUERY_MYSQL_HANDLER_HOST} --port ${QUERY_MYSQL_HANDLER_PORT} ${MYSQL_DATABASE} -s"

export MYSQL_CLIENT_SHARE_1_CONNECT="mysql -uroot --host ${QUERY_MYSQL_HANDLER_HOST} --port ${QUERY_MYSQL_HANDLER_SHARE_1_PORT} ${MYSQL_DATABASE} -s"
export MYSQL_CLIENT_SHARE_2_CONNECT="mysql -uroot --host ${QUERY_MYSQL_HANDLER_HOST} --port ${QUERY_MYSQL_HANDLER_SHARE_2_PORT} ${MYSQL_DATABASE} -s"
export MYSQL_CLIENT_SHARE_2_CONNECT="mysql -uroot --host ${QUERY_MYSQL_HANDLER_HOST} --port ${QUERY_MYSQL_HANDLER_SHARE_2_PORT} ${MYSQL_DATABASE} -s"
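
A minimal usage sketch, assuming both share query nodes from this PR are listening on the remapped ports and the script is sourced from the repository root:

```bash
. tests/shell_env.sh
echo "SELECT 1" | $MYSQL_CLIENT_SHARE_1_CONNECT   # node1, port 13307
echo "SELECT 1" | $MYSQL_CLIENT_SHARE_2_CONNECT   # node2, port 23307
```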
Original file line number Diff line number Diff line change
@@ -3,11 +3,9 @@
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../../../shell_env.sh



echo "drop share if exists test_share" | $MYSQL_CLIENT_SHARE_1_CONNECT
echo "drop database if exists test_database" | $MYSQL_CLIENT_SHARE_1_CONNECT
echo "drop database if exists shared" | $SHARING_MYSQL_CLIENT_1_CONNECT
echo "drop database if exists shared" | $SHARING_MYSQL_CLIENT_1_CONNECT

# prepare shared database and table
echo "CREATE SHARE test_share" | $MYSQL_CLIENT_SHARE_1_CONNECT
@@ -21,13 +19,12 @@ echo "SHOW SHARES" | $MYSQL_CLIENT_SHARE_1_CONNECT | awk '{print $(NF-4), $(NF-3)

# get shared database and table from another tenant
echo "drop share endpoint if exists to_share" | $MYSQL_CLIENT_SHARE_2_CONNECT
echo "create share endpoint to_share url='http://127.0.0.1:33003' tenant=shared_tenant" | $MYSQL_CLIENT_SHARE_2_CONNECT
echo "create share endpoint to_share url='http://127.0.0.1:23003' tenant=shared_tenant" | $MYSQL_CLIENT_SHARE_2_CONNECT
echo "SHOW SHARES" | $MYSQL_CLIENT_SHARE_2_CONNECT | awk '{print $(NF-4), $(NF-3), $(NF-2), $(NF-1), $NF}'
echo "CREATE DATABASE if not exists shared_db FROM SHARE shared_tenant.test_share" | $MYSQL_CLIENT_SHARE_2_CONNECT
echo "SELECT * FROM shared_db.t1" | $MYSQL_CLIENT_SHARE_2_CONNECT


## Drop table.
echo "drop database if exists shared_db" | $MYSQL_CLIENT_SHARE_2_CONNECT
echo "drop database if exists shared_db" | $MYSQL_CLIENT_SHARE_2_CONNECT
echo "drop share if exists test_share" | $MYSQL_CLIENT_SHARE_1_CONNECT
echo "drop database if exists test_database" | $MYSQL_CLIENT_SHARE_1_CONNECT