1 change: 1 addition & 0 deletions tests/docker/Dockerfile
@@ -71,6 +71,7 @@ RUN pip3 install --upgrade cffi virtualenv pyasn1 boto3 pycrypto pywinrm ipaddre

# AutoMQ inject start
RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m | sed 's/arm64/aarch64/')-2.15.15.zip" -o "awscliv2.zip" && unzip awscliv2.zip && ./aws/install && rm -rf awscliv2.zip aws
RUN curl -fsSL https://get.docker.com -o get-docker.sh && sudo sh get-docker.sh
# AutoMQ inject end

COPY --from=build-native-image /build/kafka-binary/ /opt/kafka-binary/
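
The added layer runs the get.docker.com convenience script, which installs the Docker engine packages, including the docker client, inside the ducker image; with the socket mount added to ducker-ak below, that client talks to the host daemon rather than to a daemon inside the container. A minimal, illustrative check that the client actually landed in the built image; this helper is hypothetical and not part of the PR:

    import shutil
    import subprocess

    def docker_client_version():
        # Confirm the client installed by the Dockerfile layer above is on PATH.
        if shutil.which("docker") is None:
            raise RuntimeError("docker CLI not found in the ducker image")
        out = subprocess.run(["docker", "--version"],
                             capture_output=True, text=True, check=True)
        return out.stdout.strip()  # e.g. "Docker version <x.y.z>, build ..."
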
25 changes: 17 additions & 8 deletions tests/docker/ducker-ak
@@ -293,14 +293,23 @@ docker_run() {
if [[ -n ${port_mapping} ]]; then
expose_ports="${expose_ports} -p ${port_mapping}:${port_mapping}"
fi

# Invoke docker-run. We need privileged mode to be able to run iptables
# and mount FUSE filesystems inside the container. We also need it to
# run iptables inside the container.
must_do -v docker run --privileged \
-d -t -h "${node}" --network ducknet "${expose_ports}" \
--memory=${docker_run_memory_limit} --memory-swappiness=1 \
-v "${kafka_dir}:/opt/kafka-dev" --name "${node}" -- "${image_name}"
# AutoMQ inject start
if [[ "${node}" == "ducker01" ]]; then
must_do -v docker run --privileged \
-d -t -h "${node}" --network ducknet "${expose_ports}" \
--memory=${docker_run_memory_limit} --memory-swappiness=1 \
-v /var/run/docker.sock:/var/run/docker.sock \
-v "${kafka_dir}:/opt/kafka-dev" --name "${node}" -- "${image_name}"
else
# AutoMQ inject end
# Invoke docker-run. We need privileged mode to be able to run iptables
# and mount FUSE filesystems inside the container. We also need it to
# run iptables inside the container.
must_do -v docker run --privileged \
-d -t -h "${node}" --network ducknet "${expose_ports}" \
--memory=${docker_run_memory_limit} --memory-swappiness=1 \
-v "${kafka_dir}:/opt/kafka-dev" --name "${node}" -- "${image_name}"
fi
}

setup_custom_ducktape() {
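
The practical effect of the branch above: ducker01, the node the ducktape test driver runs on, is the only container that gets the host's /var/run/docker.sock bind-mounted, so docker and docker compose invocations made inside it are served by the host daemon and launch sibling containers rather than nested ones. A minimal preflight sketch of the kind of check test code could make, assuming it runs inside ducker01; it is not part of the PR:

    import os
    import subprocess

    def host_docker_reachable():
        """True if the bind-mounted host socket is present and 'docker info' succeeds."""
        if not os.path.exists("/var/run/docker.sock"):
            return False  # not ducker01, or the socket mount above was not applied
        result = subprocess.run(["sudo", "docker", "info"],
                                stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        return result.returncode == 0
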
43 changes: 43 additions & 0 deletions tests/docker/iceberg-catalog/docker-compose.yaml
@@ -0,0 +1,43 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

services:
rest:
image: apache/iceberg-rest-fixture
container_name: "${REST_DOCKER_NAME-rest}"
hostname: "${REST_DOCKER_NAME-rest}"
ports:
- 8181:8181
environment:
- AWS_ACCESS_KEY_ID=admin
- AWS_SECRET_ACCESS_KEY=password
- AWS_REGION=us-east-1
- CATALOG_WAREHOUSE=s3://ko3/iceberg/
- CATALOG_IO__IMPL=org.apache.iceberg.aws.s3.S3FileIO
- CATALOG_S3_ENDPOINT=http://10.5.0.2:4566
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:8181/v1/config || exit 1"]
interval: 10s
timeout: 5s
retries: 20
start_period: 30s
networks:
ducknet:
ipv4_address: 10.5.1.4

networks:
ducknet:
name: ducknet
external: true
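
This fixture pins the Iceberg REST catalog to 10.5.1.4:8181 on the external ducknet network, points its warehouse at an S3 endpoint on 10.5.0.2:4566 (LocalStack's default edge port), and gates readiness on GET /v1/config. A hedged sketch of how test code could wait on that same endpoint; the URL comes from the compose file, the polling values are illustrative, and none of this is part of the PR:

    import json
    import time
    import urllib.request

    def wait_for_iceberg_catalog(url="http://10.5.1.4:8181/v1/config", timeout_s=120):
        deadline = time.time() + timeout_s
        while time.time() < deadline:
            try:
                with urllib.request.urlopen(url, timeout=5) as resp:
                    return json.load(resp)  # catalog defaults/overrides once healthy
            except OSError:
                time.sleep(5)
        raise TimeoutError(f"Iceberg REST catalog not reachable at {url}")
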
40 changes: 40 additions & 0 deletions tests/docker/schema-registry/docker-compose.yaml
@@ -0,0 +1,40 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

services:
schema-registry:
image: confluentinc/cp-schema-registry:latest
container_name: "${SCHEMA_REGISTRY_DOCKER_NAME-schema-registry}"
hostname: "${SCHEMA_REGISTRY_DOCKER_NAME-schema-registry}"
ports:
- "8081:8081"
environment:
SCHEMA_REGISTRY_HOST_NAME: ${LOCALSTACK_DOCKER_NAME-schema-registry}
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: ${KAFKA_BOOTSTRAP_SERVERS}
SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081
restart: on-failure
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:8081 || exit 1"]
interval: 10s
timeout: 60s
retries: 30
networks:
ducknet:
ipv4_address: 10.5.1.3

networks:
ducknet:
name: ducknet
external: true
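
The registry is pinned to 10.5.1.3:8081 on ducknet and takes its Kafka bootstrap servers from the KAFKA_BOOTSTRAP_SERVERS variable, which the caller must supply (for example through the env argument of DockerComposeService.start below). A hedged sketch of exercising it over HTTP once healthy; the address comes from the compose file, while the subject name and schema are purely illustrative and not part of the PR:

    import json
    import urllib.request

    def register_string_schema(subject="ducker-test-value",
                               registry_url="http://10.5.1.3:8081"):
        body = json.dumps({"schema": json.dumps({"type": "string"})}).encode("utf-8")
        req = urllib.request.Request(
            f"{registry_url}/subjects/{subject}/versions",
            data=body,
            headers={"Content-Type": "application/vnd.schemaregistry.v1+json"},
            method="POST",
        )
        with urllib.request.urlopen(req) as resp:
            return json.load(resp)["id"]  # id assigned by the registry
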
63 changes: 63 additions & 0 deletions tests/kafkatest/services/external_services.py
@@ -0,0 +1,63 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import subprocess

class DockerComposeService:
"""
A helper class to manage the lifecycle of an external service
defined in a docker-compose.yaml file.
This is NOT a ducktape service and must be managed manually from test code.
"""
def __init__(self, compose_file_path, logger):
"""
:param compose_file_path: Path to the docker-compose.yaml file.
:param logger: The test logger instance.
"""
self.compose_file_path = compose_file_path
self.logger = logger

def start(self, env=None):
"""
Starts the service using 'docker compose up'.
:param env: A dictionary of environment variables to pass to the command.
"""
self.logger.info(f"Manually starting external service from {self.compose_file_path}...")
self._run_command("up -d", env)

def stop(self):
"""
Stops the service using 'docker compose down'.
"""
self.logger.info(f"Manually stopping external service from {self.compose_file_path}...")
self._run_command("down --remove-orphans -v")

def _run_command(self, command, env=None):
env_prefix = ""
if env:
for key, value in env.items():
env_prefix += f"{key}='{value}' "

# Use sudo -E to preserve environment variables for the docker compose command.
cmd = f"{env_prefix} sudo -E docker compose -f {self.compose_file_path} {command}"
Copilot AI commented on Sep 12, 2025: Using shell=True with user-controlled input in subprocess.check_call is a security risk. Consider using subprocess.run with a list of arguments instead of shell=True.
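
One shape the suggested change could take, shown only to illustrate the review comment and not taken from the PR, is to build an argument list and hand the extra variables to subprocess directly instead of interpolating them into a shell string:

    import os
    import subprocess

    # Possible drop-in replacement for DockerComposeService._run_command (sketch).
    def _run_command(self, command, env=None):
        # e.g. command == "up -d"; sudo -E still forwards the merged environment
        cmd = ["sudo", "-E", "docker", "compose", "-f", self.compose_file_path] + command.split()
        merged_env = {**os.environ, **(env or {})}
        self.logger.info(f"Running command: {' '.join(cmd)}")
        subprocess.run(cmd, env=merged_env, check=True)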


try:
self.logger.info(f"Running command: {cmd}")
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as e:
self.logger.error(f"Failed to run command: {cmd}. Error: {e}")
log_cmd = f"{env_prefix} sudo docker compose -f {self.compose_file_path} logs"
subprocess.run(log_cmd, shell=True)
raise
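
Because the helper is not a ducktape service, the test owns its lifecycle. A minimal usage sketch under assumptions not stated in the PR: the compose file path as seen from ducker01 (the Kafka tree is mounted at /opt/kafka-dev) and the bootstrap string are illustrative only:

    from kafkatest.services.external_services import DockerComposeService

    def run_with_schema_registry(test, bootstrap="ducker02:9092"):
        registry = DockerComposeService(
            "/opt/kafka-dev/tests/docker/schema-registry/docker-compose.yaml",
            test.logger,
        )
        registry.start(env={"KAFKA_BOOTSTRAP_SERVERS": bootstrap})
        try:
            pass  # ... assertions against http://10.5.1.3:8081 go here ...
        finally:
            registry.stop()  # runs 'docker compose down --remove-orphans -v'
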
1 change: 1 addition & 0 deletions tests/kafkatest/services/performance/__init__.py
@@ -16,4 +16,5 @@
from .performance import PerformanceService, throughput, latency, compute_aggregate_throughput
from .end_to_end_latency import EndToEndLatencyService
from .producer_performance import ProducerPerformanceService
from .automq_performance import AutoMQPerformanceService
from .consumer_performance import ConsumerPerformanceService