From c69821587069d933e6a881076ccaf8cff19d5db4 Mon Sep 17 00:00:00 2001
From: functionstackx <47992694+functionstackx@users.noreply.github.com>
Date: Fri, 17 Apr 2026 22:48:04 -0400
Subject: [PATCH] Add MI355X config: glm5-fp8-mi355x-sglang-mtp

Mirrors the existing glm5-fp8-mi355x-sglang non-MTP recipe and adds EAGLE
speculative decoding (num-steps=3, eagle-topk=1, num-draft-tokens=4) via the
standard spec-decoding=mtp suffix. SGLANG_ENABLE_SPEC_V2=1 is set before
launching the server, as required for GLM-5 MTP.

The script also passes --use-chat-template to run_benchmark_serving, as
required by AGENTS.md for all MTP configs.

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 .github/configs/amd-master.yaml               | 18 ++++
 benchmarks/single_node/glm5_fp8_mi355x_mtp.sh | 87 +++++++++++++++++++
 perf-changelog.yaml                           | 10 +++
 3 files changed, 115 insertions(+)
 create mode 100755 benchmarks/single_node/glm5_fp8_mi355x_mtp.sh

diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml
index 2f8293688..993a075bd 100644
--- a/.github/configs/amd-master.yaml
+++ b/.github/configs/amd-master.yaml
@@ -301,6 +301,24 @@ glm5-fp8-mi355x-sglang:
       search-space:
         - { tp: 8, conc-start: 4, conc-end: 64 }
 
+glm5-fp8-mi355x-sglang-mtp:
+  image: lmsysorg/sglang-rocm:v0.5.10rc0-rocm720-mi35x-20260413
+  model: zai-org/GLM-5-FP8
+  model-prefix: glm5
+  runner: mi355x
+  precision: fp8
+  framework: sglang
+  multinode: false
+  seq-len-configs:
+    - isl: 1024
+      osl: 1024
+      search-space:
+        - { tp: 8, conc-start: 4, conc-end: 64, spec-decoding: mtp }
+    - isl: 8192
+      osl: 1024
+      search-space:
+        - { tp: 8, conc-start: 4, conc-end: 64, spec-decoding: mtp }
+
 glm5-fp8-mi355x-atom:
   image: rocm/atom:rocm7.2.1-ubuntu24.04-pytorch2.9.1-atom0.1.2.post
   model: zai-org/GLM-5-FP8
diff --git a/benchmarks/single_node/glm5_fp8_mi355x_mtp.sh b/benchmarks/single_node/glm5_fp8_mi355x_mtp.sh
new file mode 100755
index 000000000..f4b899011
--- /dev/null
+++ b/benchmarks/single_node/glm5_fp8_mi355x_mtp.sh
@@ -0,0 +1,87 @@
+#!/usr/bin/env bash
+
+source "$(dirname "$0")/../benchmark_lib.sh"
+
+check_env_vars \
+    MODEL \
+    TP \
+    CONC \
+    ISL \
+    OSL \
+    RANDOM_RANGE_RATIO \
+    RESULT_FILENAME
+
+if [[ -n "$SLURM_JOB_ID" ]]; then
+    echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
+fi
+
+# GLM-5 requires a transformers build with glm_moe_dsa model type support.
+# The benchmark image does not ship it yet, so install transformers from source at a pinned commit.
+python3 -m pip install -U --no-cache-dir \
+    "git+https://github.com/huggingface/transformers.git@6ed9ee36f608fd145168377345bfc4a5de12e1e2"
+
+hf download "$MODEL"
+
+# ROCm / SGLang performance tuning for MI355X
+export SGLANG_ROCM_FUSED_DECODE_MLA=0
+export ROCM_QUICK_REDUCE_QUANTIZATION=INT4
+export SAFETENSORS_FAST_GPU=1
+export SGLANG_ENABLE_SPEC_V2=1
+
+SERVER_LOG=/workspace/server.log
+PORT=${PORT:-8888}
+
+EVAL_CONTEXT_ARGS=""
+if [ "${EVAL_ONLY}" = "true" ]; then
+    setup_eval_context
+    EVAL_CONTEXT_ARGS="--context-length $EVAL_MAX_MODEL_LEN"
+fi
+# Start GPU monitoring (power, temperature, clocks every second)
+start_gpu_monitor
+
+python3 -m sglang.launch_server \
+    --model-path $MODEL \
+    --host=0.0.0.0 \
+    --port $PORT \
+    --tensor-parallel-size $TP \
+    --trust-remote-code \
+    --tool-call-parser glm47 \
+    --reasoning-parser glm45 \
+    --mem-fraction-static 0.85 \
+    --model-loader-extra-config '{"enable_multithread_load": true, "num_threads": 8}' \
+    --nsa-prefill-backend tilelang \
+    --nsa-decode-backend tilelang $EVAL_CONTEXT_ARGS \
+    --kv-cache-dtype fp8_e4m3 \
+    --speculative-algorithm EAGLE \
+    --speculative-num-steps 3 \
+    --speculative-eagle-topk 1 \
+    --speculative-num-draft-tokens 4 \
+    --disable-radix-cache > $SERVER_LOG 2>&1 &
+
+SERVER_PID=$!
+
+# Wait for server to be ready
+wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"
+
+run_benchmark_serving \
+    --model "$MODEL" \
+    --port "$PORT" \
+    --backend vllm \
+    --input-len "$ISL" \
+    --output-len "$OSL" \
+    --random-range-ratio "$RANDOM_RANGE_RATIO" \
+    --num-prompts "$((CONC * 10))" \
+    --max-concurrency "$CONC" \
+    --result-filename "$RESULT_FILENAME" \
+    --result-dir /workspace/ \
+    --use-chat-template
+
+# After throughput, run evaluation only if RUN_EVAL is true
+if [ "${RUN_EVAL}" = "true" ]; then
+    run_eval --framework lm-eval --port "$PORT"
+    append_lm_eval_summary
+fi
+
+# Stop GPU monitoring
+stop_gpu_monitor
+set +x
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index b4a289dbe..24f2e3a2c 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -1576,3 +1576,13 @@
     - "Mirrors the qwen3.5-fp8-mi355x-sglang non-MTP recipe and adds EAGLE speculative decoding (num-steps=3, eagle-topk=1, num-draft-tokens=4)"
     - "Configs: 1k1k (TP8/EP1, TP8/EP8, TP2/EP2) and 8k1k (TP2/EP2, TP4/EP1) with spec-decoding=mtp"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/XXXX
+
+- config-keys:
+    - glm5-fp8-mi355x-sglang-mtp
+  description:
+    - "Add GLM-5 FP8 MI355X SGLang MTP benchmark"
+    - "Image: lmsysorg/sglang-rocm:v0.5.10rc0-rocm720-mi35x-20260413"
+    - "Model: zai-org/GLM-5-FP8"
+    - "Mirrors the glm5-fp8-mi355x-sglang non-MTP recipe and adds EAGLE speculative decoding (num-steps=3, eagle-topk=1, num-draft-tokens=4) behind SGLANG_ENABLE_SPEC_V2=1"
+    - "Configs: 1k1k and 8k1k, TP=8 conc 4-64 with spec-decoding=mtp"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/XXXX
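
A minimal smoke test for the new MTP recipe outside the benchmark harness, sketched under assumptions: the model, port 8888, TP=8, and the speculative-decoding flags mirror the benchmark script above, and the request goes to SGLang's OpenAI-compatible /v1/chat/completions route (a chat request, since MTP configs are benchmarked with --use-chat-template). Paths and the port are illustrative; adjust for your environment.

    # Required before launching the server for GLM-5 MTP, per the commit message.
    export SGLANG_ENABLE_SPEC_V2=1

    # Launch the server with the same EAGLE/MTP settings as the benchmark script.
    python3 -m sglang.launch_server \
        --model-path zai-org/GLM-5-FP8 \
        --host 0.0.0.0 --port 8888 \
        --tensor-parallel-size 8 \
        --trust-remote-code \
        --speculative-algorithm EAGLE \
        --speculative-num-steps 3 \
        --speculative-eagle-topk 1 \
        --speculative-num-draft-tokens 4 &

    # Once the server log reports readiness, send a single chat request.
    curl -s http://localhost:8888/v1/chat/completions \
        -H "Content-Type: application/json" \
        -d '{"model": "zai-org/GLM-5-FP8", "messages": [{"role": "user", "content": "Say hello."}], "max_tokens": 32}'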