diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml
index d6202608d..7c4fcab9a 100644
--- a/.github/configs/nvidia-master.yaml
+++ b/.github/configs/nvidia-master.yaml
@@ -3538,15 +3538,16 @@ minimaxm2.5-fp8-b300-vllm:
   - isl: 1024
     osl: 1024
     search-space:
-      - { tp: 2, conc-start: 4, conc-end: 512 }
-      - { tp: 4, conc-start: 4, conc-end: 512 }
-      - { tp: 2, ep: 2, conc-start: 512, conc-end: 512 }
+      - { tp: 4, conc-start: 4, conc-end: 128 }
       - { tp: 4, ep: 4, conc-start: 256, conc-end: 512 }
+      - { tp: 2, ep: 2, conc-start: 512, conc-end: 1024 }
+      - { tp: 2, ep: 2, dp-attn: true, conc-start: 1024, conc-end: 1024 }
   - isl: 8192
     osl: 1024
     search-space:
-      - { tp: 2, conc-start: 4, conc-end: 512 }
-      - { tp: 4, conc-start: 4, conc-end: 512 }
+      - { tp: 1, conc-start: 4, conc-end: 16 }
+      - { tp: 2, conc-start: 64, conc-end: 256 }
+      - { tp: 4, conc-start: 4, conc-end: 8 }
 
 minimaxm2.5-fp4-b200-vllm:
   image: vllm/vllm-openai:v0.19.0-cu130
diff --git a/benchmarks/single_node/minimaxm2.5_fp8_b300.sh b/benchmarks/single_node/minimaxm2.5_fp8_b300.sh
index 210109e89..3e83a0b36 100755
--- a/benchmarks/single_node/minimaxm2.5_fp8_b300.sh
+++ b/benchmarks/single_node/minimaxm2.5_fp8_b300.sh
@@ -28,7 +28,7 @@ hf download "$MODEL"
 
 SERVER_LOG=/workspace/server.log
 PORT=${PORT:-8888}
-export VLLM_FLASHINFER_ALLREDUCE_BACKEND=mnnvl
+export VLLM_FLOAT32_MATMUL_PRECISION=high
 
 if [ "$EP_SIZE" -gt 1 ]; then
   EP=" --enable-expert-parallel"
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index 6bd6c92e3..fc14ce43e 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -1678,3 +1678,9 @@
   - "Add H200 multinode evals-only runs"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1094
   evals-only: true
+
+- config-keys:
+  - minimaxm2.5-fp8-b300-vllm
+  description:
+  - "Add VLLM_FLOAT32_MATMUL_PRECISION=high, remove VLLM_FLASHINFER_ALLREDUCE_BACKEND=mnnvl"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1106