From ea0edbb54f29f6a9a075a3caf15c07d8dd26e437 Mon Sep 17 00:00:00 2001
From: yyzxw <1020938856@qq.com>
Date: Mon, 29 Sep 2025 13:28:30 +0800
Subject: [PATCH] Throwing an exception when the model does not support pool
 tasks

Signed-off-by: zxw <1020938856@qq.com>
---
 vllm/model_executor/models/adapters.py |  3 +++
 vllm/v1/worker/gpu_model_runner.py     | 22 +++++++++++++++++++++-
 2 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/vllm/model_executor/models/adapters.py b/vllm/model_executor/models/adapters.py
index fd8a0b87e43e..cae0c738de05 100644
--- a/vllm/model_executor/models/adapters.py
+++ b/vllm/model_executor/models/adapters.py
@@ -399,6 +399,9 @@ def as_reward_model(cls: _T) -> _T:
     # Lazy import
     from vllm.model_executor.layers.pooler import DispatchPooler, Pooler
 
+    from .interfaces_base import default_pooling_type
+
+    @default_pooling_type("ALL")
     class ModelForReward(_create_pooling_model_cls(cls)):
         def _init_pooler(self, vllm_config: "VllmConfig", prefix: str = ""):
             pooler_config = vllm_config.model_config.pooler_config
diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py
index cbac67d9e24e..a9b7da482b34 100644
--- a/vllm/v1/worker/gpu_model_runner.py
+++ b/vllm/v1/worker/gpu_model_runner.py
@@ -3587,8 +3587,28 @@ def _dummy_pooler_run(
         hidden_states: torch.Tensor,
     ) -> PoolerOutput:
         # Find the task that has the largest output for subsequent steps
+        supported_pooling_tasks = self.get_supported_pooling_tasks()
+
+        if not supported_pooling_tasks:
+            if self.scheduler_config.chunked_prefill_enabled:
+                raise RuntimeError(
+                    f"Model {self.model_config.model} does not support "
+                    "any pooling tasks with chunked prefill enabled. "
+                    "Please add --no-enable-chunked-prefill to your "
+                    "config or CLI args. See "
+                    "https://docs.vllm.ai/en/latest/models/pooling_models.html "
+                    "to learn more."
+                )
+            else:
+                raise RuntimeError(
+                    f"Model {self.model_config.model} does not support "
+                    "any pooling tasks. See "
+                    "https://docs.vllm.ai/en/latest/models/pooling_models.html "
+                    "to learn more."
+                )
+
         output_size = dict[PoolingTask, float]()
-        for task in self.get_supported_pooling_tasks():
+        for task in supported_pooling_tasks:
             # Run a full batch with each task to ensure none of them OOMs
             output = self._dummy_pooler_run_task(hidden_states, task)
             output_size[task] = sum(o.nbytes for o in output)