diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py
index 472e8b061a9e..14ea03444484 100644
--- a/vllm/model_executor/models/qwen2_vl.py
+++ b/vllm/model_executor/models/qwen2_vl.py
@@ -82,7 +82,7 @@
 logger = init_logger(__name__)
 
 # For profile run
-_MAX_FRAMES_PER_VIDEO = 600
+_MAX_FRAMES_PER_VIDEO = 32
 
 # === Vision Inputs === #
 
diff --git a/vllm/model_executor/models/qwen3_vl.py b/vllm/model_executor/models/qwen3_vl.py
index d4f1547fd8e5..69acffbc6708 100644
--- a/vllm/model_executor/models/qwen3_vl.py
+++ b/vllm/model_executor/models/qwen3_vl.py
@@ -715,6 +715,18 @@ def _get_dummy_videos(
             video_items.append(video_item)
         return video_items
 
+    def get_dummy_processor_inputs(self, seq_len, mm_counts):
+        processor_inputs = super().get_dummy_processor_inputs(
+            seq_len, mm_counts)
+        # HACK(Isotr0py): We set do_resize to False here to reuse Qwen2-VL's
+        # profiling logic, which will be problematic for configurable mm
+        # profiling.
+        # TODO(Isotr0py): Switch to the implementation in
+        # https://github.com/vllm-project/vllm/pull/25557
+        # after supporting configurable mm profiling.
+        processor_inputs.hf_processor_mm_kwargs = {"do_resize": False}
+        return processor_inputs
+
 
 class Qwen3VLMultiModalProcessor(BaseMultiModalProcessor[Qwen3VLProcessingInfo]
                                  ):