[Model Runner] Refactor execute_model for GPU async scheduling #6176
New file: a C++ custom op pair for reading and writing the `not_need_stop` flag.

```cpp
// Copyright (c) 2026 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "helper.h"

// Snapshots the current value of the stop flag into a fresh CPU tensor.
paddle::Tensor GetStop(paddle::Tensor& not_need_stop) {
  bool* not_need_stop_data = const_cast<bool*>(not_need_stop.data<bool>());
  auto not_need_stop_cpu =
      GetEmptyTensor({1}, paddle::DataType::BOOL, paddle::CPUPlace());
  bool* not_need_stop_cpu_data =
      const_cast<bool*>(not_need_stop_cpu.data<bool>());
  not_need_stop_cpu_data[0] = not_need_stop_data[0];
  return not_need_stop_cpu;
}

// Overwrites the stop flag in place.
void SetStop(paddle::Tensor& not_need_stop, bool flag) {
  bool* not_need_stop_data = const_cast<bool*>(not_need_stop.data<bool>());
  not_need_stop_data[0] = flag;
}
```
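For orientation, here is a minimal, hypothetical sketch of how a host-resident stop flag like this is typically driven from Python. The `get_stop`/`set_stop` binding names and module path are assumptions for illustration; only the pattern itself (keeping the flag CPU-accessible so polling never synchronizes the GPU stream) comes from the diff.

```python
import numpy as np
import paddle

# Hypothetical usage of the ops above; the Python bindings (`get_stop`,
# `set_stop`) are assumed names, not confirmed by this PR.
# from fastdeploy.model_executor.ops.gpu import get_stop, set_stop

# Keep the flag in pinned host memory: reads and writes are plain pointer
# accesses, so polling it never forces a CUDA stream synchronization.
not_need_stop = paddle.to_tensor(np.array([True]), place=paddle.CUDAPinnedPlace())

# set_stop(not_need_stop, False)      # scheduler requests a stop
# flag = get_stop(not_need_stop)      # snapshot into a fresh CPU tensor
# print(bool(flag.numpy()[0]))
```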
Changes to the Python post-processing code. First, the save-related parameters are removed from the `post_process_normal` signature:

```diff
@@ -316,9 +316,6 @@ def post_process_normal(
     share_inputs: Dict[str, paddle.Tensor],
     sampling_metadata: SamplingMetadata,
     block_size: int = 64,
-    save_each_rank: bool = False,
-    skip_save_output: bool = False,
-    async_output_queue: queue.Queue = None,
     think_end_id: int = -1,
     line_break_id: int = -1,
     enable_entropy: bool = False,
```
The stop flag passed to the update kernels is now the device-side copy:

```diff
@@ -388,7 +385,7 @@ def post_process_normal(
     if envs.ENABLE_V1_KVCACHE_SCHEDULER:
         update_inputs_v1(
             model_output.stop_flags,
-            model_output.not_need_stop,
+            model_output.not_need_stop_device,
             model_output.seq_lens_this_time,
             model_output.seq_lens_encoder,
             model_output.seq_lens_decoder,
```
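The rename suggests that `update_inputs_v1`/`update_inputs` now write a device-resident copy of the stop flag, while the host keeps its own copy for scheduling decisions (the role the new `GetStop`/`SetStop` ops would serve). A speculative sketch of that two-copy pattern; every name below is illustrative rather than taken from the PR:

```python
import paddle

# Speculative sketch of the two-copy pattern implied by the rename; the
# variable names are illustrative, not taken from the PR.
not_need_stop_device = paddle.full([1], True, dtype="bool")  # on GPU, updated by kernels
not_need_stop = paddle.full([1], True, dtype="bool").cpu()   # host copy, polled by scheduler

def refresh_stop_flag():
    # One explicit device-to-host copy at a chosen safe point, instead of an
    # implicit synchronization inside update_inputs on every step.
    not_need_stop[:] = not_need_stop_device.cpu()
```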
The save logic is removed from `post_process_normal` and extracted into a new `save_output_normal` function:

```diff
@@ -404,44 +401,53 @@ def post_process_normal(
     else:
         update_inputs(
             model_output.stop_flags,
-            model_output.not_need_stop,
+            model_output.not_need_stop_device,
             model_output.seq_lens_this_time,
             model_output.seq_lens_encoder,
             model_output.seq_lens_decoder,
             model_output.input_ids,
             sampler_output.sampled_token_ids,
             model_output.is_block_step,
         )
-    # 3. Transmit the model's output and stop generation signal via message queue.
-    # In the future, we will abandon this approach.
-    if not skip_save_output:
-        if envs.FD_USE_GET_SAVE_OUTPUT_V1:
-            if save_each_rank or model_output.mp_rank == 0:
-                output = _build_stream_transfer_data(
-                    sampler_output.sampled_token_ids,
-                    logprobs=sampler_output.logprobs_tensors,
-                    prompt_logprobs_list=model_output.prompt_logprobs_list,
-                )
-                async_output_queue.put(output)
+
+
+def save_output_normal(
+    model_output: ModelOutputData,
+    sampler_output: SamplerOutput,
+    share_inputs: Dict[str, paddle.Tensor],
+    async_output_queue: queue.Queue = None,
+    save_each_rank: bool = False,
+):
+    # Transmit the model's output and stop generation signal via message queue.
+    # In the future, we will abandon this approach.
+    if envs.FD_USE_GET_SAVE_OUTPUT_V1:
+        if save_each_rank or model_output.mp_rank == 0:
```
Collaborator: If this takes the V1 path, could there be a synchronization problem here?

Author: Not tested yet; if this is a CPU-side operation, in theory there is no synchronization.
```diff
+            output = _build_stream_transfer_data(
+                sampler_output.sampled_token_ids,
+                logprobs=sampler_output.logprobs_tensors,
+                prompt_logprobs_list=model_output.prompt_logprobs_list,
+            )
+            async_output_queue.put(output)
-        else:
-            if sampler_output.logprobs_tensors is None:
-                save_output(
-                    sampler_output.sampled_token_ids,
-                    model_output.not_need_stop,
-                    share_inputs["preempted_idx"],
-                    model_output.mp_rank,
-                    save_each_rank,
-                )
-            else:
-                save_output_topk(
-                    sampler_output.sampled_token_ids,
-                    sampler_output.logprobs_tensors.logprob_token_ids,
-                    sampler_output.logprobs_tensors.logprobs,
-                    sampler_output.logprobs_tensors.selected_token_ranks,
-                    model_output.not_need_stop,
-                    share_inputs["preempted_idx"],
-                    model_output.mp_rank,
-                )
+    else:
+        if sampler_output.logprobs_tensors is None:
+            save_output(
+                share_inputs["sampled_token_ids"],
+                model_output.not_need_stop,
+                share_inputs["preempted_idx"],
+                model_output.mp_rank,
+                save_each_rank,
+            )
+        else:
+            save_output_topk(
+                sampler_output.sampled_token_ids,
+                sampler_output.logprobs_tensors.logprob_token_ids,
+                sampler_output.logprobs_tensors.logprobs,
+                sampler_output.logprobs_tensors.selected_token_ranks,
+                model_output.not_need_stop,
+                share_inputs["preempted_idx"],
+                model_output.mp_rank,
+            )
+    share_inputs["preempted_idx"][:] = 0
 
 
 def post_process_specualate(
```
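Splitting `save_output_normal` out of `post_process_normal` lets the runner kick off the next step's GPU work before outputs are shipped. Below is a self-contained toy of the queue-based handoff the code above relies on; all names are illustrative, and only the `async_output_queue.put` handoff mirrors the diff.

```python
import queue
import threading

# Toy producer/consumer handoff in the spirit of async_output_queue above:
# the runner thread enqueues step outputs without blocking, while a separate
# worker drains the queue and delivers tokens to clients.
async_output_queue: queue.Queue = queue.Queue()

def output_worker() -> None:
    while True:
        output = async_output_queue.get()
        if output is None:  # sentinel: shut down the worker
            break
        # A real worker would deserialize and stream tokens to clients here.
        print("saved step output:", output)

worker = threading.Thread(target=output_worker, daemon=True)
worker.start()

for step in range(3):
    # The runner loop would launch the next step's GPU work here, then hand
    # the finished step's output to the worker without waiting on it.
    async_output_queue.put({"step": step, "tokens": [step]})

async_output_queue.put(None)  # signal shutdown
worker.join()
```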
`post_process_specualate` now resets the preemption marks itself:

```diff
@@ -540,6 +546,7 @@ def post_process_specualate(
         model_output.seq_lens_decoder,
         model_output.step_idx,
     )
+    share_inputs["preempted_idx"][:] = 0
```
Collaborator: Why was this line added here?

Author: It was only moved: this operation used to run at the very end of post_process. Now that save_output has been split out, the reset follows the save; otherwise it would break the scheduler's preemption logic.
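A plausible reading of this ordering constraint (the buffer semantics are an assumption based on the exchange above): `preempted_idx` marks the slots preempted during the step, `save_output` consumes those marks, and only afterwards may the buffer be cleared.

```python
import paddle

# Assumed semantics, for illustration only: preempted_idx marks which slots
# the scheduler preempted this step; save_output must see the marks before
# they are cleared, so the reset has to run after the save.
preempted_idx = paddle.zeros([8], dtype="int32")
preempted_idx[2] = 1  # scheduler preempted slot 2 during this step
# save_output(..., preempted_idx, ...)  # the save consumes the marks here
preempted_idx[:] = 0  # safe to reset only once outputs are saved
```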
`post_process` drops the save-related arguments and no longer resets `preempted_idx` itself:

```diff
@@ -588,14 +595,10 @@ def post_process(
             share_inputs,
             sampling_metadata,
             block_size,
-            save_each_rank,
-            skip_save_output,
-            async_output_queue,
             think_end_id,
             line_break_id,
             enable_entropy,
         )
-    share_inputs["preempted_idx"][:] = 0
 
 
 def step_cuda(
```
`post_process_pooling` gains the same reset:

```diff
@@ -936,3 +939,5 @@ def post_process_pooling(
     if save_each_rank or model_output.mp_rank == 0:
         output = _build_stream_transfer_data(output_tokens=None, pooler_outputs=pooler_output.outputs)
         async_output_queue.put(output)
+
+    share_inputs["preempted_idx"][:] = 0
```
Collaborator: This should be reported to the framework team to fix in a follow-up.