[Optimization][DeepSeekV3.2] Reduce slot_mapping compute frequency from twice per layer to a single pre-processing step #7367
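As background for the diff below, here is a schematic sketch of the change (illustration only; the function and helper names are hypothetical, not FastDeploy code): per-token KV-cache slot addresses were previously re-derived inside each attention layer, and this PR computes them once per step during input preparation and stores them on `forward_meta` for every layer to reuse.

```python
# Schematic sketch only (hypothetical names, not FastDeploy's actual code):
# the PR hoists the slot_mapping computation out of the per-layer attention
# path into a single pre-processing step.
from typing import Callable, List


def compute_slot_mapping(position_ids: List[int], batch_ids: List[int],
                         block_tables: List[List[int]], block_size: int) -> List[int]:
    """Map each token's logical position to a physical KV-cache slot index."""
    return [
        block_tables[b][p // block_size] * block_size + (p % block_size)
        for p, b in zip(position_ids, batch_ids)
    ]


def step_before(layers: List[Callable], position_ids, batch_ids, block_tables, block_size):
    # Before: each attention layer re-derived the slot mapping (per the title, twice per layer).
    for layer in layers:
        slots = compute_slot_mapping(position_ids, batch_ids, block_tables, block_size)
        layer(slots)


def step_after(layers: List[Callable], position_ids, batch_ids, block_tables, block_size):
    # After: computed once per step during input preparation, then reused by all layers.
    slots = compute_slot_mapping(position_ids, batch_ids, block_tables, block_size)
    for layer in layers:
        layer(slots)
```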
```diff
@@ -45,6 +45,12 @@
 from fastdeploy.model_executor.layers.attention.base_attention_backend import (
     AttentionBackend,
 )
+from fastdeploy.model_executor.layers.attention.dsa_attention_backend import (
+    DSAAttentionBackend,
+)
+from fastdeploy.model_executor.layers.attention.mla_attention_backend import (
+    MLAAttentionBackend,
+)
 from fastdeploy.model_executor.layers.moe.routing_indices_cache import (
     RoutingReplayManager,
 )
```
```diff
@@ -79,6 +85,7 @@
     speculate_schedule_cache,
     set_data_ipc,
     unset_data_ipc,
+    get_position_ids_and_mask_encoder_batch,
 )

 import zmq
```
```diff
@@ -1267,6 +1274,33 @@ def _prepare_inputs(self, cached_token_num=-1, cached_real_bsz=-1, is_dummy_or_p
         )
         return token_num, token_num_event

+    def _compute_position_ids_and_slot_mapping(self) -> None:
+        """Compute position_ids and slot_mapping for KV cache addressing.
+        This is a general computation based on sequence length info and block tables,
+        applicable to all models that need per-token KV cache physical slot addresses.
+        Results are stored in self.forward_meta.
+        """
+        # NOTE(zhushengguang): Only support MLAAttentionBackend and DSAAttentionBackend currently.
+        if not isinstance(self.attn_backends[0], (MLAAttentionBackend, DSAAttentionBackend)):
+            return
+        current_total_tokens = self.forward_meta.ids_remove_padding.shape[0]
+        position_ids = self.share_inputs["position_ids_buffer"][:current_total_tokens]
+        get_position_ids_and_mask_encoder_batch(
+            self.forward_meta.seq_lens_encoder,
+            self.forward_meta.seq_lens_decoder,
+            self.forward_meta.seq_lens_this_time,
+            position_ids,
+        )
+        block_size = self.cache_config.block_size
+        block_idx = position_ids // block_size  # [num_tokens]
+        assert self.forward_meta.batch_id_per_token.shape == block_idx.shape
+        block_ids = self.forward_meta.block_tables[self.forward_meta.batch_id_per_token, block_idx]  # [num_tokens]
+        block_offset = position_ids % block_size  # [num_tokens]
+        slot_mapping = self.share_inputs["slot_mapping_buffer"][:current_total_tokens]
+        paddle.assign((block_ids * block_size + block_offset).cast(paddle.int64), slot_mapping)
+        self.forward_meta.position_ids = position_ids
+        self.forward_meta.slot_mapping = slot_mapping
+
     def _process_reorder(self) -> None:
         if self.attn_backends and getattr(self.attn_backends[0], "enable_ids_reorder", False):
             self.share_inputs.enable_pd_reorder = True
```

Review comment on the `isinstance` check: 🟡 Suggestion: `self.attn_backends[0]` is accessed here without first checking that the list is non-empty; for consistency with the guarded access used elsewhere in this file, consider:

```python
if not self.attn_backends or not isinstance(self.attn_backends[0], (MLAAttentionBackend, DSAAttentionBackend)):
    return
```

Review comment on the `assert` (ShaneGZhu marked this conversation as resolved): 🟡 Suggestion: attach an error message to the assertion, e.g.:

```python
assert self.forward_meta.batch_id_per_token.shape == block_idx.shape, (
    f"Shape mismatch: batch_id_per_token {self.forward_meta.batch_id_per_token.shape} "
    f"vs block_idx {block_idx.shape}"
)
```
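For concreteness, here is a small worked example of the addressing arithmetic in `_compute_position_ids_and_slot_mapping`, using NumPy and made-up toy values (the block table, positions, and batch ids below are illustrative only):

```python
import numpy as np

block_size = 4
# Hypothetical block table: request 0 owns physical blocks [7, 2], request 1 owns block [5].
block_tables = np.array([[7, 2], [5, -1]])
# Six flattened tokens: five from request 0 (positions 0..4), one decode token from request 1 (position 2).
position_ids = np.array([0, 1, 2, 3, 4, 2])
batch_id_per_token = np.array([0, 0, 0, 0, 0, 1])

block_idx = position_ids // block_size                    # [0, 0, 0, 0, 1, 0]
block_ids = block_tables[batch_id_per_token, block_idx]   # [7, 7, 7, 7, 2, 5]
block_offset = position_ids % block_size                  # [0, 1, 2, 3, 0, 2]
slot_mapping = block_ids * block_size + block_offset      # [28, 29, 30, 31, 8, 22]

print(slot_mapping)  # token 4 of request 0 spills into its second block (physical block 2 -> slot 8)
```

The last line mirrors the Paddle expression `block_ids * block_size + block_offset` in the diff above; the fancy indexing `block_tables[batch_id_per_token, block_idx]` picks, for each token, the physical block that holds its logical block index.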
```diff
@@ -1860,6 +1894,8 @@ def _dummy_run(
         # 2. Padding inputs for cuda graph
         self.forward_meta.step_use_cudagraph = in_capturing and self.forward_meta.step_use_cudagraph
         self.padding_cudagraph_inputs()
+        # Compute position_ids and slot_mapping
+        self._compute_position_ids_and_slot_mapping()

         model_inputs = {}
         model_inputs["ids_remove_padding"] = self.share_inputs["ids_remove_padding"]
```
```diff
@@ -2197,6 +2233,8 @@ def _preprocess(

         # Padding inputs for cuda graph
         self.padding_cudagraph_inputs()
+        # Compute position_ids and slot_mapping
+        self._compute_position_ids_and_slot_mapping()

         model_inputs = {}
         model_inputs["ids_remove_padding"] = self.share_inputs["ids_remove_padding"]
```