lightllm/common/basemodel/basemodel.py: 2 changes (0 additions, 2 deletions)
@@ -62,7 +62,6 @@ def __init__(self, kvargs):
         self.is_token_healing = kvargs.get("is_token_healing", False)
         self.return_all_prompt_logics = kvargs.get("return_all_prompt_logics", False)
         assert not (self.is_token_healing and self.return_all_prompt_logics), "can not be true in same time"
-        self.use_dynamic_prompt_cache = kvargs.get("use_dynamic_prompt_cache", False)
         self.data_type = kvargs.get("data_type", "float16")
         self.graph_max_batch_size = kvargs.get("graph_max_batch_size", 16)
         self.graph_max_batch_size = (
@@ -251,7 +250,6 @@ def _create_inferstate(self, model_input: ModelInput, microbatch_index: int = 0)
         infer_state.is_prefill = model_input.is_prefill
         infer_state.is_token_healing = self.is_token_healing
         infer_state.return_all_prompt_logics = self.return_all_prompt_logics
-        infer_state.use_dynamic_prompt_cache = self.use_dynamic_prompt_cache
         infer_state.batch_size = model_input.batch_size
         infer_state.total_token_num = model_input.total_token_num
         infer_state.max_len_in_batch = model_input.max_len_in_batch
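The two hunks above show the config flow this change touches: construction-time options arrive in a kvargs dict with explicit defaults, are stored on the model, and are later copied onto the per-batch inference state. A minimal sketch of that flow, with illustrative class and attribute names rather than lightllm's real ones, showing where use_dynamic_prompt_cache drops out:

# Minimal sketch of the kvargs -> model attribute -> per-batch state flow.
# ToyModel and the dict-based state are illustrative, not lightllm's real API.
class ToyModel:
    def __init__(self, kvargs: dict):
        self.is_token_healing = kvargs.get("is_token_healing", False)
        self.data_type = kvargs.get("data_type", "float16")
        # Before this change there was also:
        #   self.use_dynamic_prompt_cache = kvargs.get("use_dynamic_prompt_cache", False)
        # With the flag removed, the prompt-cache path no longer consults it.

    def _create_inferstate(self, batch_size: int) -> dict:
        infer_state = {}  # stands in for the InferStateInfo object
        infer_state["is_token_healing"] = self.is_token_healing
        infer_state["batch_size"] = batch_size
        # ...and no use_dynamic_prompt_cache field is propagated any more.
        return infer_state

state = ToyModel({"data_type": "bfloat16"})._create_inferstate(batch_size=8)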
lightllm/common/basemodel/infer_struct.py: 1 change (0 additions, 1 deletion)
@@ -35,7 +35,6 @@ def __init__(self):

         self.is_token_healing: bool = False
         self.return_all_prompt_logics: bool = False
-        self.use_dynamic_prompt_cache: bool = False
         self.multimodal_params: dict = None
         self.is_cuda_graph: bool = False  # marks whether this inference is a CUDA graph capture
         self.dist_group: CustomProcessGroup = None
@@ -255,7 +255,7 @@ def _decompress_kv(
         b_kv_start_loc,
         skip_sample=False,
     ):
-        if infer_state.use_dynamic_prompt_cache and not skip_sample:
+        if not skip_sample:
             if is_fp8:
                 kv = infer_state.mem_manager.kv_buffer[self.layer_num_][:, :, :-2].view(torch.float8_e4m3fn)
                 kv_scale = infer_state.mem_manager.kv_buffer[self.layer_num_][:, :, -2:].view(torch.bfloat16)
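With the use_dynamic_prompt_cache guard gone, the cached KV is sampled whenever skip_sample is False. For readers unfamiliar with the packed fp8 layout the hunk slices into: the trailing two bytes of each head's row are reinterpreted as one bfloat16 dequantization scale and the leading bytes as float8_e4m3fn payload. A standalone sketch of that reinterpretation, with assumed shapes and names (not lightllm's actual buffer layout) and requiring a PyTorch build with float8 support:

import torch

# Illustrative packed layout: per (token, head), head_dim fp8 bytes plus
# 2 bytes holding one bfloat16 scale. Shapes and names are assumptions.
num_tokens, num_heads, head_dim = 4, 2, 128
kv_buffer = torch.zeros(num_tokens, num_heads, head_dim + 2, dtype=torch.uint8)

# Reinterpret the same bytes without copying, mirroring the slicing in the diff.
kv_fp8 = kv_buffer[:, :, :-2].view(torch.float8_e4m3fn)  # (T, H, head_dim)
kv_scale = kv_buffer[:, :, -2:].view(torch.bfloat16)     # (T, H, 1)

# Dequantize to bfloat16 before handing the tensors to an attention kernel.
kv = kv_fp8.to(torch.bfloat16) * kv_scale
print(kv.shape)  # torch.Size([4, 2, 128])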
lightllm/server/api_start.py: 5 changes (1 addition, 4 deletions)
@@ -94,11 +94,8 @@ def normal_or_p_d_start(args):

if args.graph_max_len_in_batch == 0:
args.graph_max_len_in_batch = args.max_req_total_len

# mode setting check.
if not args.disable_chunked_prefill:
assert args.disable_dynamic_prompt_cache is False
assert args.disable_chunked_prefill is False
if args.output_constraint_mode != "none":
assert args.disable_dynamic_prompt_cache is False
assert args.disable_chunked_prefill is False
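For context, the pre-change mode check shown above ties chunked prefill and constrained output decoding to the dynamic prompt cache staying enabled. A self-contained sketch of that pre-change logic; the helper and the SimpleNamespace harness are illustrative, not lightllm's API, and this PR presumably drops the disable_dynamic_prompt_cache assertions along with the flag itself:

from types import SimpleNamespace

def check_modes(args):
    # Chunked prefill requires the dynamic prompt cache to stay enabled.
    if not args.disable_chunked_prefill:
        assert args.disable_dynamic_prompt_cache is False
        assert args.disable_chunked_prefill is False  # trivially true inside this branch
    # Constrained output requires both features to stay enabled.
    if args.output_constraint_mode != "none":
        assert args.disable_dynamic_prompt_cache is False
        assert args.disable_chunked_prefill is False

args = SimpleNamespace(
    disable_chunked_prefill=False,
    disable_dynamic_prompt_cache=False,
    output_constraint_mode="none",
)
check_modes(args)  # passes: both features remain enabled together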
@@ -124,7 +124,6 @@ def init_model(self, kvargs):
"max_seq_length": kvargs.get("max_seq_length", 1024 * 5),
"is_token_healing": kvargs.get("is_token_healing", False),
"return_all_prompt_logics": self.return_all_prompt_logprobs,
"use_dynamic_prompt_cache": self.use_dynamic_prompt_cache,
"disable_chunked_prefill": self.disable_chunked_prefill,
"data_type": kvargs.get("data_type", "float16"),
"graph_max_batch_size": kvargs.get("graph_max_batch_size", 16),
@@ -231,7 +230,6 @@ def init_mtp_draft_model(self, main_kvargs: dict):
             "max_seq_length": main_kvargs.get("max_seq_length", 1024 * 5),
             "is_token_healing": False,
             "return_all_prompt_logics": False,
-            "use_dynamic_prompt_cache": self.use_dynamic_prompt_cache,
             "disable_chunked_prefill": self.disable_chunked_prefill,
             "data_type": main_kvargs.get("data_type", "float16"),
             "graph_max_batch_size": main_kvargs.get("graph_max_batch_size", 16),
test/benchmark/static_inference/model_infer_mtp.py: 2 changes (0 additions, 2 deletions)
@@ -27,7 +27,6 @@ def init_mtp_model(args: StartArgs, kvargs, main_model):
         {
             "weight_dir": args.mtp_draft_model_dir,
             "max_total_token_num": main_model.mem_manager.size,
-            "use_dynamic_prompt_cache": False,
             "disable_chunked_prefill": True,
             "mtp_mode": args.mtp_mode,
             "main_model": main_model,
@@ -39,7 +38,6 @@ def init_mtp_model(args: StartArgs, kvargs, main_model):
         {
             "weight_dir": args.spec_model_dir,
             "max_total_token_num": main_model.mem_manager.size,
-            "use_dynamic_prompt_cache": False,
             "disable_chunked_prefill": True,
             "mtp_mode": args.mtp_mode,
             "main_model": main_model,