From 2ae3243220546309d45ac852fd974dfe4b8c518e Mon Sep 17 00:00:00 2001
From: wufeisheng
Date: Thu, 23 Oct 2025 10:49:43 +0800
Subject: [PATCH 1/5] fix total_block_num init error in worker_process

---
 fastdeploy/worker/worker_process.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/fastdeploy/worker/worker_process.py b/fastdeploy/worker/worker_process.py
index f9cde4b1bdc..6e68b2d4181 100644
--- a/fastdeploy/worker/worker_process.py
+++ b/fastdeploy/worker/worker_process.py
@@ -695,6 +695,7 @@ def initialize_fd_config(args, ranks: int = 1, local_rank: int = 0) -> FDConfig:
     parallel_config = ParallelConfig(vars(args))
     cache_config = CacheConfig(vars(args))
     scheduler_config = SchedulerConfig(vars(args))
+    parallel_config.tensor_parallel_rank = local_rank % parallel_config.tensor_parallel_size
     parallel_config.data_parallel_rank = local_rank // parallel_config.tensor_parallel_size

     # config for EP
@@ -800,6 +801,8 @@ def initialize_fd_config(args, ranks: int = 1, local_rank: int = 0) -> FDConfig:
         plas_attention_config=plas_attention_config,
         structured_outputs_config=structured_outputs_config,
     )
+    # CacheConfig does not init total_block_num from args; it is set by the postprocess function invoked by __init__, so reset it here
+    cache_config.reset(args.total_block_num)
     update_fd_config_for_mm(fd_config)
     if fd_config.load_config.load_choices == "default_v1" and not v1_loader_support(fd_config):
         fd_config.load_config.load_choices = "default"

From a9adcda062f02df48f077b098d38355a362ba583 Mon Sep 17 00:00:00 2001
From: wufeisheng
Date: Thu, 23 Oct 2025 18:09:31 +0800
Subject: [PATCH 2/5] fix req and token client

---
 fastdeploy/engine/engine.py         | 2 +-
 fastdeploy/worker/worker_process.py | 4 +---
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/fastdeploy/engine/engine.py b/fastdeploy/engine/engine.py
index a5c317f80bb..52b7b1e1a6f 100644
--- a/fastdeploy/engine/engine.py
+++ b/fastdeploy/engine/engine.py
@@ -509,7 +509,7 @@ def _start_worker_service(self):
             f" --tensor_parallel_size {self.cfg.parallel_config.tensor_parallel_size}"
             f" --engine_worker_queue_port {ports}"
             f" --pod_ip {self.cfg.master_ip}"
-            f" --total_block_num {self.cfg.cache_config.total_block_num}"
+            f" --num_gpu_blocks_override {self.cfg.cache_config.num_gpu_blocks_override}"
             f" --block_size {self.cfg.cache_config.block_size}"
             f" --enc_dec_block_num {self.cfg.cache_config.enc_dec_block_num}"
             f" --eos_tokens_lens {self.data_processor.eos_token_id_len}"
diff --git a/fastdeploy/worker/worker_process.py b/fastdeploy/worker/worker_process.py
index 6e68b2d4181..825edacba18 100644
--- a/fastdeploy/worker/worker_process.py
+++ b/fastdeploy/worker/worker_process.py
@@ -480,7 +480,7 @@ def parse_args():
         help="model dir",
     )
     parser.add_argument("-mbs", "--max_num_seqs", type=int, default=34, help="max batch size")
-    parser.add_argument("--total_block_num", type=int, default=2000)
+    parser.add_argument("--num_gpu_blocks_override", type=int, default=1024)
     parser.add_argument("--block_size", type=int, default=64)
     parser.add_argument("--pod_ip", type=str, default="127.0.0.1")
     parser.add_argument("--engine_worker_queue_port", type=str, default="9923")
@@ -801,8 +801,6 @@ def initialize_fd_config(args, ranks: int = 1, local_rank: int = 0) -> FDConfig:
         plas_attention_config=plas_attention_config,
         structured_outputs_config=structured_outputs_config,
     )
-    # CacheConfig does not init total_block_num from args; it is set by the postprocess function invoked by __init__, so reset it here
-    cache_config.reset(args.total_block_num)
     update_fd_config_for_mm(fd_config)
     if fd_config.load_config.load_choices == "default_v1" and not v1_loader_support(fd_config):
         fd_config.load_config.load_choices = "default"

From 24e0bdb01ff3ac0689f1ce7396b7b02c1bb4fe2e Mon Sep 17 00:00:00 2001
From: wufeisheng
Date: Thu, 23 Oct 2025 18:39:23 +0800
Subject: [PATCH 3/5] fix req and token client

---
 fastdeploy/engine/engine.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fastdeploy/engine/engine.py b/fastdeploy/engine/engine.py
index 52b7b1e1a6f..18183b478dc 100644
--- a/fastdeploy/engine/engine.py
+++ b/fastdeploy/engine/engine.py
@@ -509,7 +509,7 @@ def _start_worker_service(self):
             f" --tensor_parallel_size {self.cfg.parallel_config.tensor_parallel_size}"
             f" --engine_worker_queue_port {ports}"
             f" --pod_ip {self.cfg.master_ip}"
-            f" --num_gpu_blocks_override {self.cfg.cache_config.num_gpu_blocks_override}"
+            f" --num_gpu_blocks_override {self.cfg.cache_config.total_block_num}"
             f" --block_size {self.cfg.cache_config.block_size}"
             f" --enc_dec_block_num {self.cfg.cache_config.enc_dec_block_num}"
             f" --eos_tokens_lens {self.data_processor.eos_token_id_len}"

From 10339c2768fceeb32464d602822ed34be1f77089 Mon Sep 17 00:00:00 2001
From: wufeisheng
Date: Mon, 27 Oct 2025 12:23:32 +0800
Subject: [PATCH 4/5] fix xpu ci

---
 fastdeploy/engine/engine.py         | 2 +-
 fastdeploy/worker/worker_process.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/fastdeploy/engine/engine.py b/fastdeploy/engine/engine.py
index 18183b478dc..1795b51700c 100644
--- a/fastdeploy/engine/engine.py
+++ b/fastdeploy/engine/engine.py
@@ -509,7 +509,6 @@ def _start_worker_service(self):
             f" --tensor_parallel_size {self.cfg.parallel_config.tensor_parallel_size}"
             f" --engine_worker_queue_port {ports}"
             f" --pod_ip {self.cfg.master_ip}"
-            f" --num_gpu_blocks_override {self.cfg.cache_config.total_block_num}"
             f" --block_size {self.cfg.cache_config.block_size}"
             f" --enc_dec_block_num {self.cfg.cache_config.enc_dec_block_num}"
             f" --eos_tokens_lens {self.data_processor.eos_token_id_len}"
@@ -550,6 +549,7 @@ def _start_worker_service(self):
             "disable_custom_all_reduce": self.cfg.parallel_config.disable_custom_all_reduce,
             "enable_logprob": self.cfg.model_config.enable_logprob,
             "lm_head_fp32": self.cfg.model_config.lm_head_fp32,
+            "num_gpu_blocks_override": self.cfg.cache_config.num_gpu_blocks_override,
         }
         for worker_flag, value in worker_append_flag.items():
             if value:
diff --git a/fastdeploy/worker/worker_process.py b/fastdeploy/worker/worker_process.py
index 825edacba18..94440e5b17e 100644
--- a/fastdeploy/worker/worker_process.py
+++ b/fastdeploy/worker/worker_process.py
@@ -480,7 +480,7 @@ def parse_args():
         help="model dir",
     )
     parser.add_argument("-mbs", "--max_num_seqs", type=int, default=34, help="max batch size")
-    parser.add_argument("--num_gpu_blocks_override", type=int, default=1024)
+    parser.add_argument("--num_gpu_blocks_override", type=int, default=None)
     parser.add_argument("--block_size", type=int, default=64)
     parser.add_argument("--pod_ip", type=str, default="127.0.0.1")
     parser.add_argument("--engine_worker_queue_port", type=str, default="9923")

From 287c0ebe99595673c5d3454650c799b9c776aba8 Mon Sep 17 00:00:00 2001
From: wufeisheng
Date: Mon, 27 Oct 2025 14:19:26 +0800
Subject: [PATCH 5/5] fix xpu ci

---
 fastdeploy/engine/engine.py | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/fastdeploy/engine/engine.py b/fastdeploy/engine/engine.py
index 1795b51700c..0b2dfc3e1dd 100644
--- a/fastdeploy/engine/engine.py
+++ b/fastdeploy/engine/engine.py
@@ -539,7 +539,7 @@ def _start_worker_service(self):
                 f" --override-pooler-config {self.cfg.model_config.override_pooler_config}"
             )

-        worker_append_flag = {
+        worker_store_true_flag = {
             "enable_expert_parallel": self.cfg.parallel_config.enable_expert_parallel,
             "enable_prefix_caching": self.cfg.cache_config.enable_prefix_caching,
             "enable_chunked_prefill": self.cfg.cache_config.enable_chunked_prefill,
@@ -549,11 +549,18 @@ def _start_worker_service(self):
             "disable_custom_all_reduce": self.cfg.parallel_config.disable_custom_all_reduce,
             "enable_logprob": self.cfg.model_config.enable_logprob,
             "lm_head_fp32": self.cfg.model_config.lm_head_fp32,
-            "num_gpu_blocks_override": self.cfg.cache_config.num_gpu_blocks_override,
         }
-        for worker_flag, value in worker_append_flag.items():
+        for worker_flag, value in worker_store_true_flag.items():
             if value:
                 arguments = arguments + f" --{worker_flag}"
+
+        worker_default_none_flag = {
+            "num_gpu_blocks_override": self.cfg.cache_config.num_gpu_blocks_override,
+        }
+        for worker_flag, value in worker_default_none_flag.items():
+            if value:
+                arguments = arguments + f" --{worker_flag} {value}"
+
         if self.cfg.nnode > 1:
             pd_cmd = pd_cmd + f" --ips {ips} --nnodes {len(self.cfg.ips)}"
         pd_cmd = pd_cmd + arguments + f" 2>{log_dir}/launch_worker.log"
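A minimal standalone sketch (separate from the patches above) of the flag-forwarding pattern PATCH 5 introduces in _start_worker_service: store_true switches are forwarded bare, while value flags such as num_gpu_blocks_override are forwarded as "--flag value" only when they are set. The dictionary values below are hypothetical stand-ins for the real self.cfg fields:

    # Sketch only: hypothetical values stand in for self.cfg fields.
    worker_store_true_flag = {
        "enable_prefix_caching": True,    # truthy switch: appended bare
        "enable_chunked_prefill": False,  # falsy: not appended at all
    }
    worker_default_none_flag = {
        "num_gpu_blocks_override": None,  # value flag: appended with its value only when set
    }

    arguments = ""
    for worker_flag, value in worker_store_true_flag.items():
        if value:
            arguments += f" --{worker_flag}"
    for worker_flag, value in worker_default_none_flag.items():
        if value:
            arguments += f" --{worker_flag} {value}"

    print(arguments)  # " --enable_prefix_caching"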