From 0584b45ef1d640b4f46d16ceabaf9db341f1b043 Mon Sep 17 00:00:00 2001
From: Letong Han <106566639+letonghan@users.noreply.github.com>
Date: Tue, 20 Aug 2024 21:27:09 +0800
Subject: [PATCH] refine llm parameter (#532)

Signed-off-by: letonghan
---
 comps/cores/proto/api_protocol.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comps/cores/proto/api_protocol.py b/comps/cores/proto/api_protocol.py
index 382982d271..93602cebb6 100644
--- a/comps/cores/proto/api_protocol.py
+++ b/comps/cores/proto/api_protocol.py
@@ -169,7 +169,7 @@ class ChatCompletionRequest(BaseModel):
     stop: Union[str, List[str], None] = Field(default_factory=list)
     stream: Optional[bool] = False
     stream_options: Optional[StreamOptions] = None
-    temperature: Optional[float] = 1.0  # vllm default 0.7
+    temperature: Optional[float] = 0.01  # vllm default 0.7
     top_p: Optional[float] = None  # openai default 1.0, but tgi needs `top_p` must be > 0.0 and < 1.0, set None
     tools: Optional[List[ChatCompletionToolsParam]] = None
     tool_choice: Optional[Union[Literal["none"], ChatCompletionNamedToolChoiceParam]] = "none"
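For context, the one-line change lowers the Pydantic default for `temperature` from 1.0 to 0.01, so requests that omit the field decode near-deterministically rather than with full-strength sampling. The sketch below is a minimal stand-in for the real `ChatCompletionRequest` model (the surrounding fields and types are simplified assumptions taken from the hunk above, not the full file) and just demonstrates how the default behaves:

```python
# Minimal sketch, not the full OPEA api_protocol.py model.
# Field names and types mirror the diff hunk; everything else is assumed.
from typing import List, Optional, Union

from pydantic import BaseModel, Field


class ChatCompletionRequest(BaseModel):
    stop: Union[str, List[str], None] = Field(default_factory=list)
    stream: Optional[bool] = False
    temperature: Optional[float] = 0.01  # was 1.0; vllm's own default is 0.7
    top_p: Optional[float] = None  # tgi requires 0.0 < top_p < 1.0, so leave unset


if __name__ == "__main__":
    # Omitting `temperature` now picks up the near-deterministic default.
    assert ChatCompletionRequest().temperature == 0.01
    # Callers can still opt back into more diverse sampling explicitly.
    assert ChatCompletionRequest(temperature=1.0).temperature == 1.0
```

Because the field stays `Optional[float]`, only unset requests are affected; any client that already passes an explicit `temperature` sees no behavior change from this patch.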