From d39c110a1ad7376565a96afbbe6d64023ceead0d Mon Sep 17 00:00:00 2001 From: Jakub Nyckowski Date: Wed, 12 Jul 2023 12:09:32 -0400 Subject: [PATCH] Set lower temperature for ChatGPT calls (#28959) Updated the ChatCompletionRequest in the agent model to include a temperature parameter. The temperature parameter controls the randomness of the AI's responses, making the model more conservative and focused with a lower value. In this case, the temperature is set to 0.3 to produce more focused and consistent results. Default is 1.0. Max is 2.0. --- lib/ai/model/agent.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/ai/model/agent.go b/lib/ai/model/agent.go index a0cade5fda335..ba54b2791783d 100644 --- a/lib/ai/model/agent.go +++ b/lib/ai/model/agent.go @@ -244,9 +244,10 @@ func (a *Agent) plan(ctx context.Context, state *executionState) (*AgentAction, stream, err := state.llm.CreateChatCompletionStream( ctx, openai.ChatCompletionRequest{ - Model: openai.GPT4, - Messages: prompt, - Stream: true, + Model: openai.GPT4, + Messages: prompt, + Temperature: 0.3, + Stream: true, }, ) if err != nil {