From 046ce68a45063436a3a4c7e82aeb91e0fa612eaf Mon Sep 17 00:00:00 2001 From: Sojan Jose Date: Mon, 8 Jan 2024 17:02:25 -0800 Subject: [PATCH] chore: Improve OpenAI JSON rendering (#8666) We have been observing JSON parsing errors for responses from GPT. Switching to the gpt-4-1106-preview model along with using response_format has significantly improved the responses from OpenAI, hence making the switch in code. ref: https://openai.com/blog/new-models-and-developer-products-announced-at-devday fixes: #CW-2931 --- enterprise/lib/chat_gpt.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/enterprise/lib/chat_gpt.rb b/enterprise/lib/chat_gpt.rb index 4065d3686f77..9b09569d0fb8 100644 --- a/enterprise/lib/chat_gpt.rb +++ b/enterprise/lib/chat_gpt.rb @@ -4,7 +4,7 @@ def self.base_uri end def initialize(context_sections = '') - @model = 'gpt-4' + @model = 'gpt-4-1106-preview' @messages = [system_message(context_sections)] end @@ -53,7 +53,7 @@ def system_content(context_sections) def request_gpt headers = { 'Content-Type' => 'application/json', 'Authorization' => "Bearer #{ENV.fetch('OPENAI_API_KEY')}" } - body = { model: @model, messages: @messages }.to_json + body = { model: @model, messages: @messages, response_format: { type: 'json_object' } }.to_json Rails.logger.info "Requesting Chat GPT with body: #{body}" response = HTTParty.post("#{self.class.base_uri}/v1/chat/completions", headers: headers, body: body) Rails.logger.info "Chat GPT response: #{response.body}"