Skip to content

Commit

Permalink
chore: GPT Reply suggestion & summarize endpoints (#7011)
Browse files · Browse the repository at this point in the history
Adds additional endpoints for OpenAI integration which will allow
- Reply Suggestions
- Summarization

ref: #6436
fixes: https://linear.app/chatwoot/issue/CW-1596/backend-for-generating-conversation-summary
  • Loading branch information
sojan-official committed Apr 28, 2023
1 parent 13fe439 commit 02c467b
Show file tree
Hide file tree
Showing 2 changed files with 139 additions and 18 deletions.
96 changes: 82 additions & 14 deletions lib/integrations/openai/processor_service.rb
Original file line number Diff line number Diff line change
@@ -1,25 +1,98 @@
# Service object that forwards conversation-related prompts (rephrase,
# summarize, reply suggestion) to the OpenAI chat-completions API and
# returns the model's reply text.
class Integrations::Openai::ProcessorService
# 3.5 support 4,096 tokens
# 1 token is approx 4 characters
# 4,096 * 4 = 16,384 characters, sticking to 15,000 to be safe
TOKEN_LIMIT = 15_000
API_URL = 'https://api.openai.com/v1/chat/completions'.freeze
GPT_MODEL = 'gpt-3.5-turbo'.freeze

# Whitelist of event names; each maps to a "<name>_message" handler below.
ALLOWED_EVENT_NAMES = %w[rephrase summarize reply_suggestion].freeze

# pattr_initialize (attr_extras gem) — presumably defines initialize with
# required :hook and :event plus private readers; TODO confirm gem version.
pattr_initialize [:hook!, :event!]

# Entry point: dispatches the event to its handler method.
#
# Returns the generated message content (String), or nil when the event
# name is not one of ALLOWED_EVENT_NAMES.
#
# NOTE: removes a leftover pre-change line ("rephrase_message if ...")
# that the diff view interleaved with the new body.
def perform
  event_name = event['name']
  # Whitelist check makes the dynamic dispatch below safe — `send` is only
  # ever called with one of the ALLOWED_EVENT_NAMES suffixed handlers.
  return nil unless valid_event_name?(event_name)

  send("#{event_name}_message")
end

private

# True when event_name is one of the events this service can process.
# (The removed old signature `def rephrase_body(tone, message)` that the
# diff view fused onto this span is dropped.)
def valid_event_name?(event_name)
  ALLOWED_EVENT_NAMES.include?(event_name)
end

# Builds the JSON request body asking the model to rephrase the drafted
# response (event['data']['content']) into the requested tone
# (event['data']['tone']).
#
# NOTE: reconstructs the post-change version only — the diff view had the
# removed lines (hard-coded model string, old tone/message params)
# interleaved with the new ones.
def rephrase_body
  {
    model: GPT_MODEL,
    messages: [
      { role: 'system',
        content: "You are a helpful support agent. Please rephrase the following response to a more #{event['data']['tone']} tone." },
      { role: 'user', content: event['data']['content'] }
    ]
  }.to_json
end

# Collects recent chat messages of the event's conversation, newest-first
# scan but oldest-first output (prepend), stopping once the accumulated
# character count exceeds TOKEN_LIMIT.
#
# in_array_format: when true returns an Array of {role:, content:} hashes
# (chat-completions format); otherwise a single "Sender name : content"
# transcript String.
#
# Fix: messages without text content (e.g. attachment-only messages) have
# a nil #content, which previously raised NoMethodError on `.length`;
# such messages are now skipped.
def conversation_messages(in_array_format: false)
  conversation = hook.account.conversations.find_by(display_id: event['data']['conversation_display_id'])
  messages = in_array_format ? [] : ''
  character_count = 0

  conversation.messages.chat.reorder('id desc').each do |message|
    next if message.content.nil?

    character_count += message.content.length
    # Stop before including the message that pushes us past the budget.
    break if character_count > TOKEN_LIMIT

    messages.prepend(format_message(message, in_array_format))
  end
  messages
end

# Formats a single message either as a chat-completions hash (array mode)
# or as a transcript line (string mode).
def format_message(message, in_array_format)
  if in_array_format
    format_message_in_array(message)
  else
    format_message_in_string(message)
  end
end

# Maps a message onto the OpenAI chat format: incoming (customer) messages
# become the 'user' role, outgoing (agent) messages the 'assistant' role.
def format_message_in_array(message)
  role = message.incoming? ? 'user' : 'assistant'
  { role: role, content: message.content }
end

# Renders one transcript line: "Customer|Agent <sender name> : <content>\n".
# Sender may be nil (safe-navigated), in which case the name is blank.
def format_message_in_string(message)
  label = if message.incoming?
            'Customer'
          else
            'Agent'
          end
  format("%s %s : %s\n", label, message.sender&.name, message.content)
end

# Builds the JSON request body asking the model to summarize the
# conversation transcript as bullet points for the next agent.
def summarize_body
  system_prompt = 'Please summarize the key points from the following conversation between support agents and ' \
                  'customer as bullet points for the next support agent looking into the conversation'

  payload = {
    model: GPT_MODEL,
    messages: [
      { role: 'system', content: system_prompt },
      { role: 'user', content: conversation_messages }
    ]
  }
  payload.to_json
end

# Builds the JSON request body asking the model to suggest the next reply.
# The conversation history is appended after the system prompt in
# chat-completions (role/content) format.
def reply_suggestion_body
  system_message = { role: 'system',
                     content: 'Please suggest a reply to the following conversation between support agents and customer' }

  {
    model: GPT_MODEL,
    messages: [system_message, *conversation_messages(in_array_format: true)]
  }.to_json
end

# Handler for the 'reply_suggestion' event: asks OpenAI for a suggested
# reply and returns the model's message content.
def reply_suggestion_message
make_api_call(reply_suggestion_body)
end

# Handler for the 'summarize' event: asks OpenAI for a bullet-point
# summary of the conversation and returns the model's message content.
def summarize_message
make_api_call(summarize_body)
end

# Handler for the 'rephrase' event: asks OpenAI to rephrase the drafted
# reply and returns the model's message content.
#
# NOTE: reconstructs the post-change version — the diff view interleaved
# the removed lines (explicit args + local JSON.parse, which moved into
# make_api_call) with the new one-liner.
def rephrase_message
  make_api_call(rephrase_body)
end

def make_api_call(body)
Expand All @@ -28,12 +101,7 @@ def make_api_call(body)
'Authorization' => "Bearer #{hook.settings['api_key']}"
}

response = HTTParty.post(
'https://api.openai.com/v1/chat/completions',
headers: headers,
body: body
)

response.body
response = HTTParty.post(API_URL, headers: headers, body: body)
JSON.parse(response.body)['choices'].first['message']['content']
end
end
61 changes: 57 additions & 4 deletions spec/lib/integrations/openai/processor_service_spec.rb
Original file line number Diff line number Diff line change
Expand Up @@ -3,19 +3,23 @@
RSpec.describe Integrations::Openai::ProcessorService do
subject { described_class.new(hook: hook, event: event) }

let(:hook) { create(:integrations_hook, :openai) }
let(:account) { create(:account) }
let(:hook) { create(:integrations_hook, :openai, account: account) }
let(:expected_headers) { { 'Authorization' => "Bearer #{hook.settings['api_key']}" } }
# Canned OpenAI chat-completions response body shared by all contexts.
# NOTE: reconstructs the post-change version — the diff view interleaved
# the removed content line ('This is a rephrased test message.') with the
# new one.
let(:openai_response) do
  {
    'choices' => [
      {
        'message' => {
          'content' => 'This is a reply from openai.'
        }
      }
    ]
  }.to_json
end
# Eagerly-created fixtures: one conversation with an incoming (customer)
# and an outgoing (agent) message, used by the summarize / reply_suggestion
# contexts below.
let!(:conversation) { create(:conversation, account: account) }
let!(:customer_message) { create(:message, account: account, conversation: conversation, message_type: :incoming, content: 'hello agent') }
let!(:agent_message) { create(:message, account: account, conversation: conversation, message_type: :outgoing, content: 'hello customer') }

describe '#perform' do
context 'when event name is rephrase' do
Expand All @@ -36,11 +40,60 @@
.to_return(status: 200, body: openai_response, headers: {})

result = subject.perform
expect(result).to eq('This is a rephrased test message.')
expect(result).to eq('This is a reply from openai.')
end
end

context 'when event name is not rephrase' do
context 'when event name is reply_suggestion' do
let(:event) { { 'name' => 'reply_suggestion', 'data' => { 'conversation_display_id' => conversation.display_id } } }

it 'returns the suggested reply' do
# Expected request: system prompt followed by the conversation history in
# chronological order (customer first, then agent) as role/content pairs.
request_body = {
'model' => 'gpt-3.5-turbo',
'messages' => [
{ role: 'system', content: 'Please suggest a reply to the following conversation between support agents and customer' },
{ role: 'user', content: customer_message.content },
{ role: 'assistant', content: agent_message.content }
]
}.to_json

# Update the stub_request with the correct messages order
stub_request(:post, 'https://api.openai.com/v1/chat/completions')
.with(body: request_body, headers: expected_headers)
.to_return(status: 200, body: openai_response, headers: {})

result = subject.perform
expect(result).to eq('This is a reply from openai.')
end
end

context 'when event name is summarize' do
let(:event) { { 'name' => 'summarize', 'data' => { 'conversation_display_id' => conversation.display_id } } }
# Expected transcript format: "Customer <name> : <content>\n" /
# "Agent <name> : <content>\n" lines in chronological order.
let(:conversation_messages) do
"Customer #{customer_message.sender.name} : #{customer_message.content}\nAgent #{agent_message.sender.name} : #{agent_message.content}\n"
end

it 'returns the summarized message' do
# Expected request: system prompt asking for a bullet-point summary plus
# the transcript as a single user message.
request_body = {
'model' => 'gpt-3.5-turbo',
'messages' => [
{ 'role' => 'system',
'content' => 'Please summarize the key points from the following conversation between support agents and customer ' \
'as bullet points for the next support agent looking into the conversation' },
{ 'role' => 'user', 'content' => conversation_messages }
]
}.to_json

stub_request(:post, 'https://api.openai.com/v1/chat/completions')
.with(body: request_body, headers: expected_headers)
.to_return(status: 200, body: openai_response, headers: {})

result = subject.perform
expect(result).to eq('This is a reply from openai.')
end
end

context 'when event name is not one that can be processed' do
let(:event) { { 'name' => 'unknown', 'data' => {} } }

it 'returns nil' do
Expand Down

0 comments on commit 02c467b

Please sign in to comment.