Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Custom summary prompt in env functionality #208

Closed
wants to merge 17 commits into from
39 changes: 39 additions & 0 deletions config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -70,3 +70,42 @@ data:
purge_every: 60
log:
level: "info"
# Custom Prompts Configuration
# Allows customization of extractor prompts.
custom_prompts:
summarizer_prompts:
# Anthropic Guidelines:
# - Use XML-style tags like <current_summary> as element identifiers.
# - Include {{.PrevSummary}} and {{.MessagesJoined}} as template variables.
# - Clearly explain model instructions, e.g., "Review content inside <current_summary></current_summary> tags".
# - Provide a clear example within the prompt.
#
# Example format:
# anthropic: |
# <YOUR INSTRUCTIONS HERE>
# <example>
# <PROVIDE AN EXAMPLE>
# </example>
# <current_summary>{{.PrevSummary}}</current_summary>
# <new_lines>{{.MessagesJoined}}</new_lines>
# Response without preamble.
#
# If left empty, the default Anthropic summary prompt from zep/pkg/extractors/prompts.go will be used.
anthropic: |

# OpenAI summarizer prompt configuration.
# Guidelines:
# - Include {{.PrevSummary}} and {{.MessagesJoined}} as template variables.
# - Provide a clear example within the prompt.
#
# Example format:
# openai: |
# <YOUR INSTRUCTIONS HERE>
# Example:
# <PROVIDE AN EXAMPLE>
# Current summary: {{.PrevSummary}}
# New lines of conversation: {{.MessagesJoined}}
# New summary:
#
# If left empty, the default OpenAI summary prompt from zep/pkg/extractors/prompts.go will be used.
openai: |
30 changes: 20 additions & 10 deletions config/models.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,16 +3,17 @@ package config
// Config holds the configuration of the application
// Use cmd.NewConfig to create a new instance
type Config struct {
	LLM           LLM                 `mapstructure:"llm"`
	NLP           NLP                 `mapstructure:"nlp"`
	Memory        MemoryConfig        `mapstructure:"memory"`
	Extractors    ExtractorsConfig    `mapstructure:"extractors"`
	Store         StoreConfig         `mapstructure:"store"`
	Server        ServerConfig        `mapstructure:"server"`
	Log           LogConfig           `mapstructure:"log"`
	Auth          AuthConfig          `mapstructure:"auth"`
	DataConfig    DataConfig          `mapstructure:"data"`
	Development   bool                `mapstructure:"development"`
	// CustomPrompts allows overriding the built-in extractor prompt
	// templates via the `custom_prompts` section of the config file.
	CustomPrompts CustomPromptsConfig `mapstructure:"custom_prompts"`
}

type StoreConfig struct {
Expand Down Expand Up @@ -96,6 +97,15 @@ type SummarizerConfig struct {
Enabled bool `mapstructure:"enabled"`
}

// CustomPromptsConfig holds user-supplied prompt template overrides,
// loaded from the `custom_prompts` section of the config file.
type CustomPromptsConfig struct {
	// SummarizerPrompts overrides the default summarizer prompt templates.
	SummarizerPrompts ExtractorPromptsConfig `mapstructure:"summarizer_prompts"`
}

// ExtractorPromptsConfig holds per-LLM-service prompt template overrides.
// An empty string means the built-in default template is used.
type ExtractorPromptsConfig struct {
	OpenAI    string `mapstructure:"openai"`
	Anthropic string `mapstructure:"anthropic"`
}

type EmbeddingsConfig struct {
Enabled bool `mapstructure:"enabled"`
Dimensions int `mapstructure:"dimensions"`
Expand Down
4 changes: 2 additions & 2 deletions pkg/extractors/prompts.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ type IntentPromptTemplateData struct {
Input string
}

const summaryPromptTemplateAnthropic = `
const defaultSummaryPromptTemplateAnthropic = `
Review the Current Summary inside <current_summary></current_summary> XML tags,
and the New Lines of the provided conversation inside the <new_lines></new_lines> XML tags. Create a concise summary
of the conversation, adding from the <new_lines> to the <current_summary>.
Expand Down Expand Up @@ -47,7 +47,7 @@ singer and lists the founding members as Jimmy Page, John Paul Jones, and John B
Provide a response immediately without preamble.
`

const summaryPromptTemplateOpenAI = `
const defaultSummaryPromptTemplateOpenAI = `
Review the Current Content, if there is one, and the New Lines of the provided conversation. Create a concise summary
of the conversation, adding from the New Lines to the Current summary.
If the New Lines are meaningless, return the Current Content.
Expand Down
58 changes: 47 additions & 11 deletions pkg/extractors/summarizer.go
Original file line number Diff line number Diff line change
Expand Up @@ -250,6 +250,22 @@ func processOverLimitMessages(
}, nil
}

// validateSummarizerPrompt checks that a summarizer prompt template contains
// both template variables the summarizer substitutes at render time:
// {{.PrevSummary}} and {{.MessagesJoined}}. It returns nil when both are
// present, and an error describing the required format otherwise.
func validateSummarizerPrompt(prompt string) error {
	const (
		prevSummaryIdentifier    = "{{.PrevSummary}}"
		messagesJoinedIdentifier = "{{.MessagesJoined}}"
	)

	if strings.Contains(prompt, prevSummaryIdentifier) &&
		strings.Contains(prompt, messagesJoinedIdentifier) {
		return nil
	}

	return fmt.Errorf(
		"wrong summary prompt format. please make sure it contains the identifiers %s and %s",
		prevSummaryIdentifier, messagesJoinedIdentifier,
	)
}

// incrementalSummarizer takes a slice of messages and a summary, calls the LLM,
// and returns a new summary enriched with the messages content. Summary can be
// an empty string. Returns a string with the new summary and the number of
Expand All @@ -276,17 +292,7 @@ func incrementalSummarizer(
MessagesJoined: messagesJoined,
}

var summaryPromptTemplate string
switch appState.Config.LLM.Service {
case "openai":
summaryPromptTemplate = summaryPromptTemplateOpenAI
case "anthropic":
summaryPromptTemplate = summaryPromptTemplateAnthropic
default:
return "", 0, fmt.Errorf("unknown LLM service: %s", appState.Config.LLM.Service)
}

progressivePrompt, err := internal.ParsePrompt(summaryPromptTemplate, promptData)
progressivePrompt, err := generateProgressiveSummarizerPrompt(appState, promptData)
if err != nil {
return "", 0, err
}
Expand All @@ -309,3 +315,33 @@ func incrementalSummarizer(

return summary, tokensUsed, nil
}

// generateProgressiveSummarizerPrompt selects the summarizer prompt template
// for the configured LLM service — preferring a non-empty custom template
// from the config over the built-in default — validates that it contains the
// required template variables, and renders it with promptData.
func generateProgressiveSummarizerPrompt(appState *models.AppState, promptData SummaryPromptTemplateData) (string, error) {
	customPrompts := appState.Config.CustomPrompts.SummarizerPrompts

	var summaryPromptTemplate string
	switch appState.Config.LLM.Service {
	case "openai":
		summaryPromptTemplate = customPrompts.OpenAI
		if summaryPromptTemplate == "" {
			summaryPromptTemplate = defaultSummaryPromptTemplateOpenAI
		}
	case "anthropic":
		summaryPromptTemplate = customPrompts.Anthropic
		if summaryPromptTemplate == "" {
			summaryPromptTemplate = defaultSummaryPromptTemplateAnthropic
		}
	default:
		return "", fmt.Errorf("unknown LLM service: %s", appState.Config.LLM.Service)
	}

	// Validate custom and default templates alike, so a malformed template
	// is reported before it is rendered.
	if err := validateSummarizerPrompt(summaryPromptTemplate); err != nil {
		return "", err
	}

	return internal.ParsePrompt(summaryPromptTemplate, promptData)
}
104 changes: 104 additions & 0 deletions pkg/extractors/summarizer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ package extractors
import (
"testing"

"github.com/getzep/zep/config"
"github.com/getzep/zep/pkg/llms"
"github.com/getzep/zep/pkg/models"
"github.com/getzep/zep/pkg/testutils"
Expand Down Expand Up @@ -80,3 +81,106 @@ func TestSummarize_Anthropic(t *testing.T) {
// Reset the config to the default
appState.Config = testutils.NewTestConfig()
}

// TestValidateSummarizerPrompt verifies that prompts are accepted only when
// they contain both required template variables, {{.PrevSummary}} and
// {{.MessagesJoined}}.
func TestValidateSummarizerPrompt(t *testing.T) {
	testCases := []struct {
		name    string
		prompt  string
		wantErr bool
	}{
		{
			name:    "valid prompt",
			prompt:  "{{.PrevSummary}} {{.MessagesJoined}}",
			wantErr: false,
		},
		{
			name:    "missing MessagesJoined",
			prompt:  "{{.PrevSummary}}",
			wantErr: true,
		},
		// Added coverage: the mirror case and the empty prompt were
		// previously untested.
		{
			name:    "missing PrevSummary",
			prompt:  "{{.MessagesJoined}}",
			wantErr: true,
		},
		{
			name:    "empty prompt",
			prompt:  "",
			wantErr: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			err := validateSummarizerPrompt(tc.prompt)
			if tc.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}

// TestGenerateProgressiveSummarizerPrompt exercises prompt selection for each
// LLM service, both with a custom template configured and falling back to the
// built-in default.
func TestGenerateProgressiveSummarizerPrompt(t *testing.T) {
	cases := []struct {
		name                  string
		service               string
		customPromptOpenAI    string
		customPromptAnthropic string
		expectedPrompt        string
		defaultPrompt         bool
	}{
		{
			name:                  "OpenAI with custom prompt",
			service:               "openai",
			customPromptOpenAI:    "custom openai prompt {{.PrevSummary}} {{.MessagesJoined}}",
			customPromptAnthropic: "",
			expectedPrompt:        "custom openai prompt previous summary joined messages",
		},
		{
			name:                  "Anthropic with custom prompt",
			service:               "anthropic",
			customPromptOpenAI:    "",
			customPromptAnthropic: "custom anthropic prompt {{.PrevSummary}} {{.MessagesJoined}}",
			expectedPrompt:        "custom anthropic prompt previous summary joined messages",
		},
		{
			name:                  "OpenAI without custom prompt",
			service:               "openai",
			customPromptOpenAI:    "",
			customPromptAnthropic: "",
			expectedPrompt:        defaultSummaryPromptTemplateOpenAI,
			defaultPrompt:         true,
		},
		{
			name:                  "Anthropic without custom prompt",
			service:               "anthropic",
			customPromptOpenAI:    "",
			customPromptAnthropic: "",
			expectedPrompt:        defaultSummaryPromptTemplateAnthropic,
			defaultPrompt:         true,
		},
	}

	// The template data is identical for every case, so build it once.
	promptData := SummaryPromptTemplateData{
		PrevSummary:    "previous summary",
		MessagesJoined: "joined messages",
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			cfg := &config.Config{
				LLM: config.LLM{
					Service: c.service,
				},
				CustomPrompts: config.CustomPromptsConfig{
					SummarizerPrompts: config.ExtractorPromptsConfig{
						OpenAI:    c.customPromptOpenAI,
						Anthropic: c.customPromptAnthropic,
					},
				},
			}

			got, err := generateProgressiveSummarizerPrompt(&models.AppState{Config: cfg}, promptData)
			assert.NoError(t, err)
			if c.defaultPrompt {
				// Only compare the first 50 characters of the prompt, since the instructions should match
				assert.Equal(t, c.expectedPrompt[:50], got[:50])
			} else {
				assert.Equal(t, c.expectedPrompt, got)
			}
		})
	}
}
Loading