13 changes: 7 additions & 6 deletions src/paperqa/prompts.py
@@ -7,9 +7,10 @@
     " answer the question, instead summarize to give evidence to help answer the"
     " question. Stay detailed; report specific numbers, equations, or direct quotes"
     ' (marked with quotation marks). Reply "Not applicable" if the excerpt is'
-    " irrelevant. At the end of your response, provide an integer score from 1-10 on a"
-    " newline indicating relevance to question. Do not explain your score.\n\nRelevant"
-    " Information Summary ({summary_length}):"
+    " irrelevant. At the end of your response,"
+    " provide an integer score from 1-10 on a newline indicating relevance to question."  # Don't use 0-10 since we mention "not applicable" instead  # noqa: E501
+    " Do not explain your score."
+    "\n\nRelevant Information Summary ({summary_length}):"
 )
 # This prompt template integrates with `text` variable of the above `summary_prompt`
 text_with_tables_prompt_template = (
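The rewrapped `summary_prompt` still asks the model for free-text evidence followed by a bare integer score from 1-10 on its own final line. As a rough illustration only (this helper is hypothetical and not part of the PR), a reply in that shape could be split like so:

```python
# Hypothetical helper, not part of this PR: splits a reply that follows the
# summary_prompt instructions into the evidence text and the trailing 1-10 score.
def split_summary_and_score(response: str) -> tuple[str, int | None]:
    lines = response.rstrip().splitlines()
    if lines and lines[-1].strip().isdigit():
        score = int(lines[-1].strip())
        if 1 <= score <= 10:
            return "\n".join(lines[:-1]).rstrip(), score
    return response, None  # e.g. the model replied "Not applicable" with no score


print(split_summary_and_score("Reports a 12% accuracy gain on benchmark X.\n8"))
# -> ('Reports a 12% accuracy gain on benchmark X.', 8)
```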
@@ -111,14 +112,14 @@
     " Your summary, combined with many others,"
     " will be given to the model to generate an answer."
     " Respond with the following JSON format:"
-    '\n\n{{\n "summary": "...",\n "relevance_score": "..."\n "used_images"\n}}'
+    '\n\n{{\n "summary": "...",\n "relevance_score": 0-10,\n "used_images"\n}}'
     "\n\nwhere `summary` is relevant information from the text - {summary_length} words."
     " `relevance_score` is an integer 0-10 for the relevance of `summary` to the question."
     " `used_images` is a boolean flag indicating"
     " if any images present in a multimodal message were used,"
     " and if no images were present it should be false."
-    "\n\nThe excerpt may or may not contain relevant information. If not, leave `summary` empty, "
-    "and make `relevance_score` be 0."
+    "\n\nThe excerpt may or may not contain relevant information."
+    " If not, leave `summary` empty, and make `relevance_score` be 0."
 )

 env_system_prompt = (
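For the JSON variant, the prompt now documents `relevance_score` as an integer 0-10 and `used_images` as a boolean. A minimal consumer sketch (not from this PR; it simply assumes the model returns plain JSON with those keys) would look like:

```python
# Minimal sketch, not part of this PR: parse a reply shaped like the
# summary_json_system example and apply the "irrelevant" fallback rule.
import json

reply = '{"summary": "Reports a 2x inference speedup on GPUs.", "relevance_score": 7, "used_images": false}'
parsed = json.loads(reply)

summary = parsed.get("summary", "")
score = int(parsed.get("relevance_score", 0))
used_images = bool(parsed.get("used_images", False))

if not summary or score == 0:
    print("Excerpt judged irrelevant")  # empty summary / score 0, per the prompt
else:
    print(f"relevance_score={score}, used_images={used_images}: {summary}")
```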
19 changes: 11 additions & 8 deletions tests/test_paperqa.py
@@ -529,14 +529,17 @@ async def test_json_evidence(docs_fixture: Docs) -> None:
     settings = Settings.from_name("fast")
     settings.prompts.use_json = True
     settings.prompts.summary_json_system = (
-        "Provide a summary of the excerpt that could help answer the question based on"
-        " the excerpt. The excerpt may be irrelevant. Do not directly answer the"
-        " question - only summarize relevant information. Respond with the following"
-        ' JSON format:\n\n {{\n"summary": "...",\n"author_name":'
-        ' "...",\n"relevance_score": "..."}}\n\n where `summary` is relevant'
-        " information from text - about 100 words words, `author_name` specifies the"
-        " author , and `relevance_score` is the relevance of `summary` to answer the"
-        " question (integer out of 10)."
+        "Provide a summary of the relevant information"
+        " that could help answer the question based on the excerpt."
+        " Your summary, combined with many others,"
+        " will be given to the model to generate an answer."
+        " Respond with the following JSON format:"
+        '\n\n{{\n "summary": "...",\n "author_name": "...",\n "relevance_score": 0-10,\n}}'
+        "\n\nwhere `summary` is relevant information from the text - about 100 words."
+        " `author_name` specifies the author."
+        " `relevance_score` is an integer 0-10 for the relevance of `summary` to the question."
+        "\n\nThe excerpt may or may not contain relevant information."
+        " If not, leave `summary` empty, and make `relevance_score` be 0."
     )
     orig_acompletion = litellm.acompletion
     has_made_bad_json_context = False
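Both the shipped prompt and the test override rely on doubled braces so that `str.format` leaves the JSON example intact while substituting named fields such as `{summary_length}`. A small standalone check of that behavior (illustrative only, not code from the PR):

```python
# Illustrative only: doubled braces survive str.format(), so the JSON example
# stays literal while named fields such as {summary_length} are substituted.
template = (
    "Respond with the following JSON format:"
    '\n\n{{\n "summary": "...",\n "relevance_score": 0-10\n}}'
    "\n\nwhere `summary` is relevant information from the text - {summary_length} words."
)
print(template.format(summary_length="about 100"))
```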