Commit 8e143a5: Merge pull request #48 from dockhardman/feature/prompt_blueprint

Feature/prompt blueprint

dockhardman committed Jul 5, 2024
2 parents 24d0656 + fa0eb43 commit 8e143a5
Showing 19 changed files with 629 additions and 213 deletions.
2 changes: 1 addition & 1 deletion languru/__init__.py
@@ -1,5 +1,5 @@
 from languru.models.data_model import DataModel
-from languru.prompts.base import PromptTemplate
+from languru.prompts.prompt_template import PromptTemplate
 from languru.types.audio import (
     AudioSpeechRequest,
     AudioTranscriptionRequest,
7 changes: 2 additions & 5 deletions languru/models/data_model.py
@@ -9,11 +9,8 @@
 from languru.config import logger
 from languru.prompts import PromptTemplate
 from languru.prompts.repositories.data_model import prompt_date_model_from_openai
-from languru.utils.common import (
-    display_messages,
-    ensure_list,
-    ensure_openai_chat_completion_content,
-)
+from languru.utils.common import display_messages, ensure_list
+from languru.utils.openai_utils import ensure_openai_chat_completion_content

 DataModelTypeVar = TypeVar("DataModelTypeVar", bound="DataModel")
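
The only change here is that ensure_openai_chat_completion_content now lives in languru.utils.openai_utils instead of languru.utils.common. Its body is not part of this diff; as a rough guess at what the relocated helper does (the signature and error messages below are assumptions, not code from this commit):

```python
# Hypothetical sketch; the real implementation is in languru/utils/openai_utils.py,
# which this diff does not show.
from typing import Text

from openai.types.chat import ChatCompletion


def ensure_openai_chat_completion_content(chat_completion: ChatCompletion) -> Text:
    """Return the assistant's message content, raising if the response is empty."""
    if not chat_completion.choices:
        raise ValueError("The chat completion has no choices.")
    content = chat_completion.choices[0].message.content
    if content is None:
        raise ValueError("The chat completion message has no content.")
    return content
```
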
2 changes: 1 addition & 1 deletion languru/prompts/__init__.py
@@ -1,3 +1,3 @@
-from languru.prompts.base import PromptTemplate
+from languru.prompts.prompt_template import PromptTemplate

 __all__ = ["PromptTemplate"]
173 changes: 163 additions & 10 deletions languru/prompts/base.py → languru/prompts/prompt_template.py
@@ -1,14 +1,38 @@
 import copy
 import hashlib
 import json
 from types import MappingProxyType
-from typing import Any, Dict, List, Optional, Sequence, Text, Tuple, Union
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    List,
+    Optional,
+    Sequence,
+    Text,
+    Tuple,
+    Union,
+)

 from openai.types.chat import ChatCompletionMessageParam
-from pyassorted.string import Bracket, find_placeholders, multiple_replace
+from pyassorted.string import (
+    Bracket,
+    extract_code_blocks,
+    find_placeholders,
+    multiple_replace,
+)
 from pydantic import BaseModel

+from languru.prompts.repositories.assistant import explanation_co_star
+from languru.prompts.repositories.user import (
+    question_of_costar,
+    request_to_rewrite_as_costar,
+)
 from languru.types.chat.completions import Message
+from languru.utils.chat import chat_completion_once
 from languru.utils.common import display_messages
+from languru.utils.openai_utils import messages_to_md5
+
+if TYPE_CHECKING:
+    from openai import OpenAI


 class PromptTemplate:
@@ -40,6 +64,125 @@ def __init__(
             m.model_dump() if isinstance(m, Message) else m for m in messages or []
         ]

+    @classmethod
+    def from_description(
+        cls,
+        prompt_description: Text,
+        client: "OpenAI",
+        *,
+        model: Text,
+        example_user_queries: Optional[Sequence[Text]] = None,
+        temperature: float = 0.7,
+        verbose: bool = False,
+        **kwargs,
+    ):
+        """
+        Create an instance of the class from a prompt description using OpenAI's API.
+
+        This method generates a prompt based on the given description, creates example
+        messages if provided, and constructs the final prompt with examples.
+
+        Parameters
+        ----------
+        prompt_description : Text
+            A description of the desired prompt.
+        client : OpenAI
+            An instance of the OpenAI client for making API calls.
+        model : Text
+            The name of the OpenAI model to use for generating responses.
+        example_user_queries : Optional[Sequence[Text]], optional
+            A sequence of example user queries to generate responses for, by default None.
+        temperature : float, optional
+            The sampling temperature to use when generating responses, by default 0.7.
+        verbose : bool, optional
+            If True, display detailed information about API calls and responses, by default False.
+        **kwargs : dict
+            Additional keyword arguments to be passed to the class constructor.
+
+        Returns
+        -------
+        PromptTemplate
+            An instance of the class initialized with the generated prompt and any additional
+            parameters provided in kwargs.
+
+        Raises
+        ------
+        ValueError
+            If no markdown code block can be extracted from the API response.
+
+        Notes
+        -----
+        This method uses the OpenAI API to generate a prompt based on the given description,
+        and then uses that prompt to generate example responses if example queries are provided.
+        The final prompt includes these examples and is used to initialize a new instance of the class.
+        """  # noqa: E501
+
+        # Build prompt by chat
+        chat_answer = chat_completion_once(
+            messages=[
+                {"role": "user", "content": question_of_costar},
+                {"role": "assistant", "content": explanation_co_star},
+                {"role": "user", "content": request_to_rewrite_as_costar},
+            ],
+            client=client,
+            model=model,
+            prompt_vars={"PROMPT_DESCRIPTION": prompt_description},
+            verbose=verbose,
+            temperature=temperature,
+            wrapped_by=Bracket.CurlyBrackets,
+        )
+        # Parse response
+        code_blocks = extract_code_blocks(
+            chat_answer, language="markdown", eob_missing_ok=True
+        )
+        code_blocks = [b.strip() for b in code_blocks if b.strip()]
+        if len(code_blocks) == 0:
+            raise ValueError(
+                "Failed to extract a markdown code block from the response: "
+                + f"{chat_answer}"
+            )
+        prompt_costar = code_blocks[-1].strip()
+
+        # Generate example messages
+        examples_messages: List[List["Message"]] = []
+        for _ex_query in example_user_queries or []:
+            _ex_query = _ex_query.strip()
+            _ex_chat_answer = chat_completion_once(
+                messages=[
+                    {"role": "system", "content": prompt_costar},
+                    {"role": "user", "content": _ex_query},
+                ],
+                client=client,
+                model=model,
+                verbose=verbose,
+                temperature=temperature,
+                wrapped_by=Bracket.CurlyBrackets,
+            )
+            examples_messages.append(
+                [
+                    Message.model_validate({"role": "user", "content": _ex_query}),
+                    Message.model_validate(
+                        {"role": "assistant", "content": _ex_chat_answer}
+                    ),
+                ]
+            )
+
+        # Add examples into the costar prompt
+        if examples_messages:
+            prompt_costar += "\n\n## Examples"
+        prompt_costar = prompt_costar.strip()
+        for idx, example_messages in enumerate(examples_messages):
+            prompt_costar += f"\n\n### Example {idx + 1}\n\n"
+            prompt_costar += display_messages(example_messages, is_print=False)
+        prompt_costar = prompt_costar.strip()
+
+        # Final prompt
+        return cls(
+            prompt=prompt_costar,
+            prompt_vars=kwargs.get("prompt_vars"),
+            messages=kwargs.get("messages"),
+        )
+
     def __call__(
         self,
         messages: Optional[
@@ -62,15 +205,25 @@ def __call__(
     def __str__(self):
         """Return a string representation of the object."""

-        _messages = self.prompt_messages()
-        _messages_md5 = hashlib.md5(
-            json.dumps(_messages, sort_keys=True, default=str).encode()
-        ).hexdigest()
-        return f'<{self.__class__.__name__} md5="{_messages_md5}">'
+        return f'<{self.__class__.__name__} md5="{self.md5_formatted}">'

     def __repr__(self):
         return self.__str__()

+    @property
+    def md5(self) -> Text:
+        """Return the MD5 hash of the prompt messages."""
+
+        _messages = self.prompt_messages()
+        return messages_to_md5(_messages)
+
+    @property
+    def md5_formatted(self) -> Text:
+        """Return the formatted MD5 hash of the prompt messages."""
+
+        _messages = self.format_messages()
+        return messages_to_md5(_messages)
+
     @property
     def prompt_vars(self) -> Dict[Text, Any]:
         """Return a copy of the prompt vars."""
@@ -136,7 +289,7 @@ def prompt_messages(
         _messages.append(
             {"role": self.role_system, "content": self.prompt}  # type: ignore
         )
-        _messages += self.messages
+        _messages += copy.deepcopy(self.messages)
         if messages:
             _messages += [
                 m.model_dump() if isinstance(m, BaseModel) else copy.deepcopy(m)
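
The new from_description classmethod is the heart of this PR: it asks the model to explain CO-STAR, asks it to rewrite a plain description into a CO-STAR prompt, extracts the resulting markdown code block, and optionally appends generated few-shot examples. A minimal usage sketch (the model name, description, and example query are placeholders, not from this commit):

```python
from openai import OpenAI

from languru.prompts import PromptTemplate

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

template = PromptTemplate.from_description(
    "A concise assistant that reviews Python code for common pitfalls.",
    client,
    model="gpt-4o-mini",  # hypothetical; any chat-capable model should work
    example_user_queries=["What is wrong with `except: pass`?"],
)

print(template)  # e.g. <PromptTemplate md5="...">
```
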
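The __str__ rewrite moves the inline hashing into the new messages_to_md5 helper, exposed through the md5 and md5_formatted properties (presumably hashing the raw prompt messages versus the messages after variable substitution via format_messages). Judging from the removed lines, the helper plausibly looks like the sketch below; the actual code in languru/utils/openai_utils.py is not shown in this diff:

```python
import hashlib
import json
from typing import Any, Dict, List, Text


def messages_to_md5(messages: List[Dict[Text, Any]]) -> Text:
    # Mirrors the hashing the old __str__ did inline; the real helper may differ.
    return hashlib.md5(
        json.dumps(messages, sort_keys=True, default=str).encode()
    ).hexdigest()
```
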
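The one-line prompt_messages change is a defensive copy: without deepcopy, the message dicts stored on the template are aliased into every returned list, so a caller mutating the result silently corrupts the template. A standalone illustration of the hazard (not languru code):

```python
import copy

stored = [{"role": "user", "content": "hi"}]

aliased = list(stored)  # shallow copy: both lists share the same dict
aliased[0]["content"] = "changed"
print(stored[0]["content"])  # "changed": the original state was mutated

stored = [{"role": "user", "content": "hi"}]
safe = copy.deepcopy(stored)  # what the new code does
safe[0]["content"] = "changed"
print(stored[0]["content"])  # still "hi"
```
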
40 changes: 40 additions & 0 deletions languru/prompts/repositories/assistant.py
@@ -0,0 +1,40 @@
+from textwrap import dedent
+from typing import Final, Text
+
+explanation_co_star: Final[Text] = dedent(
+    """
+    I'd be happy to explain the CO-STAR prompt framework with gradually increasing complexity. Let's start simple and build up from there.
+
+    Basic Explanation:
+    CO-STAR is a method for writing clear instructions to AI language models. It stands for Context, Objective, Style, Tone, Audience, and Response. By including these elements, you help the AI understand exactly what you want and how you want it delivered.
+
+    Intermediate Explanation:
+    The CO-STAR framework helps create more effective prompts for AI language models:
+
+    1. Context: Gives background information to set the scene.
+    2. Objective: Clearly states what you want the AI to do.
+    3. Style: Describes how you want the content written.
+    4. Tone: Specifies the emotional feel of the response.
+    5. Audience: Identifies who the content is for.
+    6. Response: Outlines the desired format of the answer.
+
+    By addressing each of these areas, you provide a comprehensive set of instructions that guide the AI to produce more accurate and tailored responses.
+
+    Advanced Explanation:
+    The CO-STAR framework is a sophisticated approach to prompt engineering that leverages key aspects of communication theory and natural language processing:
+
+    1. Context (C): Provides crucial background information that allows the language model to activate relevant knowledge domains and establish appropriate contextual parameters. This helps mitigate ambiguity and reduces the likelihood of the model making incorrect assumptions.
+
+    2. Objective (O): Clearly defines the task or goal, which serves as the primary directive for the language model's output generation process. This helps focus the model's attention on relevant information and guides its decision-making throughout the response formulation.
+
+    3. Style (S): Specifies the desired writing style, which influences the model's choice of vocabulary, sentence structure, and overall composition. This can range from formal academic writing to casual conversational text, ensuring the output aligns with the intended use case.
+
+    4. Tone (T): Establishes the emotional undercurrent of the response, affecting word choice and phrasing to convey the appropriate sentiment. This is crucial for maintaining consistency in brand voice or personal communication style.
+
+    5. Audience (A): Identifies the target readership, allowing the model to tailor its language complexity, use of jargon, and cultural references to suit the intended recipients. This ensures the output is accessible and relevant to its readers.
+
+    6. Response (R): Outlines the expected format of the output, which can significantly impact how the information is structured and presented. This is particularly important for integrating AI-generated content into larger systems or workflows.
+
+    By systematically addressing these elements, the CO-STAR framework enables more precise control over AI language model outputs. It helps create a shared understanding between the user and the AI, resulting in more accurate, contextually appropriate, and useful responses. This approach can significantly enhance the effectiveness of AI-assisted content creation, analysis, and communication tasks across various domains and applications.
+    """  # noqa: E501
+).strip()
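
For readers new to the framework, a filled-in CO-STAR prompt of the kind from_description aims to produce might look like this (an invented illustration, not output from this code):

```markdown
## Context
You are the support bot for a small open-source Python library.

## Objective
Answer installation questions and point users to the relevant docs page.

## Style
Concise and technical, with short code snippets where helpful.

## Tone
Friendly and matter-of-fact.

## Audience
Python developers who have already read the README.

## Response
A short markdown answer with at most one code block.
```
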
65 changes: 65 additions & 0 deletions languru/prompts/repositories/user.py
@@ -0,0 +1,65 @@
+from textwrap import dedent
+from typing import Final, Text
+
+question_of_costar: Final[Text] = dedent(
+    """
+    The CO-STAR prompt framework is:
+
+    **Context (C):** Providing background information helps the LLM understand the specific scenario.
+
+    **Objective (O):** Clearly defining the task directs the LLM’s focus.
+
+    **Style (S):** Specifying the desired writing style aligns the LLM response.
+
+    **Tone (T):** Setting the tone ensures the response resonates with the required sentiment.
+
+    **Audience (A):** Identifying the intended audience tailors the LLM’s response to the intended readers.
+
+    **Response (R):** Providing the response format, like text or JSON, ensures the LLM outputs in the required format and helps build pipelines.
+
+    Please explain it with gradually increasing complexity.
+    """  # noqa: E501
+).strip()
+
+request_to_rewrite_as_costar: Final[Text] = dedent(
+    """
+    ```
+    {PROMPT_DESCRIPTION}
+    ```
+
+    Please rewrite the prompt above as CO-STAR framework step by step:
+
+    1. Analyze the prompt and provide a detailed explanation.
+    2. Come up with some hypotheses creatively.
+    3. Finally, provide a response in the markdown code block format as shown below:
+
+    ```markdown
+    ## Context
+    {CONTEXT}
+
+    ## Objective
+    {OBJECTIVE}
+
+    ## Style
+    {STYLE}
+
+    ## Tone
+    {TONE}
+
+    ## Audience
+    {AUDIENCE}
+
+    ## Response
+    {RESPONSE}
+    ```
+
+    Note 1: Please ensure that the response is in the correct markdown format in a code snippet.
+    Note 2: The chat example is not required for this prompt.
+    """  # noqa: E501
+).strip()
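
Note how request_to_rewrite_as_costar instructs the model to answer inside a markdown-tagged code fence; from_description then relies on pyassorted's extract_code_blocks to pull that block back out, with eob_missing_ok=True tolerating a reply that was cut off before the closing fence. Roughly, the behavior being relied on looks like the following illustrative re-implementation (not pyassorted's actual code):

```python
import re
from typing import List, Text


def extract_code_blocks_sketch(
    text: Text, language: Text, eob_missing_ok: bool = False
) -> List[Text]:
    """Collect the bodies of fenced code blocks tagged with `language`."""
    blocks: List[Text] = []
    pattern = rf"```{re.escape(language)}\n(.*?)(```|\Z)"
    for match in re.finditer(pattern, text, flags=re.DOTALL):
        # A block whose closing fence is missing (a truncated reply)
        # only counts when eob_missing_ok is set.
        if match.group(2) == "```" or eob_missing_ok:
            blocks.append(match.group(1))
    return blocks


reply = "Here you go:\n```markdown\n## Context\nA demo.\n```\nDone."
print(extract_code_blocks_sketch(reply, language="markdown"))
# ['## Context\nA demo.\n']
```
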
(13 of the 19 changed files are not shown above.)