Fix docstrings
nick863 committed Jun 20, 2024
1 parent 00c720f commit a852585
Showing 9 changed files with 167 additions and 140 deletions.
65 changes: 34 additions & 31 deletions src/promptflow-evals/promptflow/evals/evaluators/_chat/_chat.py
@@ -22,39 +22,41 @@


class ChatEvaluator:
"""
Initialize an evaluator configured for a specific Azure OpenAI model.
:param model_config: Configuration for the Azure OpenAI model.
:type model_config: AzureOpenAIModelConfiguration
:param eval_last_turn: Set to True to evaluate only the most recent exchange in the dialogue,
focusing on the latest user inquiry and the assistant's corresponding response. Defaults to False
:type eval_last_turn: bool
:param parallel: If True, use parallel execution for evaluators. Else, use sequential execution.
Default is True.
:type parallel: bool
:return: A function that evaluates and generates metrics for "chat" scenario.
:rtype: function
**Usage**
.. code-block:: python
chat_eval = ChatEvaluator(model_config)
conversation = [
{"role": "user", "content": "What is the value of 2 + 2?"},
{"role": "assistant", "content": "2 + 2 = 4", "context": {
"citations": [
{"id": "math_doc.md", "content": "Information about additions: 1 + 2 = 3, 2 + 2 = 4"}
]
}
}
]
result = chat_eval(conversation=conversation)
"""

def __init__(
self, model_config: AzureOpenAIModelConfiguration, eval_last_turn: bool = False, parallel: bool = True
):
"""
Initialize an evaluator configured for a specific Azure OpenAI model.
:param model_config: Configuration for the Azure OpenAI model.
:type model_config: AzureOpenAIModelConfiguration
:param eval_last_turn: Set to True to evaluate only the most recent exchange in the dialogue,
focusing on the latest user inquiry and the assistant's corresponding response. Defaults to False
:type eval_last_turn: bool
:param parallel: If True, use parallel execution for evaluators. Else, use sequential execution.
Default is True.
:type parallel: bool
:return: A function that evaluates and generates metrics for "chat" scenario.
:rtype: function
**Usage**
.. code-block:: python
chat_eval = ChatEvaluator(model_config)
conversation = [
{"role": "user", "content": "What is the value of 2 + 2?"},
{"role": "assistant", "content": "2 + 2 = 4", "context": {
"citations": [
{"id": "math_doc.md", "content": "Information about additions: 1 + 2 = 3, 2 + 2 = 4"}
]
}
}
]
result = chat_eval(conversation=conversation)
"""
"""Constructor."""
self._eval_last_turn = eval_last_turn
self._parallel = parallel

@@ -73,7 +75,8 @@ def __init__(
        self._retrieval_chat_evaluator = RetrievalChatEvaluator(model_config)

    def __call__(self, *, conversation, **kwargs):
-        """Evaluates chat scenario.
+        """
+        Evaluates chat scenario.
        :param conversation: The conversation to be evaluated. Each turn should have "role" and "content" keys.
            "context" key is optional for assistant's turn and should have "citations" key with list of citations.
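For context on how the relocated docstring is meant to be used, here is a minimal end-to-end sketch of calling ChatEvaluator. It is an assumption about the promptflow-evals API at this commit (that ChatEvaluator is re-exported from promptflow.evals.evaluators and that AzureOpenAIModelConfiguration comes from promptflow.core with the field names shown); the endpoint, key, and deployment values are placeholders, not values from this diff.

    # Hypothetical end-to-end usage; import paths and config fields are assumptions.
    from promptflow.core import AzureOpenAIModelConfiguration
    from promptflow.evals.evaluators import ChatEvaluator

    model_config = AzureOpenAIModelConfiguration(
        azure_endpoint="https://<your-resource>.openai.azure.com",
        api_key="<api-key>",
        azure_deployment="<chat-model-deployment>",
        api_version="2024-02-15-preview",
    )

    chat_eval = ChatEvaluator(model_config, eval_last_turn=False, parallel=True)
    conversation = [
        {"role": "user", "content": "What is the value of 2 + 2?"},
        {
            "role": "assistant",
            "content": "2 + 2 = 4",
            "context": {
                "citations": [
                    {"id": "math_doc.md", "content": "Information about additions: 1 + 2 = 3, 2 + 2 = 4"}
                ]
            },
        },
    ]
    result = chat_eval(conversation=conversation)
    print(result)  # dict of per-metric scores for the conversation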
@@ -12,22 +12,24 @@


class CoherenceEvaluator:
-    def __init__(self, model_config: AzureOpenAIModelConfiguration):
-        """
-        Initialize an evaluator configured for a specific Azure OpenAI model.
-        :param model_config: Configuration for the Azure OpenAI model.
-        :type model_config: AzureOpenAIModelConfiguration
-        **Usage**
-        .. code-block:: python
-            eval_fn = CoherenceEvaluator(model_config)
-            result = eval_fn(
-                question="What is the capital of Japan?",
-                answer="The capital of Japan is Tokyo.")
-        """
+    """
+    Initialize an evaluator configured for a specific Azure OpenAI model.
+    :param model_config: Configuration for the Azure OpenAI model.
+    :type model_config: AzureOpenAIModelConfiguration
+    **Usage**
+    .. code-block:: python
+        eval_fn = CoherenceEvaluator(model_config)
+        result = eval_fn(
+            question="What is the capital of Japan?",
+            answer="The capital of Japan is Tokyo.")
+    """

+    def __init__(self, model_config: AzureOpenAIModelConfiguration):
+        """Constructor."""
        # TODO: Remove this block once the bug is fixed
        # https://msdata.visualstudio.com/Vienna/_workitems/edit/3151324
        if model_config.api_version is None:
@@ -39,7 +41,9 @@ def __init__(self, model_config: AzureOpenAIModelConfiguration):
        self._flow = load_flow(source=prompty_path, model=prompty_model_config)

    def __call__(self, *, question: str, answer: str, **kwargs):
-        """Evaluate coherence.
+        """
+        Evaluate coherence.
        :param question: The question to be evaluated.
        :type question: str
        :param answer: The answer to be evaluated.
@@ -8,28 +8,31 @@


class F1ScoreEvaluator:
-    def __init__(self):
-        """
-        Initialize an evaluator for calculating F1 score.
-        **Usage**
-        .. code-block:: python
-            eval_fn = F1ScoreEvaluator()
-            result = eval_fn(
-                answer="The capital of Japan is Tokyo.",
-                ground_truth="Tokyo is Japan's capital, known for its blend of traditional culture \
-                    and technological advancements.")
-        """
+    """
+    Initialize an evaluator for calculating F1 score.
+    **Usage**
+    .. code-block:: python
+        eval_fn = F1ScoreEvaluator()
+        result = eval_fn(
+            answer="The capital of Japan is Tokyo.",
+            ground_truth="Tokyo is Japan's capital, known for its blend of traditional culture \
+                and technological advancements.")
+    """

+    def __init__(self):
+        """Constructor."""

        # Load the flow as function
        current_dir = Path(__file__).resolve().parent
        flow_dir = current_dir / "flow"
        self._flow = load_flow(source=flow_dir)

    def __call__(self, *, answer: str, ground_truth: str, **kwargs):
-        """Evaluate F1 score.
+        """
+        Evaluate F1 score.
        :param answer: The answer to be evaluated.
        :type answer: str
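As background on what F1ScoreEvaluator measures, the sketch below shows the standard token-overlap F1 commonly used for QA answers: precision and recall over shared tokens between the answer and the ground truth, combined harmonically. This is an illustration of the metric only, not the code of the packaged flow, which may tokenize and normalize differently.

    # Illustrative token-overlap F1; not the promptflow-evals flow implementation.
    import re
    from collections import Counter

    def _tokens(text: str):
        # crude normalization: lowercase and keep alphanumeric runs
        return re.findall(r"[a-z0-9]+", text.lower())

    def token_f1(answer: str, ground_truth: str) -> float:
        answer_tokens = _tokens(answer)
        truth_tokens = _tokens(ground_truth)
        overlap = Counter(answer_tokens) & Counter(truth_tokens)
        num_same = sum(overlap.values())
        if num_same == 0:
            return 0.0
        precision = num_same / len(answer_tokens)
        recall = num_same / len(truth_tokens)
        return 2 * precision * recall / (precision + recall)

    print(token_f1("The capital of Japan is Tokyo.", "Tokyo is Japan's capital."))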
@@ -12,22 +12,24 @@


class FluencyEvaluator:
-    def __init__(self, model_config: AzureOpenAIModelConfiguration):
-        """
-        Initialize an evaluator configured for a specific Azure OpenAI model.
-        :param model_config: Configuration for the Azure OpenAI model.
-        :type model_config: AzureOpenAIModelConfiguration
-        **Usage**
-        .. code-block:: python
-            eval_fn = FluencyEvaluator(model_config)
-            result = eval_fn(
-                question="What is the capital of Japan?",
-                answer="The capital of Japan is Tokyo.")
-        """
+    """
+    Initialize an evaluator configured for a specific Azure OpenAI model.
+    :param model_config: Configuration for the Azure OpenAI model.
+    :type model_config: AzureOpenAIModelConfiguration
+    **Usage**
+    .. code-block:: python
+        eval_fn = FluencyEvaluator(model_config)
+        result = eval_fn(
+            question="What is the capital of Japan?",
+            answer="The capital of Japan is Tokyo.")
+    """

+    def __init__(self, model_config: AzureOpenAIModelConfiguration):
+        """Constructor."""
        # TODO: Remove this block once the bug is fixed
        # https://msdata.visualstudio.com/Vienna/_workitems/edit/3151324
        if model_config.api_version is None:
@@ -39,7 +41,9 @@ def __init__(self, model_config: AzureOpenAIModelConfiguration):
        self._flow = load_flow(source=prompty_path, model=prompty_model_config)

    def __call__(self, *, question: str, answer: str, **kwargs):
-        """Evaluate fluency.
+        """
+        Evaluate fluency.
        :param question: The question to be evaluated.
        :type question: str
        :param answer: The answer to be evaluated.
@@ -12,23 +12,25 @@


class GroundednessEvaluator:
-    def __init__(self, model_config: AzureOpenAIModelConfiguration):
-        """
-        Initialize an evaluator configured for a specific Azure OpenAI model.
-        :param model_config: Configuration for the Azure OpenAI model.
-        :type model_config: AzureOpenAIModelConfiguration
-        **Usage**
-        .. code-block:: python
-            eval_fn = GroundednessEvaluator(model_config)
-            result = eval_fn(
-                answer="The capital of Japan is Tokyo.",
-                context="Tokyo is Japan's capital, known for its blend of traditional culture \
-                    and technological advancements.")
-        """
+    """
+    Initialize an evaluator configured for a specific Azure OpenAI model.
+    :param model_config: Configuration for the Azure OpenAI model.
+    :type model_config: AzureOpenAIModelConfiguration
+    **Usage**
+    .. code-block:: python
+        eval_fn = GroundednessEvaluator(model_config)
+        result = eval_fn(
+            answer="The capital of Japan is Tokyo.",
+            context="Tokyo is Japan's capital, known for its blend of traditional culture \
+                and technological advancements.")
+    """

+    def __init__(self, model_config: AzureOpenAIModelConfiguration):
+        """Constructor."""
        # TODO: Remove this block once the bug is fixed
        # https://msdata.visualstudio.com/Vienna/_workitems/edit/3151324
        if model_config.api_version is None:
@@ -40,7 +42,8 @@ def __init__(self, model_config: AzureOpenAIModelConfiguration):
        self._flow = load_flow(source=prompty_path, model=prompty_model_config)

    def __call__(self, *, answer: str, context: str, **kwargs):
-        """Evaluate groundedness of the answer in the context.
+        """
+        Evaluate groundedness of the answer in the context.
        :param answer: The answer to be evaluated.
        :type answer: str
39 changes: 21 additions & 18 deletions src/promptflow-evals/promptflow/evals/evaluators/_qa/_qa.py
@@ -16,27 +16,29 @@


class QAEvaluator:
-    def __init__(self, model_config: AzureOpenAIModelConfiguration, parallel: bool = True):
-        """
-        Initialize an evaluator configured for a specific Azure OpenAI model.
-        :param model_config: Configuration for the Azure OpenAI model.
-        :type model_config: AzureOpenAIModelConfiguration
-        :return: A function that evaluates and generates metrics for "question-answering" scenario.
-        :rtype: function
-        **Usage**
-        .. code-block:: python
-            eval_fn = QAEvaluator(model_config)
-            result = qa_eval(
-                question="Tokyo is the capital of which country?",
-                answer="Japan",
-                context="Tokyo is the capital of Japan.",
-                ground_truth="Japan"
-            )
-        """
+    """
+    Initialize an evaluator configured for a specific Azure OpenAI model.
+    :param model_config: Configuration for the Azure OpenAI model.
+    :type model_config: AzureOpenAIModelConfiguration
+    :return: A function that evaluates and generates metrics for "question-answering" scenario.
+    :rtype: function
+    **Usage**
+    .. code-block:: python
+        eval_fn = QAEvaluator(model_config)
+        result = qa_eval(
+            question="Tokyo is the capital of which country?",
+            answer="Japan",
+            context="Tokyo is the capital of Japan.",
+            ground_truth="Japan"
+        )
+    """

+    def __init__(self, model_config: AzureOpenAIModelConfiguration, parallel: bool = True):
+        """Constructor."""
        self._parallel = parallel

        self._evaluators = [
@@ -49,7 +51,8 @@ def __init__(self, model_config: AzureOpenAIModelConfiguration, parallel: bool =
        ]

    def __call__(self, *, question: str, answer: str, context: str, ground_truth: str, **kwargs):
-        """Evaluates question-answering scenario.
+        """
+        Evaluates question-answering scenario.
        :param question: The question to be evaluated.
        :type question: str
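The parallel flag documented above implies fanning the shared inputs out to the composed sub-evaluators concurrently. The sketch below shows the general shape of that pattern with a thread pool; it is not the QAEvaluator implementation, and it assumes each sub-evaluator tolerates extra keyword arguments (their __call__ signatures take **kwargs) and returns a dict of metric scores.

    # Hypothetical illustration of parallel evaluator fan-out; not the actual QAEvaluator code.
    from concurrent.futures import ThreadPoolExecutor

    def run_evaluators_in_parallel(evaluators, **inputs):
        merged = {}
        with ThreadPoolExecutor() as pool:
            futures = [pool.submit(evaluator, **inputs) for evaluator in evaluators]
            for future in futures:
                merged.update(future.result())  # each evaluator contributes its own metrics
        return merged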
@@ -12,24 +12,25 @@


class RelevanceEvaluator:
-    def __init__(self, model_config: AzureOpenAIModelConfiguration):
-        """
-        Initialize an evaluator configured for a specific Azure OpenAI model.
-        :param model_config: Configuration for the Azure OpenAI model.
-        :type model_config: AzureOpenAIModelConfiguration
-        **Usage**
-        .. code-block:: python
-            eval_fn = RelevanceEvaluator(model_config)
-            result = eval_fn(
-                question="What is the capital of Japan?",
-                answer="The capital of Japan is Tokyo.",
-                context="Tokyo is Japan's capital, known for its blend of traditional culture \
-                    and technological advancements.")
-        """
+    """
+    Initialize an evaluator configured for a specific Azure OpenAI model.
+    :param model_config: Configuration for the Azure OpenAI model.
+    :type model_config: AzureOpenAIModelConfiguration
+    **Usage**
+    .. code-block:: python
+        eval_fn = RelevanceEvaluator(model_config)
+        result = eval_fn(
+            question="What is the capital of Japan?",
+            answer="The capital of Japan is Tokyo.",
+            context="Tokyo is Japan's capital, known for its blend of traditional culture \
+                and technological advancements.")
+    """
+    def __init__(self, model_config: AzureOpenAIModelConfiguration):
+        """Constructor."""
        # TODO: Remove this block once the bug is fixed
        # https://msdata.visualstudio.com/Vienna/_workitems/edit/3151324
        if model_config.api_version is None:
@@ -41,7 +42,8 @@ def __init__(self, model_config: AzureOpenAIModelConfiguration):
        self._flow = load_flow(source=prompty_path, model=prompty_model_config)

    def __call__(self, *, question: str, answer: str, context: str, **kwargs):
-        """Evaluate relevance.
+        """
+        Evaluate relevance.
        :param question: The question to be evaluated.
        :type question: str