2 changes: 1 addition & 1 deletion llm-complete-guide/ZENML_VERSION.txt
@@ -1 +1 @@
-0.74.0
+0.75.0
6 changes: 3 additions & 3 deletions llm-complete-guide/pipelines/llm_eval.py
@@ -17,7 +17,7 @@
from typing import Optional

import click
-from steps.create_prompt import create_prompt
+from steps.create_prompt import PROMPT, create_prompt
from steps.eval_e2e import e2e_evaluation, e2e_evaluation_llm_judged
from steps.eval_retrieval import (
    retrieval_evaluation_full,
@@ -26,14 +26,14 @@
    retrieval_evaluation_small_with_reranking,
)
from steps.eval_visualisation import visualize_evaluation_results
-from zenml import pipeline
+from zenml import pipeline, save_artifact


@pipeline(enable_cache=True)
def llm_eval(after: Optional[str] = None) -> None:
"""Executes the pipeline to evaluate a RAG pipeline."""
# define prompt
prompt = create_prompt()
prompt = save_artifact(PROMPT, "prompt")

    # Retrieval evals
    failure_rate_retrieval = retrieval_evaluation_small(after=after)
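Outside the diff above, a minimal sketch of the new pattern in isolation: the shared PROMPT constant is registered as a named ZenML artifact directly from the pipeline definition via save_artifact, replacing the dedicated create_prompt step. The shortened prompt text and the trimmed pipeline body here are placeholders, not the real code.

# Sketch only, not part of this PR: register a constant prompt string as a
# ZenML artifact named "prompt" from inside the pipeline definition,
# mirroring the save_artifact(PROMPT, "prompt") call in the diff above.
from zenml import pipeline, save_artifact

PROMPT = "You are a friendly chatbot. ..."  # placeholder for the real prompt

@pipeline(enable_cache=True)
def llm_eval() -> None:
    # Store the prompt as a versioned, named artifact; the returned value is
    # presumably passed on to the evaluation steps collapsed in the diff.
    prompt = save_artifact(PROMPT, "prompt")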
7 changes: 7 additions & 0 deletions llm-complete-guide/steps/create_prompt.py
@@ -16,6 +16,13 @@

from zenml import log_metadata, step

PROMPT = """
You are a friendly chatbot. \
You can answer questions about ZenML, its features and its use cases. \
You respond in a concise, technically credible tone. \
You ONLY use the context from the ZenML documentation to provide relevant
answers. \
"""

@step
def create_prompt() -> str:
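For downstream use, the saved artifact can then be fetched by name. A minimal sketch, assuming the standard ZenML Client lookup (get_artifact_version plus .load()); none of this code appears in the PR:

# Sketch only: load the "prompt" artifact that llm_eval registers via
# save_artifact(PROMPT, "prompt") and materialize it back into a string.
from zenml.client import Client

def load_prompt() -> str:
    # Fetches the latest version of the artifact named "prompt".
    artifact_version = Client().get_artifact_version("prompt")
    return artifact_version.load()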