Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add progress bar + runner fixes #10348

Merged
merged 13 commits into from
Sep 8, 2023
Merged
Show file tree
Hide file tree
Changes from 11 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 20 additions & 8 deletions libs/langchain/langchain/callbacks/tracers/evaluation.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
import langsmith
from langsmith import schemas as langsmith_schemas

from langchain.callbacks.manager import tracing_v2_enabled
from langchain import callbacks
hinthornw marked this conversation as resolved.
Show resolved Hide resolved
from langchain.callbacks.tracers.base import BaseTracer
from langchain.callbacks.tracers.langchain import _get_client
from langchain.callbacks.tracers.schemas import Run
Expand Down Expand Up @@ -81,9 +81,12 @@ def __init__(
)
self.client = client or _get_client()
self.evaluators = evaluators
self.executor = ThreadPoolExecutor(
max_workers=max(max_workers or len(evaluators), 1)
)
max_workers = max_workers if max_workers is not None else len(evaluators)
if not max_workers:
self.executor = None
hinthornw marked this conversation as resolved.
Show resolved Hide resolved

else:
self.executor = ThreadPoolExecutor(max_workers=max_workers)
self.futures: Set[Future] = set()
self.skip_unfinished = skip_unfinished
self.project_name = project_name
Expand All @@ -105,7 +108,7 @@ def _evaluate_in_project(self, run: Run, evaluator: langsmith.RunEvaluator) -> N
try:
if self.project_name is None:
feedback = self.client.evaluate_run(run, evaluator)
with tracing_v2_enabled(
with callbacks.tracing_v2_enabled(
project_name=self.project_name, tags=["eval"], client=self.client
):
feedback = self.client.evaluate_run(run, evaluator)
Expand Down Expand Up @@ -134,9 +137,18 @@ def _persist_run(self, run: Run) -> None:
run_ = run.copy()
run_.reference_example_id = self.example_id
for evaluator in self.evaluators:
self.futures.add(
self.executor.submit(self._evaluate_in_project, run_, evaluator)
)
if self.executor:
self.futures.add(
self.executor.submit(self._evaluate_in_project, run_, evaluator)
)
else:
self._evaluate_in_project(run_, evaluator)

def close(self) -> None:
    """Drain any pending evaluation futures, then release the executor.

    Safe to call when the tracer was constructed without a thread pool
    (``self.executor`` is ``None``): the shutdown step is simply skipped.
    """
    # Block until every queued evaluation has finished.
    self.wait_for_futures()
    pool = self.executor
    if pool:
        # Tear down the worker threads now that no work remains.
        pool.shutdown()

def wait_for_futures(self) -> None:
"""Wait for all futures to complete."""
Expand Down
Loading
Loading