1 change: 1 addition & 0 deletions requirements-dev.txt
@@ -1,4 +1,5 @@
pytest==8.0.0
pytest-asyncio==0.25.0
pytest-mock==3.14.0
burr[start]==0.22.1
sphinx==6.0
12 changes: 12 additions & 0 deletions scrapegraphai/graphs/abstract_graph.py
@@ -5,6 +5,7 @@
from abc import ABC, abstractmethod
from typing import Optional
import uuid
import asyncio
import warnings
from pydantic import BaseModel
from langchain.chat_models import init_chat_model
@@ -293,3 +294,14 @@ def run(self) -> str:
"""
Abstract method to execute the graph and return the result.
"""

    async def run_safe_async(self) -> str:
        """
        Safely executes the run process asynchronously by offloading the
        blocking run() call to a thread-pool executor.

        Returns:
            str: The answer to the prompt.
        """
        # get_running_loop() is the idiomatic call inside a coroutine;
        # get_event_loop() is deprecated in this context.
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, self.run)
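
For context: `run_safe_async` hands the synchronous `run()` off to the event loop's default thread-pool executor, so callers can await it alongside other coroutines without blocking the loop. A minimal self-contained sketch of the same pattern (standalone illustration, not scrapegraphai code; the `BlockingWorker` class is hypothetical):

```python
import asyncio
import time

class BlockingWorker:
    """Hypothetical stand-in for a graph whose run() does blocking work."""

    def run(self) -> str:
        time.sleep(1)  # simulates synchronous scraping / LLM calls
        return "answer"

    async def run_safe_async(self) -> str:
        # Run the blocking method in the default thread-pool executor so
        # the event loop stays free to service other coroutines.
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, self.run)

async def main():
    worker = BlockingWorker()
    # This task keeps running while run() blocks in a worker thread.
    side_task = asyncio.create_task(asyncio.sleep(0.5))
    result = await worker.run_safe_async()
    await side_task
    print(result)  # prints "answer"

asyncio.run(main())
```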
10 changes: 9 additions & 1 deletion tests/graphs/abstract_graph_test.py
@@ -96,4 +96,12 @@ def test_create_llm_unknown_provider(self):

    def test_create_llm_with_rate_limit(self, llm_config, expected_model):
        graph = TestGraph("Test prompt", {"llm": llm_config})
        assert isinstance(graph.llm_model, expected_model)

    @pytest.mark.asyncio
    async def test_run_safe_async(self):
        graph = TestGraph("Test prompt", {"llm": {"model": "openai/gpt-3.5-turbo", "openai_api_key": "sk-randomtest001"}})
        with patch.object(graph, 'run', return_value="Async result") as mock_run:
            result = await graph.run_safe_async()
            assert result == "Async result"
            mock_run.assert_called_once()