diff --git a/requirements-dev.txt b/requirements-dev.txt
index e04399e9..9174e4a4 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,4 +1,5 @@
 pytest==8.0.0
+pytest-asyncio==0.25.0
 pytest-mock==3.14.0
 burr[start]==0.22.1
 sphinx==6.0
diff --git a/scrapegraphai/graphs/abstract_graph.py b/scrapegraphai/graphs/abstract_graph.py
index 1148cc29..476b4b5b 100644
--- a/scrapegraphai/graphs/abstract_graph.py
+++ b/scrapegraphai/graphs/abstract_graph.py
@@ -5,6 +5,7 @@
 from abc import ABC, abstractmethod
 from typing import Optional
 import uuid
+import asyncio
 import warnings
 from pydantic import BaseModel
 from langchain.chat_models import init_chat_model
@@ -293,3 +294,14 @@ def run(self) -> str:
         """
         Abstract method to execute the graph and return the result.
         """
+
+    async def run_safe_async(self) -> str:
+        """
+        Executes the synchronous run process safely from async code.
+
+        Returns:
+            str: The answer to the prompt.
+        """
+
+        loop = asyncio.get_running_loop()
+        return await loop.run_in_executor(None, self.run)
\ No newline at end of file
diff --git a/tests/graphs/abstract_graph_test.py b/tests/graphs/abstract_graph_test.py
index 642868fb..27f5b660 100644
--- a/tests/graphs/abstract_graph_test.py
+++ b/tests/graphs/abstract_graph_test.py
@@ -96,4 +96,12 @@ def test_create_llm_unknown_provider(self):
 
     def test_create_llm_with_rate_limit(self, llm_config, expected_model):
         graph = TestGraph("Test prompt", {"llm": llm_config})
-        assert isinstance(graph.llm_model, expected_model)
\ No newline at end of file
+        assert isinstance(graph.llm_model, expected_model)
+
+    @pytest.mark.asyncio
+    async def test_run_safe_async(self):
+        graph = TestGraph("Test prompt", {"llm": {"model": "openai/gpt-3.5-turbo", "openai_api_key": "sk-randomtest001"}})
+        with patch.object(graph, 'run', return_value="Async result") as mock_run:
+            result = await graph.run_safe_async()
+        assert result == "Async result"
+        mock_run.assert_called_once()
\ No newline at end of file