Skip to content

Commit 171b6df

Browse files
authored
Merge pull request #850 from celobusana/fix/run-safe-async
2 parents 1dc2e5b + 489adf7 commit 171b6df

File tree

3 files changed

+22
-1
lines changed

3 files changed

+22
-1
lines changed

requirements-dev.txt

+1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
pytest==8.0.0
2+
pytest-asyncio==0.25.0
23
pytest-mock==3.14.0
34
burr[start]==0.22.1
45
sphinx==6.0

scrapegraphai/graphs/abstract_graph.py

+12
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
from abc import ABC, abstractmethod
66
from typing import Optional
77
import uuid
8+
import asyncio
89
import warnings
910
from pydantic import BaseModel
1011
from langchain.chat_models import init_chat_model
@@ -293,3 +294,14 @@ def run(self) -> str:
293294
"""
294295
Abstract method to execute the graph and return the result.
295296
"""
297+
298+
async def run_safe_async(self) -> str:
    """
    Execute the synchronous ``run`` method without blocking the event loop.

    ``run`` is a blocking call; offloading it to the default thread-pool
    executor lets callers ``await`` the graph from asyncio code while the
    surrounding event loop stays responsive.

    Returns:
        str: The answer to the prompt.
    """
    # get_event_loop() is deprecated when called from a coroutine since
    # Python 3.10; get_running_loop() is the correct API while a loop is
    # active, and raises a clear RuntimeError if misused outside one.
    loop = asyncio.get_running_loop()
    # None selects the loop's default ThreadPoolExecutor; self.run is
    # invoked unchanged, so subclass behavior is preserved.
    return await loop.run_in_executor(None, self.run)

tests/graphs/abstract_graph_test.py

+9-1
Original file line numberDiff line numberDiff line change
@@ -96,4 +96,12 @@ def test_create_llm_unknown_provider(self):
9696

9797
def test_create_llm_with_rate_limit(self, llm_config, expected_model):
9898
graph = TestGraph("Test prompt", {"llm": llm_config})
99-
assert isinstance(graph.llm_model, expected_model)
99+
assert isinstance(graph.llm_model, expected_model)
100+
101+
@pytest.mark.asyncio
async def test_run_safe_async(self):
    """run_safe_async should delegate to run() exactly once and return its result."""
    config = {"llm": {"model": "openai/gpt-3.5-turbo", "openai_api_key": "sk-randomtest001"}}
    graph = TestGraph("Test prompt", config)
    # Stub out the blocking run() so the test never touches a real LLM.
    with patch.object(graph, 'run', return_value="Async result") as run_stub:
        answer = await graph.run_safe_async()
        assert answer == "Async result"
        run_stub.assert_called_once()

0 commit comments

Comments
 (0)