
Commit 902699c

Merge pull request #781 from aziz-ullah-khan/pre/beta
2 parents 7e3598d + c218546

3 files changed: +38 additions, -6 deletions

scrapegraphai/graphs/search_graph.py

Lines changed: 2 additions & 1 deletion
@@ -66,7 +66,8 @@ def _create_graph(self) -> BaseGraph:
                 "llm_model": self.llm_model,
                 "max_results": self.max_results,
                 "loader_kwargs": self.loader_kwargs,
-                "search_engine": self.copy_config.get("search_engine")
+                "search_engine": self.copy_config.get("search_engine"),
+                "serper_api_key": self.copy_config.get("serper_api_key")
             }
         )
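With this key forwarded from the graph config into the node config, callers can select Serper at the graph level. A minimal usage sketch, assuming the SearchGraph(prompt, config) constructor from the library's public API; the llm block and both API keys below are placeholders:

from scrapegraphai.graphs import SearchGraph

graph_config = {
    "llm": {
        "api_key": "YOUR_LLM_API_KEY",        # placeholder
        "model": "openai/gpt-4o-mini",        # placeholder model name
    },
    "search_engine": "serper",                 # routes search_on_web to the new Serper branch
    "serper_api_key": "YOUR_SERPER_API_KEY",   # picked up via copy_config.get("serper_api_key")
}

search_graph = SearchGraph(
    prompt="List the top open-source web scraping libraries",
    config=graph_config,
)
print(search_graph.run())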

scrapegraphai/nodes/search_internet_node.py

Lines changed: 8 additions & 1 deletion
@@ -47,6 +47,13 @@ def __init__(
             if node_config.get("search_engine")
             else "google"
         )
+
+        self.serper_api_key = (
+            node_config["serper_api_key"]
+            if node_config.get("serper_api_key")
+            else None
+        )
+
         self.max_results = node_config.get("max_results", 3)

     def execute(self, state: dict) -> dict:
@@ -95,7 +102,7 @@ def execute(self, state: dict) -> dict:
         self.logger.info(f"Search Query: {search_query}")

         answer = search_on_web(query=search_query, max_results=self.max_results,
-                               search_engine=self.search_engine, proxy=self.proxy)
+                               search_engine=self.search_engine, proxy=self.proxy, serper_api_key=self.serper_api_key)

         if len(answer) == 0:
             raise ValueError("Zero results found for the search query.")

scrapegraphai/utils/research_web.py

Lines changed: 28 additions & 4 deletions
@@ -7,18 +7,20 @@
 from googlesearch import search as google_search
 import requests
 from bs4 import BeautifulSoup
+import json

 def search_on_web(query: str, search_engine: str = "Google",
                   max_results: int = 10, port: int = 8080,
-                  timeout: int = 10, proxy: str | dict = None) -> List[str]:
+                  timeout: int = 10, proxy: str | dict = None,
+                  serper_api_key: str = None) -> List[str]:
     """Search web function with improved error handling and validation"""

     # Input validation
     if not query or not isinstance(query, str):
         raise ValueError("Query must be a non-empty string")

     search_engine = search_engine.lower()
-    valid_engines = {"google", "duckduckgo", "bing", "searxng"}
+    valid_engines = {"google", "duckduckgo", "bing", "searxng", "serper"}
     if search_engine not in valid_engines:
         raise ValueError(f"Search engine must be one of: {', '.join(valid_engines)}")
@@ -42,7 +44,10 @@ def search_on_web(query: str, search_engine: str = "Google",

         elif search_engine == "searxng":
             results = _search_searxng(query, max_results, port, timeout)
-
+
+        elif search_engine.lower() == "serper":
+            results = _search_serper(query, max_results, serper_api_key, timeout)
+
         return filter_pdf_links(results)

     except requests.Timeout:
@@ -76,6 +81,25 @@ def _search_searxng(query: str, max_results: int, port: int, timeout: int) -> List[str]:
     response.raise_for_status()
     return [result['url'] for result in response.json().get("results", [])[:max_results]]

+def _search_serper(query: str, max_results: int, serper_api_key: str, timeout: int) -> List[str]:
+    """Helper function for serper api"""
+    if not serper_api_key:
+        raise ValueError("API key is required for serper api.")
+
+    url = "https://google.serper.dev/search"
+    payload = json.dumps({
+        "q": query,
+        "num": max_results
+    })
+    headers = {
+        'X-API-KEY': serper_api_key,
+        'Content-Type': 'application/json'
+    }
+    response = requests.post(url, headers=headers, data=payload, timeout=timeout)
+    response.raise_for_status()
+    return [result.get("link") for result in response.json().get("organic", [])]
+
+
 def format_proxy(proxy):
     if isinstance(proxy, dict):
         server = proxy.get('server')
@@ -102,4 +126,4 @@ def filter_pdf_links(links: List[str]) -> List[str]:
     Returns:
         List[str]: A list of URLs excluding any that end with '.pdf'.
     """
-    return [link for link in links if not link.lower().endswith('.pdf')]
\ No newline at end of file
+    return [link for link in links if not link.lower().endswith('.pdf')]
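Taken together, the three changes can be exercised end to end through search_on_web. A minimal sketch; the API key is a placeholder, and the "organic"/"link" response shape relied on here is exactly what _search_serper above parses:

from scrapegraphai.utils.research_web import search_on_web

links = search_on_web(
    query="open source web scraping libraries",
    search_engine="serper",
    max_results=5,
    serper_api_key="YOUR_SERPER_API_KEY",  # placeholder; _search_serper raises ValueError without it
)
print(links)  # .pdf URLs are already stripped by filter_pdf_links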
