-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdeepresearch.py
More file actions
94 lines (86 loc) · 3.33 KB
/
deepresearch.py
File metadata and controls
94 lines (86 loc) · 3.33 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
from typing import Callable
import os
import logging
import requests
import time
def start_deep_research_pipeline(
    deeprs_framework: str,
    deeprs_port: int,
    middleware_port: int,
    model_name: str,
    run_server: Callable,
):
    """Launch a deep-research backend server and block until it is healthy.

    Args:
        deeprs_framework: Framework to launch; only "open_deep_research"
            is currently supported.
        deeprs_port: Port the deep-research (langgraph dev) server listens on.
        middleware_port: Port of the local OpenAI-compatible middleware that
            every model role is pointed at.
        model_name: Model identifier used for all four model roles.
        run_server: Callable that spawns the server process. It is called with
            the shell command string plus ``cwd`` and ``env_vars`` keyword
            arguments and must return the server pid/handle.

    Returns:
        Whatever ``run_server`` returned (the server pid/handle).

    Raises:
        ValueError: If ``deeprs_framework`` is not supported.
    """
    if deeprs_framework != "open_deep_research":
        raise ValueError(f"Unsupported Deep Researcher framework: {deeprs_framework}")

    # Working directory: env override first, else the sibling checkout.
    cwd = os.environ.get("DEEP_RESEARCHER_CWD", "")
    if not cwd:
        cwd = os.path.abspath(
            os.path.join(os.path.dirname(__file__), "..", "open_deep_research")
        )

    # All four model roles use the same model, provider, and local
    # OpenAI-compatible middleware endpoint; build the env block in a loop
    # instead of repeating the triple twelve times.
    base_url = f"http://localhost:{middleware_port}/v1"
    env_vars = {}
    for role in ("SUMMARIZATION", "RESEARCH", "COMPRESSION", "FINAL_REPORT"):
        env_vars[f"{role}_MODEL"] = model_name
        env_vars[f"{role}_MODEL_BASE_URL"] = base_url
        env_vars[f"{role}_MODEL_PROVIDER"] = "openai"

    deeprs_pid = run_server(
        (
            "uvx --refresh --from langgraph-cli[inmem] --with-editable . "
            f"--python 3.11 langgraph dev --port {deeprs_port} --allow-blocking"
        ),
        cwd=cwd,
        env_vars=env_vars,
    )

    # Poll the /docs endpoint once per second until the server answers 200.
    while True:
        try:
            # timeout= keeps a hung connection from blocking startup forever.
            response = requests.get(
                f"http://localhost:{deeprs_port}/docs", timeout=5
            )
            if response.status_code == 200:
                break
        except requests.exceptions.RequestException:
            # Server not accepting connections yet; fall through and retry.
            pass
        time.sleep(1)

    return deeprs_pid
async def perform_deep_research(
    deeprs_framework: str,
    deeprs_client,
    research_question: str,
):
    """Stream a deep-research run and return the final report text.

    Args:
        deeprs_framework: Framework identifier; only "open_deep_research"
            is supported.
        deeprs_client: LangGraph client whose ``runs.stream`` yields update
            chunks; each chunk is assumed to expose a ``.data`` mapping
            (TODO confirm against the client implementation).
        research_question: The question to research.

    Returns:
        The final report string, or ``None`` if the stream ends without a
        "final_report_generation" update.

    Raises:
        ValueError: If ``deeprs_framework`` is not supported.
    """
    if deeprs_framework != "open_deep_research":
        raise ValueError(f"Unsupported Deep Researcher framework: {deeprs_framework}")

    async for chunk in deeprs_client.runs.stream(
        None,  # Threadless run
        "Deep Researcher",  # Name of assistant. Defined in langgraph.json.
        input={
            "messages": [
                {
                    "role": "human",
                    "content": research_question,
                }
            ],
        },
        stream_mode="updates",
    ):
        json_data = chunk.data
        if "final_report_generation" in json_data:
            final_report = json_data["final_report_generation"]["final_report"]
            # Lazy %-style args avoid formatting cost when INFO is disabled.
            logging.info("=" * 20)
            logging.info(
                "Starting deep research for question: %s", research_question
            )
            logging.info("-" * 20)
            logging.info("Final Report: %s", final_report)
            logging.info("=" * 20)
            return final_report

    # Previously this fell off the end and returned None silently when the
    # stream produced no final report; make the outcome explicit and visible.
    logging.warning("Deep research stream ended without a final report.")
    return None