
Commit aafd2cf

Author: wushixiong (committed)
Message: optimize prompt
Parent: 8907786

File tree

2 files changed

+30
-87
lines changed


omnibox_wizard/resources/prompt_templates/ask.j2

Lines changed: 2 additions & 0 deletions
@@ -10,6 +10,8 @@ Your name is OmniBox(中文名:小黑), built by import.ai, responsible fo
 
 You will receive a user's question and are expected to answer it concisely, accurately, and clearly.
 
+IMPORTANT: When you need to call tools, do NOT mention tool names like "get_resources", "private_search", etc.
+
 {% include "user_input_description.j2" %}
 
 {% endif %}
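
For context, ask.j2 is rendered into the agent's system prompt (see the render_template calls in ask_langgraph.py below), and the new IMPORTANT line sits inside the conditional block shown above. A minimal sketch of how that flag-gated rendering could look with plain Jinja2; the guard variable, the surrounding template text, and the direct use of jinja2.Template are assumptions for illustration, since the repository renders the template through its own TemplateParser:

# Illustrative only: a simplified stand-in for ask.j2 and its rendering.
from jinja2 import Template

ASK_TEMPLATE = Template(
    "{% if part_1_enabled %}"
    "You will receive a user's question and are expected to answer it "
    "concisely, accurately, and clearly.\n"
    "IMPORTANT: When you need to call tools, do NOT mention tool names "
    'like "get_resources", "private_search", etc.\n'
    "{% endif %}"
    "{% if tools %}Available tools:\n{{ tools }}\n{% endif %}"
)

# With part_1_enabled set and no serialized tools, only the prose block renders.
print(ASK_TEMPLATE.render(part_1_enabled=True, tools=None))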

omnibox_wizard/wizard/grimoire/agent/ask_langgraph.py

Lines changed: 28 additions & 87 deletions
@@ -64,79 +64,6 @@
 json_dumps = partial(jsonlib.dumps, ensure_ascii=False, separators=(",", ":"))
 tracer = trace.get_tracer(__name__)
 
-
-def format_visible_resources(agent_request: AgentRequest) -> str | None:
-    """Format visible_resources from private_search for LLM context.
-
-    Returns formatted string or None if no visible_resources.
-    """
-    # Find private_search tool
-    private_search = None
-    for tool in agent_request.tools or []:
-        if tool.name == "private_search":
-            private_search = tool
-            break
-
-    if not private_search or not private_search.visible_resources:
-        return None
-
-    # Generate short ID mapping (same logic as BaseResourceTool)
-    resources_with_ids = []
-    resource_counter = 0
-    folder_counter = 0
-
-    for resource in private_search.visible_resources:
-        if resource.type == PrivateSearchResourceType.FOLDER:
-            folder_counter += 1
-            short_id = f"f{folder_counter}"
-        else:
-            resource_counter += 1
-            short_id = f"r{resource_counter}"
-        resources_with_ids.append({
-            "short_id": short_id,
-            "name": resource.name,
-            "type": resource.type.value,
-        })
-
-    if not resources_with_ids:
-        return None
-
-    # Separate folders and documents
-    folders = [r for r in resources_with_ids if r["type"] == "folder"]
-    documents = [r for r in resources_with_ids if r["type"] == "resource"]
-
-    # Format for LLM
-    lines = [
-        "<available_resources>",
-        "User's available folders and documents (use short_id when calling tools):",
-        "",
-    ]
-
-    if folders:
-        lines.append("Folders:")
-        for f in folders:
-            lines.append(f" - {f['short_id']}: {f['name']}")
-
-    if documents:
-        lines.append("")
-        lines.append("Documents:")
-        for d in documents:
-            lines.append(f" - {d['short_id']}: {d['name']}")
-
-    lines.extend([
-        "",
-        "Tool Usage Guide:",
-        "- To see folder contents: get_children(folder_short_id) e.g., get_children(namespace_id, resource_id)",
-        "- To read document content: get_resources([doc_short_ids]) e.g., get_resources(['r1', 'r2'])",
-        "- For time-based queries ('recent', 'this week'): use filter_by_time",
-        "- For tag-based queries: use filter_by_tag",
-        "- private_search is for keyword search across all documents",
-        "</available_resources>",
-    ])
-
-    return "\n".join(lines)
-
-
 # ============== State ==============
 class AgentState(TypedDict):
     """Minimal state - just the conversation messages."""
@@ -384,8 +311,7 @@ def __init__(self, config: Config):
         self.system_prompt_template = self.template_parser.get_template("ask.j2")
 
         # Custom tool call mode
-        # self.custom_tool_call: bool | None = config.grimoire.custom_tool_call
-        self.custom_tool_call = False
+        self.custom_tool_call: bool | None = config.grimoire.custom_tool_call
         # Build graph
         self.graph = build_graph()
 
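This hunk restores the config-driven flag that had been hard-coded to False. As a rough sketch of the configuration shape that config.grimoire.custom_tool_call implies; the real Config and grimoire settings classes are defined elsewhere in the repository, so everything here except the custom_tool_call attribute name is an assumption:

# Hypothetical sketch of the config shape this line reads; not repository code.
from dataclasses import dataclass, field


@dataclass
class GrimoireConfig:
    # None/False: rely on the model's native tool calling.
    # True: serialize tool definitions into the system prompt instead.
    custom_tool_call: bool | None = None


@dataclass
class Config:
    grimoire: GrimoireConfig = field(default_factory=GrimoireConfig)
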
@@ -460,18 +386,33 @@ async def _prepare_messages(
 
         # Add system message if needed
         if not messages:
-            prompt = self.template_parser.render_template(
-                self.system_prompt_template,
-                lang=agent_request.lang or "简体中文",
-                tools="\n".join(json_dumps(t) for t in all_tools),
-                part_1_enabled=True,
-                part_2_enabled=True,
-            )
-            system_msg = {"role": "system", "content": prompt}
-            await emit_complete_message(
-                {"configurable": {"queue": queue}}, system_msg
-            )
-            messages.append(MessageDto.model_validate({"message": system_msg}))
+            if self.custom_tool_call:
+                prompt: str = self.template_parser.render_template(
+                    self.system_prompt_template,
+                    lang=agent_request.lang or "简体中文",
+                    tools="\n".join(json_dumps(tool) for tool in all_tools)
+                    if self.custom_tool_call
+                    else None,
+                    part_1_enabled=True,
+                    part_2_enabled=True,
+                )
+                system_msg = {"role": "system", "content": prompt}
+                await emit_complete_message(
+                    {"configurable": {"queue": queue}}, system_msg
+                )
+                messages.append(MessageDto.model_validate({"message": system_msg}))
+            else:
+                for i in range(2):
+                    prompt: str = self.template_parser.render_template(
+                        self.system_prompt_template,
+                        lang=agent_request.lang or "简体中文",
+                        **{f"part_{i + 1}_enabled": True},
+                    )
+                    system_msg: dict = {"role": "system", "content": prompt}
+                    await emit_complete_message(
+                        {"configurable": {"queue": queue}}, system_msg
+                    )
+                    messages.append(MessageDto.model_validate({"message": system_msg}))
 
         # Add user message if needed
         if messages[-1].message["role"] != "user":
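
Taken together, the last two hunks reintroduce two system-prompt paths in _prepare_messages: with custom_tool_call enabled, a single system message embeds the JSON-serialized tool definitions; otherwise the template is rendered twice, once per part, as two tool-free system messages. A stripped-down sketch of that branching; render stands in for self.template_parser.render_template, build_system_messages is a hypothetical helper, and the queue emission and MessageDto wrapping from the real method are omitted:

# Simplified sketch of the restored branching, not the actual method body.
def build_system_messages(render, template, lang, tools_json, custom_tool_call):
    if custom_tool_call:
        # Custom tool-call mode: one combined prompt that embeds the serialized tools.
        prompt = render(
            template,
            lang=lang,
            tools=tools_json,
            part_1_enabled=True,
            part_2_enabled=True,
        )
        return [{"role": "system", "content": prompt}]
    # Native tool-call mode: two prompts, one per template part, with no tool JSON.
    return [
        {
            "role": "system",
            "content": render(template, lang=lang, **{f"part_{i + 1}_enabled": True}),
        }
        for i in range(2)
    ]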
