from fastmcp.client.transports import StreamableHttpTransport

import fractale.agent.backends as backends
+import fractale.agent.defaults as defaults
import fractale.agent.logger as logger
from fractale.agent.base import Agent

@@ -17,7 +18,7 @@ class MCPAgent(Agent):

    def init(self):
        # 1. Setup MCP Client
-        port = os.environ.get("FRACTALE_MCP_PORT", "8089")
+        port = os.environ.get("FRACTALE_MCP_PORT", defaults.mcp_port)
        token = os.environ.get("FRACTALE_MCP_TOKEN")
        url = f"http://localhost:{port}/mcp"

@@ -27,8 +28,18 @@ def init(self):
        else:
            self.client = Client(url)

-        # 2. Select Backend based on Config/Env
-        provider = os.environ.get("FRACTALE_LLM_PROVIDER", "gemini").lower()
+        # Initialize the provider. We will do this for each step.
+        self.init_provider()
+
+    def init_provider(self):
+        """
+        Initialize the provider.
+        """
+        # Prefer the cached provider, then fall back to config/env.
+        provider = self._provider or os.environ.get("FRACTALE_LLM_PROVIDER", "gemini").lower()
+        self._provider = provider
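+        # Example (assumed usage): with FRACTALE_LLM_PROVIDER=openai set in
+        # the environment, the "openai" backend is selected; unset, we fall
+        # back to "gemini". Later calls reuse the cached self._provider.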
+
+        # Other environment variables come from the provider backend.
        if provider in backends.BACKENDS:
            self.backend = backends.BACKENDS[provider]()
        else:
@@ -52,79 +63,118 @@ async def get_tools_list(self):
        tools = await self.client.list_tools()
        return tools

-    async def execute_mission_async(self, prompt_text: str):
+    async def execute(self, context, step):
        """
-        The Async Loop: Think -> Act -> Observe -> Think
+        The async loop: start with a prompt name, retrieve it, and respond
+        to it until the state is successful.
        """
        start_time = time.perf_counter()

-        # 1. Connect & Discover Tools
+        # We keep the client connection open for the duration of the step.
        async with self.client:
-            mcp_tools = await self.client.list_tools()

-            # 2. Initialize Backend with these tools
+            # These are the tools available to the agent.
+            # TODO: do these need to be filtered to be agent-specific?
+            mcp_tools = await self.client.list_tools()
            await self.backend.initialize(mcp_tools)

-            # 3. Initial Prompt
-            # 'response_text' is what the LLM says to the user
-            # 'calls' is a list of tools it wants to run
-            response_text, calls = await self.backend.generate_response(prompt=prompt_text)
-
-            max_loops = 15
-            loops = 0
-
-            while loops < max_loops:
-                loops += 1
+            # Get the prompt that gives the agent its goal/task/personality.
+            args = getattr(context, "data", context)

-                # If there are tool calls, we MUST execute them and feed back results
-                if calls:
-                    tool_outputs = []
+            # Partition the inputs, adding inputs from the step and separating
+            # them from the extras.
+            args, extra = step.partition_inputs(args)
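+            # Sketch of the expected split (keys hypothetical): given
+            # {"container": "x", "notes": "y"} and a step that declares only
+            # "container" as an input, args -> {"container": "x"} and
+            # extra -> {"notes": "y"}.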
+            instruction = await self.fetch_persona(step.prompt, args)
+            # TODO (STOPPED HERE): should we add "extra" to the context?
+            print("INSTRUCTION")
+            print(instruction)
+            print("EXTRA")
+            print(extra)

-                    for call in calls:
-                        t_name = call["name"]
-                        t_args = call["args"]
-                        t_id = call.get("id")  # Needed for OpenAI
+            # Run the loop up to some max attempts (an internal state machine
+            # with MCP tools).
+            max_loops = context.get("max_loops", 15)
+            response_text = await self.run_llm_loop(instruction, max_loops)

-                        logger.info(f"🛠️ Tool Call: {t_name} {t_args}")
-
-                        # --- EXECUTE TOOL ---
-                        try:
-                            result = await self.client.call_tool(t_name, t_args)
-                            # Handle FastMCP result object
-                            output_str = (
-                                result.content[0].text
-                                if hasattr(result, "content")
-                                else str(result)
-                            )
-                        except Exception as e:
-                            output_str = f"Error: {str(e)}"
-
-                        # Record Metadata (Your Requirement)
-                        self._record_step(t_name, t_args, output_str)
+        self.record_usage(time.perf_counter() - start_time)
+        return response_text

-                        tool_outputs.append({"name": t_name, "content": output_str, "id": t_id})
+    async def run_llm_loop(self, instruction: str, max_loops: int) -> str:
+        """
+        Process -> Tool -> Process loop.
+        We need to return on some state of success or ultimate failure.
+        """
+        # Initial response to the first prompt.
+        response_text, calls = await self.backend.generate_response(prompt=instruction)
+
+        loops = 0
+        while loops < max_loops:
+            loops += 1
+
+            # If no tools were called, we are done.
+            if not calls:
+                break
+
+            # Execute all requested tools.
+            tool_outputs = []
+            for call in calls:
+                t_name = call["name"]
+                t_args = call["args"]
+                t_id = call.get("id")
+                logger.info(f"🛠️ Calling: {t_name}")
+
+                try:
+                    # Get the result and unpack it (FastMCP format).
+                    result = await self.client.call_tool(t_name, t_args)
+                    if hasattr(result, "content") and isinstance(result.content, list):
+                        content = result.content[0].text
+                    else:
+                        content = str(result)
+                except Exception as e:
+                    content = f"Error executing {t_name}: {str(e)}"
+
+                # Record metadata about the step.
+                self.record_step(t_name, t_args, content)
+
+                # Save the outputs (name, id, and content).
+                tool_outputs.append({"id": t_id, "name": t_name, "content": content})
+
+            # Feed the results back to the backend with history.
+            response_text, calls = await self.backend.generate_response(tool_outputs=tool_outputs)
+            if not calls:
+                logger.info("🎢 Agent has not requested new calls, ending loop.")
+
+        # When we get here, we either have no calls or we reached max attempts.
+        return response_text

-                    # --- FEEDBACK LOOP ---
-                    # We pass the outputs back to the backend.
-                    # It returns the NEXT thought.
-                    response_text, calls = await self.backend.generate_response(
-                        tool_outputs=tool_outputs
-                    )
+    async def fetch_persona(self, prompt_name: str, arguments: dict) -> str:
+        """
+        Ask the MCP server to render the prompt template.

+        This is akin to rendering or fetching the persona. E.g., "You are X
+        and here are your instructions for a task."
+        """
+        logger.info(f"📥 Bootstrapping Persona: {prompt_name}")
+        try:
+            prompt_result = await self.client.get_prompt(name=prompt_name, arguments=arguments)
+            # MCP prompts return a list of messages (User/Assistant/Text).
+            # We squash them into a single string for the instruction.
+            msgs = []
+            for m in prompt_result.messages:
+                if hasattr(m.content, "text"):
+                    msgs.append(m.content.text)
                else:
-                    # No tool calls? The LLM is done thinking.
-                    break
-
-            end_time = time.perf_counter()
+                    msgs.append(str(m.content))

-            # Save Summary Metadata
-            self.save_mcp_metadata(end_time - start_time)
+            return "\n\n".join(msgs)

-        return response_text
+        except Exception as e:
+            raise RuntimeError(f"Failed to load persona '{prompt_name}': {e}")

-    def _record_step(self, tool, args, output):
-        if "steps" not in self.metadata:
-            self.metadata["steps"] = []
+    def record_step(self, tool, args, output):
+        """
+        Record step metadata.
+        TODO: refactor this into a metadata registry (decorator).
+        """
        self.metadata["steps"].append(
            {
                "tool": tool,
@@ -134,33 +184,31 @@ def _record_step(self, tool, args, output):
            }
        )

-    def save_mcp_metadata(self, duration):
-        """Save token usage from backend."""
-        usage = self.backend.token_usage
-        if "llm_usage" not in self.metadata:
-            self.metadata["llm_usage"] = []
-
-        self.metadata["llm_usage"].append(
-            {
-                "duration": duration,
-                "prompt_tokens": usage.get("prompt_tokens", 0),
-                "completion_tokens": usage.get("completion_tokens", 0),
-            }
-        )
-
-    def run_step(self, context):
+    def record_usage(self, duration):
        """
-        Bridge the sync Base Class to the async implementation.
+        Record token usage.
+        TODO: refactor this into a metadata registry (decorator).
+        """
+        if hasattr(self.backend, "token_usage"):
+            usage = self.backend.token_usage
+            self.metadata["llm_usage"].append(
+                {
+                    "duration": duration,
+                    "prompt": usage.get("prompt_tokens", 0),
+                    "completion": usage.get("completion_tokens", 0),
+                }
+            )
+
+    def run_step(self, context, step):
+        """
+        Run step is called from the Agent run (base class).
+        It's here so we can asyncio.run the thing!
        """
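+        # Note: asyncio.run creates a fresh event loop per step, so this
+        # assumes run_step is never invoked from an already-running loop.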
-        prompt_text = self.get_prompt(context)
-
        try:
-            # Run the loop
-            final_result = asyncio.run(self.execute_mission_async(prompt_text))
-            context["result"] = final_result
+            final_result = asyncio.run(self.execute(context, step))
+            context.result = final_result
        except Exception as e:
            context["error_message"] = str(e)
            logger.error(f"Agent failed: {e}")
-            raise  # Or handle gracefully depending on policy
-
+            raise e
        return context