Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 8 additions & 3 deletions launch/launch/agent/base_image.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,13 @@
"""
Base Docker image selection agent for repository environment setup.
"""
from langchain.schema import HumanMessage
try: # LangChain >= 0.3.26
from langchain_core.messages import HumanMessage # type: ignore[import-not-found]
except ImportError: # pragma: no cover - fallback for older LangChain
from langchain.schema import HumanMessage # type: ignore

from launch.agent.state import AgentState, auto_catch
from launch.agent.utils import message_content_to_str
from launch.utilities.language_handlers import get_language_handler


Expand Down Expand Up @@ -49,8 +53,9 @@ def select_base_image(state: AgentState) -> dict:
while not base_image or trials < 5:
trials += 1
response = llm.invoke(messages)
if "<image>" in response.content:
image = response.content.split("<image>")[1].split("</image>")[0]
response_text = message_content_to_str(response.content)
if "<image>" in response_text:
image = response_text.split("<image>")[1].split("</image>")[0]
if image in candidate_images:
base_image = image
break
Expand Down
14 changes: 10 additions & 4 deletions launch/launch/agent/locate.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,14 @@
"""
import os

from langchain.schema import HumanMessage
try: # LangChain >= 0.3.26
from langchain_core.messages import HumanMessage # type: ignore[import-not-found]
except ImportError: # pragma: no cover
from langchain.schema import HumanMessage # type: ignore

from launch.agent.state import AgentState, auto_catch
from launch.utilities.get_repo_structure import view_repo_structure
from launch.agent.utils import message_content_to_str

prompt = """Given this repository structure:
------ BEGIN REPOSITORY STRUCTURE ------
Expand Down Expand Up @@ -67,9 +71,10 @@ def locate_related_file(state: AgentState) -> dict:
)

response = llm.invoke([locate_prompt])
response_text = message_content_to_str(response.content)
potential_files = [
line.split("<file>")[1].split("</file>")[0].strip()
for line in response.content.split("\n")
for line in response_text.split("\n")
if line.strip() and "<file>" in line
]
potential_files = [
Expand Down Expand Up @@ -103,11 +108,12 @@ def locate_related_file(state: AgentState) -> dict:
determine_input = HumanMessage(content=determine_prompt.format(file=file_info))
try:
response = llm.invoke([determine_input])
response_text = message_content_to_str(response.content)
except Exception:
logger.error(f"Error determining file: {file}")
continue
logger.info(f"File: {file} - {response.content}")
if "<rel>Yes</rel>" in response.content:
logger.info(f"File: {file} - {response_text}")
if "<rel>Yes</rel>" in response_text:
docs += f"File: {file}\n```\n"
docs += content + "\n"
docs += "```\n"
Expand Down
12 changes: 9 additions & 3 deletions launch/launch/agent/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,14 +6,21 @@
import time
from typing import Any, Literal

from langchain_core.messages import HumanMessage, SystemMessage
try: # LangChain >= 0.3.26
from langchain_core.messages import ( # type: ignore[import-not-found]
HumanMessage,
SystemMessage,
)
except ImportError: # pragma: no cover
from langchain.schema import HumanMessage, SystemMessage # type: ignore
from pydantic import BaseModel, Field

from launch.agent.action_parser import ActionParser
from launch.agent.prompt import ReAct_prompt
from launch.agent.state import AgentState, auto_catch
from launch.runtime import start_session
from launch.utilities.language_handlers import get_language_handler
from launch.agent.utils import message_content_to_str

system_msg = """You are a developer. Your task is to install dependencies and set up a environment that is able to run the tests of the project.

Expand Down Expand Up @@ -224,11 +231,10 @@ def setup(max_steps: int, state: AgentState) -> dict:

response = llm.invoke(input_messages)


# print(response.pretty_repr())
logger.info("\n" + response.pretty_repr())
messages.append(response)
action = parse_setup_action(response.content)
action = parse_setup_action(message_content_to_str(response.content))
if action and action.action == "command":
commands.append(action.args)
observation = observation_for_setup_action(state, action)
Expand Down
44 changes: 44 additions & 0 deletions launch/launch/agent/utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
"""
Utility helpers shared across agent modules.
"""
from __future__ import annotations

from typing import Any


def message_content_to_str(content: Any) -> str:
    """
    Normalize LangChain message content into a plain string.

    Newer LangChain versions return ``message.content`` as either a raw
    string or a list of structured blocks
    (e.g., ``[{"type": "text", "text": "..."}]``). This helper flattens
    those structures so legacy string-based parsing logic keeps working.

    Args:
        content: The ``message.content`` value — a string, a list of
            string/dict blocks, ``None``, or any other object.

    Returns:
        A plain-string rendering of the content; ``""`` when *content*
        is ``None``.
    """
    if content is None:
        return ""

    if isinstance(content, str):
        return content

    if isinstance(content, list):
        parts: list[str] = []
        for block in content:
            if isinstance(block, str):
                parts.append(block)
            elif isinstance(block, dict):
                if block.get("type") == "text":
                    # Guard against a present-but-None (or non-str) "text"
                    # value: dict.get's default only applies when the key is
                    # absent, so the previous block.get("text", "") could
                    # hand None to "".join(...) and raise TypeError.
                    text = block.get("text")
                    parts.append("" if text is None else str(text))
                elif "text" in block:
                    parts.append(str(block["text"]))
                elif "content" in block:
                    parts.append(str(block["content"]))
                else:
                    # Fallback to the full dict representation to preserve info
                    parts.append(str(block))
            else:
                parts.append(str(block))
        return "".join(parts)

    return str(content)

11 changes: 9 additions & 2 deletions launch/launch/agent/verify.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,20 @@
"""
from typing import Any, Literal

from langchain.schema import HumanMessage, SystemMessage
try: # LangChain >= 0.3.26
from langchain_core.messages import ( # type: ignore[import-not-found]
HumanMessage,
SystemMessage,
)
except ImportError: # pragma: no cover
from langchain.schema import HumanMessage, SystemMessage # type: ignore
from pydantic import BaseModel, Field

from launch.agent.action_parser import ActionParser
from launch.agent.prompt import ReAct_prompt
from launch.agent.state import AgentState, auto_catch
from launch.runtime import SetupRuntime
from launch.agent.utils import message_content_to_str

system_msg = """You are a developer. Your task is to verify whether the environment for the given project is set up correctly. Your colleague has set up a Docker environment for the project. You need to verify if it can successfully run the tests of the project.
- You interact with a Bash session inside this container.
Expand Down Expand Up @@ -170,7 +177,7 @@ def verify(max_steps: int, state: AgentState) -> dict:
# print(response.pretty_repr())
logger.info(response.pretty_repr())
messages.append(response)
action = parse_verify_action(response.content)
action = parse_verify_action(message_content_to_str(response.content))
if action.action == "command":
commands.append(action.args)
observation = observation_for_verify_action(action, session)
Expand Down