32 changes: 2 additions & 30 deletions src/codegen/agents/code_agent.py
@@ -37,22 +37,6 @@ def __init__(
         metadata: Optional[dict] = {},
         **kwargs,
     ):
-        """Initialize a CodeAgent.
-
-        Args:
-            codebase: The codebase to operate on
-            model_provider: The model provider to use ("anthropic" or "openai")
-            model_name: Name of the model to use
-            memory: Whether to let LLM keep track of the conversation history
-            tools: Additional tools to use
-            tags: Tags to add to the agent trace. Must be of the same type.
-            metadata: Metadata to use for the agent. Must be a dictionary.
-            **kwargs: Additional LLM configuration options. Supported options:
-                - temperature: Temperature parameter (0-1)
-                - top_p: Top-p sampling parameter (0-1)
-                - top_k: Top-k sampling parameter (>= 1)
-                - max_tokens: Maximum number of tokens to generate
-        """
         self.codebase = codebase
         self.agent = create_codebase_agent(
             self.codebase,
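
Note: the deleted docstring was the only place the supported **kwargs were documented. A minimal construction sketch preserving that information (the Codebase import path and the model name are assumptions for illustration, not taken from this PR):

from codegen import Codebase  # assumed public import path
from codegen.agents.code_agent import CodeAgent

codebase = Codebase("./")  # hypothetical local repo

# Keyword options below mirror the ranges the deleted docstring documented.
agent = CodeAgent(
    codebase,
    model_provider="anthropic",             # or "openai"
    model_name="claude-3-5-sonnet-latest",  # hypothetical model name
    memory=True,                            # keep conversation history
    temperature=0.2,                        # 0-1 per the removed docstring
    max_tokens=4096,                        # cap on generated tokens
)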
@@ -65,14 +49,11 @@ def __init__(
         self.model_name = model_name
         self.langsmith_client = Client()
 
-        # Get project name from environment variable or use a default
         self.project_name = os.environ.get("LANGCHAIN_PROJECT", "RELACE")
         print(f"Using LangSmith project: {self.project_name}")
 
-        # Initialize tags for agent trace
         self.tags = [*tags, self.model_name]
 
-        # Initialize metadata for agent trace
         self.metadata = {
             "project": self.project_name,
             "model": self.model_name,
@@ -100,15 +81,11 @@ def run(self, prompt: str, thread_id: Optional[str] = None) -> str:
             "recursion_limit": 100,
         }
 
-        # this message has a reducer which appends the current message to the existing history
-        # see more https://langchain-ai.github.io/langgraph/concepts/low_level/#reducers
         input = {"query": prompt}
 
         config = RunnableConfig(configurable={"thread_id": thread_id}, tags=self.tags, metadata=self.metadata, recursion_limit=100)
-        # we stream the steps instead of invoke because it allows us to access intermediate nodes
         stream = self.agent.stream(input, config=config, stream_mode="values")
 
-        # Keep track of run IDs from the stream
         run_ids = []
 
         for s in stream:
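
Note: the deleted comments carried the rationale for two choices in this hunk: the "query" input feeds a LangGraph state channel whose reducer appends each message to the existing history (https://langchain-ai.github.io/langgraph/concepts/low_level/#reducers), and stream is used instead of invoke so intermediate node outputs stay observable. A rough sketch of that difference, assuming a compiled LangGraph graph with "messages" and "final_answer" state keys:

# invoke returns only the final state
final_state = graph.invoke({"query": "rename foo to bar"}, config=config)
print(final_state["final_answer"])

# stream with stream_mode="values" yields the full state after every step,
# so tool calls and intermediate messages can be printed as they happen
for state in graph.stream({"query": "rename foo to bar"}, config=config, stream_mode="values"):
    state["messages"][-1].pretty_print()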
@@ -120,21 +97,17 @@ def run(self, prompt: str, thread_id: Optional[str] = None) -> str:
             if isinstance(message, tuple):
                 print(message)
             else:
-                if isinstance(message, AIMessage) and isinstance(message.content, list) and "text" in message.content[0]:
+                if isinstance(message, AIMessage) and isinstance(message.content, list) and len(message.content) > 0 and "text" in message.content[0]:
                     AIMessage(message.content[0]["text"]).pretty_print()
                 else:
                     message.pretty_print()
 
-                # Try to extract run ID if available in metadata
                 if hasattr(message, "additional_kwargs") and "run_id" in message.additional_kwargs:
                     run_ids.append(message.additional_kwargs["run_id"])
 
-        # Get the last message content
         result = s["final_answer"]
 
-        # Try to find run IDs in the LangSmith client's recent runs
         try:
-            # Find and print the LangSmith run URL
             find_and_print_langsmith_run_url(self.langsmith_client, self.project_name)
         except Exception as e:
             separator = "=" * 60
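
Note: the len(message.content) > 0 clause is the only behavioral change in this PR. Anthropic-style AIMessage.content is a list of content blocks, and when that list is empty the old condition raised IndexError at message.content[0] before the "text" membership test could run. A minimal reproduction of what the added clause prevents:

from langchain_core.messages import AIMessage

message = AIMessage(content=[])  # a model turn with no content blocks

# Old condition: the isinstance checks pass, then message.content[0]
# raises IndexError: list index out of range.
# New condition: len(message.content) > 0 short-circuits before indexing.
if isinstance(message.content, list) and len(message.content) > 0 and "text" in message.content[0]:
    print(message.content[0]["text"])
else:
    message.pretty_print()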
@@ -153,7 +126,6 @@ def get_agent_trace_url(self) -> str | None:
             The URL for the run in LangSmith if found, None otherwise
         """
         try:
-            # TODO - this is definitely not correct, we should be able to get the URL directly...
             return find_and_print_langsmith_run_url(client=self.langsmith_client, project_name=self.project_name)
         except Exception as e:
             separator = "=" * 60
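
Note: the deleted TODO said the trace URL should be obtainable directly rather than through find_and_print_langsmith_run_url. One plausible direct route via the langsmith SDK is sketched below; this is an assumption about the SDK (Run objects expose a url property in recent versions), not the project's actual method:

from langsmith import Client

client = Client()
# Fetch the most recent run in the project; Run objects carry a web URL.
runs = client.list_runs(project_name="RELACE", limit=1)
latest = next(iter(runs), None)
print(latest.url if latest is not None else "no runs found")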
@@ -173,7 +145,7 @@ def get_state(self) -> dict:
     def get_tags_metadata(self) -> tuple[list[str], dict]:
         tags = [self.model_name]
         metadata = {"project": self.project_name, "model": self.model_name}
-        # Add SWEBench run ID and instance ID to the metadata and tags for filtering
+
         if self.run_id is not None:
             metadata["swebench_run_id"] = self.run_id
             tags.append(self.run_id)