website/docs/user-guide/basic-concepts/human-in-the-loop.mdx (32 changes: 20 additions & 12 deletions)
@@ -72,16 +72,20 @@ The workflow follows this pattern:
 from autogen import ConversableAgent, LLMConfig
 import os
 import random
+from dotenv import load_dotenv
+
+load_dotenv()

 # Note: Make sure to set your API key in your environment first

 # Configure the LLM
-llm_config = LLMConfig(config_list={
-    "api_type": "openai",
-    "model": "gpt-5-nano",
-    "api_key": os.environ.get("OPENAI_API_KEY"),
-    "temperature": 0.2,
-})
+llm_config = LLMConfig(
+    config_list={
+        "api_type": "openai",
+        "model": "gpt-5-nano",
+        "api_key": os.environ.get("OPENAI_API_KEY"),
+    }
+)

 # Define the system message for our finance bot
 finance_system_message = """
@@ -170,16 +174,20 @@ Here's the complete, ready-to-run code for our financial compliance Human in the
 from autogen import ConversableAgent, LLMConfig
 import os
 import random
+from dotenv import load_dotenv
+
+load_dotenv()

 # Note: Make sure to set your API key in your environment first

 # Configure the LLM
-llm_config = LLMConfig(config_list={
-    "api_type": "openai",
-    "model": "gpt-5-nano",
-    "api_key": os.environ.get("OPENAI_API_KEY"),
-    "temperature": 0.2,
-})
+llm_config = LLMConfig(
+    config_list={
+        "api_type": "openai",
+        "model": "gpt-5-nano",
+        "api_key": os.environ.get("OPENAI_API_KEY"),
+    }
+)

 # Define the system message for our finance bot
 finance_system_message = """
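Reviewer note: to see the updated configuration in context, here is a minimal sketch of the human-in-the-loop flow this page builds, using only the API already shown in the hunks above (ConversableAgent, human_input_mode, initiate_chat). The system message and the sample transaction are illustrative, not taken from the page.

```python
# Sketch only (not part of the diff): the new dotenv-based setup combined with
# a human reviewer agent, following the ConversableAgent API used on this page.
import os
from dotenv import load_dotenv
from autogen import ConversableAgent, LLMConfig

load_dotenv()  # reads OPENAI_API_KEY from a local .env file, if present

llm_config = LLMConfig(
    config_list={
        "api_type": "openai",
        "model": "gpt-5-nano",
        "api_key": os.environ.get("OPENAI_API_KEY"),
    }
)

finance_bot = ConversableAgent(
    name="finance_bot",
    llm_config=llm_config,
    system_message="You are a finance assistant. Flag transactions over $10,000 for human approval.",
)

# human_input_mode="ALWAYS" pauses at every turn and asks the person for input,
# which is what gives this example its human-in-the-loop behaviour.
human = ConversableAgent(name="human", human_input_mode="ALWAYS")

human.initiate_chat(
    recipient=finance_bot,
    message="Please review this transaction: $12,500 wire transfer to vendor Acme Corp.",
)
```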
website/docs/user-guide/basic-concepts/introducing-group-chat.mdx (64 changes: 42 additions & 22 deletions)
@@ -59,9 +59,18 @@ from autogen import ConversableAgent, LLMConfig
 from autogen.agentchat import initiate_group_chat
 from autogen.agentchat.group.patterns import AutoPattern

+# build a Config List
+llm_config = LLMConfig(
+    config_list={
+        "api_type": "openai",
+        "model": "gpt-5-nano",
+        "api_key": os.environ.get("OPENAI_API_KEY"),
+    }
+)
+
 # Create your specialized agents
-agent_1 = ConversableAgent(name="agent_1", llm_config=llm_config, system_message="...")
-agent_2 = ConversableAgent(name="agent_2", llm_config=llm_config, system_message="...")
+agent_1 = ConversableAgent(name="agent_1", llm_config=llm_config, system_message="goto agent_2")
+agent_2 = ConversableAgent(name="agent_2", llm_config=llm_config, system_message="go back to agent_1")

 # Create human agent if needed
 human = ConversableAgent(name="human", human_input_mode="ALWAYS")
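Continuing directly from the snippet above (so it reuses llm_config, agent_1, agent_2, human and the two imports shown there), here is a hedged sketch of how these agents would be placed into a group chat with AutoPattern and initiate_group_chat. The max_rounds value and the opening message are illustrative, not part of the diff.

```python
# Sketch only: wiring the agents above into a group chat. Reuses llm_config,
# agent_1, agent_2 and human from the snippet this hunk modifies.
pattern = AutoPattern(
    initial_agent=agent_1,                          # speaks first
    agents=[agent_1, agent_2],                      # candidates the manager can pick from
    user_agent=human,                               # the human is pulled in when selected
    group_manager_args={"llm_config": llm_config},  # the manager needs an LLM to choose speakers
)

result, context_variables, last_agent = initiate_group_chat(
    pattern=pattern,
    messages="Begin the hand-off exercise.",
    max_rounds=6,  # illustrative cap so the ping-pong between the two agents stops
)
```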
@@ -109,16 +118,20 @@ from autogen.agentchat.group.patterns import AutoPattern

 import os
 import random
+from dotenv import load_dotenv
+
+load_dotenv()

 # Note: Make sure to set your API key in your environment first

-# Configure the LLM
-llm_config = LLMConfig(config_list={
-    "api_type": "openai",
-    "model": "gpt-5-nano",
-    "api_key": os.environ.get("OPENAI_API_KEY"),
-    "temperature": 0.2,
-})
+# Build a LLM Config List
+llm_config = LLMConfig(
+    config_list={
+        "api_type": "openai",
+        "model": "gpt-5-nano",
+        "api_key": os.environ.get("OPENAI_API_KEY"),
+    }
+)

 # Define the system message for our finance bot
 finance_system_message = """
@@ -261,16 +274,19 @@ Here's the complete, ready-to-run code for our enhanced financial compliance sys

 import os
 import random
+from dotenv import load_dotenv

-# Note: Make sure to set your API key in your environment first
+load_dotenv()

+# Note: Make sure to set your API key in your environment first, i.e., .env
 # Configure the LLM
-llm_config = LLMConfig(config_list={
-    "api_type": "openai",
-    "model": "gpt-5-nano",
-    "api_key": os.environ.get("OPENAI_API_KEY"),
-    "temperature": 0.2
-})
+llm_config = LLMConfig(
+    config_list={
+        "api_type": "openai",
+        "model": "gpt-5-nano",
+        "api_key": os.environ.get("OPENAI_API_KEY"),
+    }
+)

 # Define the system message for our finance bot
 finance_system_message = """
@@ -539,6 +555,9 @@ Here's the updated code that incorporates automatic termination:
 from typing import Any
 import os
 import random
+from dotenv import load_dotenv
+
+load_dotenv()

 from autogen import ConversableAgent, LLMConfig
 from autogen.agentchat import initiate_group_chat
@@ -547,12 +566,13 @@
 # Note: Make sure to set your API key in your environment first

 # Configure the LLM
-llm_config = LLMConfig(config_list={
-    "api_type": "openai",
-    "model": "gpt-5-nano",
-    "api_key": os.environ.get("OPENAI_API_KEY"),
-    "temperature": 0.2,
-})
+llm_config = LLMConfig(
+    config_list={
+        "api_type": "openai",
+        "model": "gpt-5-nano",
+        "api_key": os.environ.get("OPENAI_API_KEY"),
+    }
+)

 # Define the system message for our finance bot
 finance_system_message = """
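The last two hunks above belong to the page's automatic-termination section, so here is a hedged sketch of the usual pattern: letting the group manager stop the chat when a reply carries a completion marker. The DONE! marker, the agent names, and max_rounds are assumptions for illustration; llm_config and finance_system_message refer to the values defined in the hunks above.

```python
# Sketch only: automatic termination via a completion marker, assuming the
# AutoPattern / initiate_group_chat API shown earlier on this page.
from autogen import ConversableAgent
from autogen.agentchat import initiate_group_chat
from autogen.agentchat.group.patterns import AutoPattern

finance_bot = ConversableAgent(
    name="finance_bot",
    llm_config=llm_config,                  # the LLMConfig built in the hunks above
    system_message=finance_system_message,  # the system message defined in the hunks above
)
summary_bot = ConversableAgent(
    name="summary_bot",
    llm_config=llm_config,
    system_message="Summarize the outcome of every transaction, then end your reply with DONE!",
)

pattern = AutoPattern(
    initial_agent=finance_bot,
    agents=[finance_bot, summary_bot],
    group_manager_args={
        "llm_config": llm_config,
        # Stop the chat as soon as a message contains the marker.
        "is_termination_msg": lambda msg: "DONE!" in (msg.get("content") or ""),
    },
)

result, context_variables, last_agent = initiate_group_chat(
    pattern=pattern,
    messages="Review these transactions: $1,500 vendor payment; $12,000 wire transfer.",
    max_rounds=20,  # safety cap in case the marker never appears
)
```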
website/docs/user-guide/basic-concepts/introducing-tools.mdx (32 changes: 19 additions & 13 deletions)
@@ -84,20 +84,23 @@ First, let's create a function that checks for duplicate payments:
 import os
 from typing import Annotated, Any
 from datetime import datetime, timedelta
+from dotenv import load_dotenv

 from autogen import ConversableAgent, LLMConfig
 from autogen.agentchat import initiate_group_chat
 from autogen.agentchat.group.patterns import AutoPattern

+load_dotenv()
 # Note: Make sure to set your API key in your environment first

 # Configure the LLM
-llm_config = LLMConfig(config_list={
-    "api_type": "openai",
-    "model": "gpt-5-nano",
-    "api_key": os.environ.get("OPENAI_API_KEY"),
-    "temperature": 0.2,
-})
+llm_config = LLMConfig(
+    config_list={
+        "api_type": "openai",
+        "model": "gpt-5-nano",
+        "api_key": os.environ.get("OPENAI_API_KEY"),
+    }
+)

 # Mock database of previous transactions
 def get_previous_transactions() -> list[dict[str, Any]]:
@@ -254,7 +257,7 @@ transactions = [

 # Format the initial message
 initial_prompt = (
-    "Please process the following transactions one at a time, checking for duplicates:\n\n" +
+    "Please process the following transactions one at a time, checking for duplicates:\n note: strictly goto summary_bot once all tasks are done and generate final summary\n\n" +
     "\n".join([f"{i+1}. {tx}" for i, tx in enumerate(transactions)])
 )
```
@@ -324,20 +327,23 @@ Here's the complete, ready-to-run code for our enhanced financial compliance sys
 import random
 from typing import Annotated, Any
 from datetime import datetime, timedelta
+from dotenv import load_dotenv

 from autogen import ConversableAgent, LLMConfig
 from autogen.agentchat import initiate_group_chat
 from autogen.agentchat.group.patterns import AutoPattern

+load_dotenv()
 # Note: Make sure to set your API key in your environment first

 # Configure the LLM
-llm_config = LLMConfig(config_list={
-    "api_type": "openai",
-    "model": "gpt-5-nano",
-    "api_key": os.environ.get("OPENAI_API_KEY"),
-    "temperature": 0.2,
-})
+llm_config = LLMConfig(
+    config_list={
+        "api_type": "openai",
+        "model": "gpt-5-nano",
+        "api_key": os.environ.get("OPENAI_API_KEY"),
+    }
+)

 # Mock database of previous transactions
 def get_previous_transactions() -> list[dict[str, Any]]:
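For context, a hedged sketch of the tool flow this page describes: a mock transaction lookup plus a duplicate-payment check, registered so one agent proposes the call and another executes it. register_function is the generic AG2 helper; the exact data, agent names and wiring on the real page may differ.

```python
# Sketch only: a duplicate-payment tool in the shape this page describes.
# Transaction data, agent names and messages are illustrative.
import os
from typing import Annotated, Any
from dotenv import load_dotenv
from autogen import ConversableAgent, LLMConfig, register_function

load_dotenv()

llm_config = LLMConfig(
    config_list={
        "api_type": "openai",
        "model": "gpt-5-nano",
        "api_key": os.environ.get("OPENAI_API_KEY"),
    }
)

# Mock database of previous transactions (stand-in for the page's version)
def get_previous_transactions() -> list[dict[str, Any]]:
    return [
        {"id": "TX-1001", "vendor": "Acme Corp", "amount": 1500.00},
        {"id": "TX-1002", "vendor": "Globex", "amount": 12000.00},
    ]

def check_duplicate_payment(
    vendor: Annotated[str, "Vendor name"],
    amount: Annotated[float, "Payment amount in USD"],
) -> dict[str, Any]:
    """Flag a payment that matches a previously recorded vendor/amount pair."""
    for tx in get_previous_transactions():
        if tx["vendor"] == vendor and tx["amount"] == amount:
            return {"is_duplicate": True, "matched_transaction": tx["id"]}
    return {"is_duplicate": False, "matched_transaction": None}

finance_bot = ConversableAgent(
    name="finance_bot",
    llm_config=llm_config,
    system_message="Check every payment for duplicates before approving it.",
)
executor = ConversableAgent(name="executor_agent", human_input_mode="NEVER")

# finance_bot may propose the tool call; executor_agent actually runs it.
register_function(
    check_duplicate_payment,
    caller=finance_bot,
    executor=executor,
    description="Check whether a payment duplicates a previously recorded transaction.",
)
```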
website/docs/user-guide/basic-concepts/structured-outputs.mdx (26 changes: 13 additions & 13 deletions)
@@ -51,11 +51,12 @@ class ResponseModel(BaseModel):
     field3: list[str]

 # 2. Create LLM configuration with the structured output model
-llm_config = LLMConfig(config_list={
+llm_config = LLMConfig(
+    config_list={
         "api_type": "openai",
         "model": "gpt-5-nano",
-        "response_format": ResponseModel, # Specify the response format
     },
+    response_format=ResponseModel, # Specify the response format
 )

 # 3. Create agent with structured output configuration
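For reviewers unfamiliar with the feature this hunk touches: once response_format is set, the agent's reply content is the Pydantic model serialized as JSON, so it can be validated back into ResponseModel. A hedged sketch under that assumption; the types of field1 and field2 are guesses, since the hunk only shows field3.

```python
# Sketch only: defining the placeholder model and validating a structured reply.
# Assumes the reply content is the model serialized as JSON, as this page describes.
import os
from pydantic import BaseModel
from autogen import ConversableAgent, LLMConfig

class ResponseModel(BaseModel):
    field1: str        # types for field1/field2 are assumed
    field2: int
    field3: list[str]

llm_config = LLMConfig(
    config_list={
        "api_type": "openai",
        "model": "gpt-5-nano",
        "api_key": os.environ.get("OPENAI_API_KEY"),
    },
    response_format=ResponseModel,
)

agent = ConversableAgent(
    name="structured_agent",
    llm_config=llm_config,
    system_message="Answer using the required schema.",
)

reply = agent.generate_reply(messages=[{"role": "user", "content": "Summarize three project risks."}])
content = reply["content"] if isinstance(reply, dict) else reply
parsed = ResponseModel.model_validate_json(content)  # raises if the JSON does not match the schema
print(parsed.field3)
```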
@@ -107,13 +108,12 @@ Next, let's update our summary agent to use this structured output model:
```python hl_lines="7 12-16"
 # Configure the LLM for summary bot with structured output
 summary_llm_config = LLMConfig(
-    {
+    config_list = {
         "api_type": "openai",
         "model": "gpt-5-nano",
         "api_key": os.environ.get("OPENAI_API_KEY"),
-        "temperature": 0.2,
-        "response_format": AuditLogSummary, # Using our Pydantic model for structured output
     },
+    response_format=AuditLogSummary, # Using our Pydantic model for structured output
 )

 # Define the system message for the summary agent
@@ -274,12 +274,13 @@ Here's the complete, ready-to-run code for our financial compliance system with
 }

 # Configure the LLM for finance bot (standard configuration)
-finance_llm_config = LLMConfig(config_list={
-    "api_type": "openai",
-    "model": "gpt-5-nano",
-    "api_key": os.environ.get("OPENAI_API_KEY"),
-    "temperature": 0.2
-})
+finance_llm_config = LLMConfig(
+    config_list={
+        "api_type": "openai",
+        "model": "gpt-5-nano",
+        "api_key": os.environ.get("OPENAI_API_KEY"),
+    }
+)

 # Define the system message for our finance bot
 finance_system_message = """
@@ -332,7 +333,6 @@ Here's the complete, ready-to-run code for our financial compliance system with
         "api_type": "openai",
         "model": "gpt-5-nano",
         "api_key": os.environ.get("OPENAI_API_KEY"),
-        "temperature": 0.2,
     },
     response_format=AuditLogSummary, # Using the Pydantic model for structured output
 )
@@ -371,7 +371,7 @@

 # Format the initial message
 initial_prompt = (
-    "Please process the following transactions one at a time, checking for duplicates:\n\n" +
+    "Please process the following transactions one at a time, checking for duplicates:\n note: strictly goto summary_bot once all tasks are done and generate final summary\n\n" +
     "\n".join([f"{i+1}. {tx}" for i, tx in enumerate(transactions)])
 )

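Finally, a hedged sketch of how the two configurations in this file's final listing fit together after the change: a plain config for the finance bot and a response_format-bearing config for the summary bot. The AuditLogSummary fields shown here are placeholders, not the page's actual schema.

```python
# Sketch only: the two-config arrangement used in the final listing, with an
# assumed (placeholder) AuditLogSummary schema. Not the page's actual field names.
import os
from pydantic import BaseModel
from autogen import ConversableAgent, LLMConfig

class AuditLogSummary(BaseModel):  # placeholder fields for illustration
    total_transactions: int
    flagged_transactions: list[str]
    notes: str

finance_llm_config = LLMConfig(
    config_list={
        "api_type": "openai",
        "model": "gpt-5-nano",
        "api_key": os.environ.get("OPENAI_API_KEY"),
    }
)

summary_llm_config = LLMConfig(
    config_list={
        "api_type": "openai",
        "model": "gpt-5-nano",
        "api_key": os.environ.get("OPENAI_API_KEY"),
    },
    response_format=AuditLogSummary,  # the summary agent replies in this schema
)

finance_bot = ConversableAgent(
    name="finance_bot",
    llm_config=finance_llm_config,
    system_message="Review each transaction and note anything suspicious.",
)
summary_bot = ConversableAgent(
    name="summary_bot",
    llm_config=summary_llm_config,
    system_message="Produce the final audit log summary.",
)
```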