import langwatch
import autogen
from openinference.instrumentation.autogen import AutoGenInstrumentor
import os
# Initialize LangWatch with the AutoGen instrumentor so every agent
# interaction below is traced automatically.
langwatch.setup(
    instrumentors=[AutoGenInstrumentor()]
)

# Provide a placeholder OpenAI key for local experimentation, but do NOT
# clobber a real key the user already exported — the original assignment
# would silently overwrite it with the dummy value.
os.environ.setdefault("OPENAI_API_KEY", "your-openai-api-key")
# Model configuration shared by every agent below.  The key is read from
# the environment so the same snippet works locally and in CI.
_openai_config = {
    "model": "gpt-5",
    "api_key": os.environ["OPENAI_API_KEY"],
}
config_list = [_openai_config]
# Build the two collaborating agents: the assistant produces answers and
# the user proxy drives the conversation (and runs any generated code).
def _is_termination_message(message):
    """Return True once a reply's content ends with the TERMINATE sentinel."""
    return message.get("content", "").rstrip().endswith("TERMINATE")


assistant = autogen.AssistantAgent(
    name="assistant",
    system_message="You are a helpful AI assistant.",
    llm_config={"config_list": config_list},
)

user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=10,
    is_termination_msg=_is_termination_message,
    code_execution_config={"work_dir": "workspace"},
    llm_config={"config_list": config_list},
)
# Use the agents as usual—traces will be sent to LangWatch automatically
def run_agent_conversation(user_message: str):
    """Start a chat between the user proxy and the assistant.

    The conversation runs until a termination condition fires; the
    LangWatch instrumentor configured at import time captures the traces.
    """
    user_proxy.initiate_chat(assistant, message=user_message)
    return "Conversation completed"
# Example usage: run one traced conversation when executed as a script.
if __name__ == "__main__":
    prompt = "Write a Python function to calculate fibonacci numbers"
    outcome = run_agent_conversation(prompt)
    print(f"Result: {outcome}")