"""
title: Autogen Tool
author: matthewh
version: 1.0
required_open_webui_version: 0.3.9
Instructions:
1. Install and run AutoGen Studio:
```bash
pip install autogenstudio "flaml[automl]" matplotlib
autogenstudio ui --port=8081 --docs --host=0.0.0.0 --workers=2
```
2. Export a workflow from AutoGen Studio and serve it as an endpoint:
```bash
autogenstudio serve --workflow=workflow.json --port=8082 --host=0.0.0.0 --docs --workers=2
```
3. Use http://localhost:8082/docs to interact with the endpoint.
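   For example, you can query it directly with curl (a sketch, assuming the
   default /predict route this tool targets; the prompt goes in the URL path):
```bash
curl -s -H "Accept: application/json" \
  "http://localhost:8082/predict/Write%20a%20haiku"
```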
4. This tool sends the user's query to that endpoint and returns the response, formatted for display in the chat. The endpoint is expected to return JSON with a "response" field.
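5. To point the tool at a different endpoint, set the AUTOGEN_BASE_URL environment variable before the tool loads, or adjust the valve in the tool's settings.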
"""

import os
import urllib.parse
from typing import Dict

import requests
from pydantic import BaseModel, Field


class Tools:
"""
Tools class to house the AutoGen tool functions, compatible with Open WebUI.
"""

    class Valves(BaseModel):
"""
Configurable parameters (valves) for the AutoGen tool.
"""
AUTOGEN_BASE_URL: str = Field(
default=os.getenv(
"AUTOGEN_BASE_URL", "http://host.docker.internal:8082/predict"
),
description="Base URL for the AutoGen endpoint.",
)
request_timeout: int = Field(
default=300, description="Timeout for the HTTP request (in seconds)."
)
debug: bool = Field(
default=False, description="Enable or disable debug logging."
)

    def __init__(self):
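        # Instantiate valves with their defaults; Open WebUI exposes these
        # fields in the tool's settings so they can be overridden at runtime.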
self.valves = self.Valves()

    def autogen_tool(self, query: str) -> Dict[str, str]:
"""
        Send a query to the AutoGen endpoint and return a formatted response.

        :param query: The user's input query.
        :return: A dictionary with a single "content" key holding the
            formatted response for the LLM.
"""
if self.valves.debug:
print(f"[DEBUG] Starting tool with query: {query}")
encoded_prompt = urllib.parse.quote(query, safe="")
url = f"{self.valves.AUTOGEN_BASE_URL}/{encoded_prompt}"
headers = {"Accept": "application/json"}
timeout = self.valves.request_timeout
if self.valves.debug:
print(f"[DEBUG] Sending request to {url}")
try:
response = requests.get(url, headers=headers, timeout=timeout)
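            # Surface 4xx/5xx responses as exceptions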
response.raise_for_status()
data = response.json()
if self.valves.debug:
print(f"[DEBUG] Response data: {data}")
# Format the response as an LLM-friendly prompt
if isinstance(data, dict) and "response" in data:
message = data["response"]
formatted_message = (
f"Provide the following response to the user:\n"
f"```autogen\n{message}\n```"
)
return {"content": formatted_message}
return {"content": "Unexpected response format."}
        except (requests.RequestException, ValueError) as e:
            # RequestException covers connection, timeout, and HTTP errors;
            # ValueError covers a response body that is not valid JSON.
            error_message = f"Error during request: {e}"
if self.valves.debug:
print(f"[DEBUG] {error_message}")
return {"content": error_message}


# # Example usage for local testing
# if __name__ == "__main__":
#     tools = Tools()  # Initialize with default valves
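#     tools.valves.debug = True  # Optionally enable debug logging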
#     query = "example query"
#     result = tools.autogen_tool(query)
#     print(result["content"])