@mleedix · a year ago
Tool: CW_PromptGen
Last Updated: a year ago
Created: a year ago
Downloads: 493+
Saves: 0+
Description
CW Prompt Gen for Open WebUI helps users create optimized prompts tailored to different Large Language Models, leveraging multi-agent support and CI modes for improved adaptability and response quality.
Tool Code
import requests
from pydantic import BaseModel, Field


class Tools:
    # Open WebUI discovers tools on a top-level Tools class; Valves must be a
    # pydantic model so the UI can render the configuration form.
    class Valves(BaseModel):
        model_name: str = Field(
            default="default-model", description="Target LLM to tailor prompts for"
        )
        compliance_level: int = Field(
            default=1, description="1 = basic, 2 = intermediate, 3 = advanced"
        )

    def __init__(self):
        self.valves = self.Valves()
        self.compliance_attributes = {}
        self.llm_requirements = {}

    def fetch_llm_requirements(self, model_name: str) -> str:
        """Fetch model-specific prompt requirements from an external API."""
        try:
            response = requests.get(
                f"https://api.example.com/models/{model_name}/requirements",
                timeout=10,
            )
            if response.status_code == 200:
                self.llm_requirements = response.json()
                return "LLM requirements fetched successfully."
            return f"Failed to fetch LLM requirements: {response.status_code}"
        except requests.ConnectionError:
            return "Internet connection unavailable."

    def adaptive_prompt(self, user_input: str) -> str:
        """Generate a prompt customized to the compliance level and LLM requirements."""
        base_prompt = "Generate response based on compliance level and user context."
        llm_specifics = (
            f" (LLM adjustments: "
            f"{self.llm_requirements.get('prompt_structure', 'Standard')})"
        )
        level = self.valves.compliance_level
        if level == 1:
            return f"{base_prompt} Basic response for input: {user_input}{llm_specifics}"
        elif level == 2:
            return f"{base_prompt} Intermediate response for input: {user_input}{llm_specifics}"
        elif level == 3:
            return f"{base_prompt} Advanced response with high detail for input: {user_input}{llm_specifics}"
        return f"{base_prompt} Default response for input: {user_input}{llm_specifics}"

    async def emit_status(self, __event_emitter__, message: str, done: bool = False):
        """Emit a real-time status update via Open WebUI's event emitter."""
        # Guard against direct invocation outside the UI, where no emitter exists.
        if __event_emitter__:
            await __event_emitter__(
                {"type": "status", "data": {"description": message, "done": done}}
            )

    async def handle_user_input(
        self, user_input: str, model_name: str, __event_emitter__=None
    ) -> dict:
        """Fetch LLM requirements, emit status updates, and generate a custom prompt."""
        # Open WebUI injects __event_emitter__ into tool calls at runtime.
        await self.emit_status(__event_emitter__, "Fetching LLM requirements...")
        fetch_status = self.fetch_llm_requirements(model_name)
        await self.emit_status(__event_emitter__, "Generating adaptive prompt...")
        prompt = self.adaptive_prompt(user_input)
        await self.emit_status(__event_emitter__, "Prompt ready.", done=True)
        return {
            "fetch_status": fetch_status,
            "generated_prompt": prompt,
            "compliance_level": self.valves.compliance_level,
            "verbosity": self.compliance_attributes.get("verbosity", "standard"),
        }
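Usage Example
A minimal sketch of calling the tool directly, outside the Open WebUI runtime. The async print_emitter callback is a hypothetical stand-in for the __event_emitter__ that Open WebUI normally injects, and api.example.com in the tool code is a placeholder endpoint, so fetch_status will report a failure here and the prompt falls back to the 'Standard' structure.

import asyncio


async def main():
    # Stand-in for the event emitter Open WebUI injects at call time.
    async def print_emitter(event):
        print(f"[{event['type']}] {event['data']['description']}")

    tool = Tools()
    tool.valves.compliance_level = 2  # request the intermediate mode
    result = await tool.handle_user_input(
        "Summarize the attached report.",
        model_name=tool.valves.model_name,
        __event_emitter__=print_emitter,
    )
    print(result["generated_prompt"])


asyncio.run(main())

Note that the tool degrades gracefully: a failed requirements fetch is reported in fetch_status rather than raised, and prompt generation still completes with the default structure.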