ImageRouter
Type: Function (pipe)
Version: v1.1.0
Last Updated: a month ago
Created: 5 months ago
Function ID: imagerouter
Creator: @dawe
Downloads: 385+
Description
Image generation with 80+ models - ImageRouter.io official plugin
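The plugin discovers these models at runtime from the Image Router model listing and only exposes entries that can produce images (see pipes() in the function code below). The following is a minimal, illustrative sketch of that discovery step, not part of the plugin itself; it assumes the listing endpoint returns a JSON object keyed by model id, as the function code does.

import httpx

# Fetch the public model listing and keep image-capable entries,
# applying the same rule as pipes() in the function code below.
models = httpx.get("https://api.imagerouter.io/v1/models", timeout=10.0).json()

def is_image_model(info: dict) -> bool:
    # A model qualifies unless it declares an output list that excludes "image"
    outputs = info.get("output") or info.get("outputs")
    return not outputs or "image" in outputs

image_models = [model_id for model_id, info in models.items() if is_image_model(info)]
print(f"{len(image_models)} image-capable models available")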
README
Function Code
""" title: Image Router Image Generator Pipe author: Professor Patterns description: Generate images using any Image Router model and display them in chat version: 1.1.0 license: MIT """ import json import traceback import httpx import base64 import mimetypes from pydantic import BaseModel, Field from typing import Optional, Dict, Any, List, Callable, Awaitable, AsyncGenerator from enum import Enum IMAGE_ROUTER_API_BASE_URL = "https://api.imagerouter.io/v1/openai" class ImageQuality(str, Enum): auto = "auto" high = "high" medium = "medium" low = "low" class Pipe: class Valves(BaseModel): IMAGE_ROUTER_API_KEY: str = Field( default="", title="Image Router API Key", description="Your Image Router API key for authentication" ) QUALITY: ImageQuality = Field( default=ImageQuality.auto, title="Quality", description="Quality of the image (auto, high, medium, low)", ) def __init__(self): self.valves = self.Valves() self.id = "imagerouter" self.name = "Image Router" self.emitter = None def pipes(self) -> List[dict]: """Return a pipe entry for every model returned by the Image Router API. The API response is a JSON object where the keys are model identifiers, e.g. "google/gemini-2.0-flash-exp:free". We convert every key into a pipe description. The *id* of each pipe is slug-ified so that it is safe to use in HTML and WebUI contexts, while the *name* keeps the original model identifier for better readability. If the request fails we fall back to a single default pipe so the extension keeps working offline. """ # Simple in-memory cache to avoid querying the API on every call if hasattr(self, "_pipes_cache") and self._pipes_cache: return self._pipes_cache api_url = "https://api.imagerouter.io/v1/models" try: response = httpx.get(api_url, timeout=10.0) response.raise_for_status() data = response.json() def _slugify(model_id: str) -> str: # Replace characters that might break ids ("/", " ", "::", etc.) return ( "imagerouter-" + model_id.lower().replace("/", "-").replace(" ", "-").replace(":", "-") ) # Build list of pipes (only for IMAGE models) and a reverse lookup table for id -> model pipes_list = [] self._id_to_model = {} for model_key, model_info in data.items(): # The API includes both image and video models. We only expose # those that are able to generate *images* so that they show up # in the WebUI image picker. A model is considered an "image" # model if its "output" field (a list) contains the string # "image". outputs = model_info.get("output") or model_info.get("outputs") # Skip anything that does not explicitly list support for # images (e.g. 
video-only models) if outputs and "image" not in outputs: continue pipe_id = _slugify(model_key) pipes_list.append({"id": pipe_id, "name": f" - {model_key}"}) self._id_to_model[pipe_id] = model_key # Cache the result for subsequent calls self._pipes_cache = pipes_list return pipes_list except Exception as e: # Gracefully fall back to a default entry if the remote request fails print(f"[Image Router] Failed to fetch models list: {e}") return [ { "id": "test/test", "name": " - test/test", } ] def _get_last_user_message_and_images( self, messages: List[Dict[str, Any]] ) -> (Optional[str], List[str]): """Extract the last user message and images from the conversation""" text_prompt = None images = [] for message in reversed(messages): if message.get("role") == "user": content = message.get("content") if isinstance(content, str): text_prompt = content[:30000] # Once we find the last user message, we can stop return text_prompt, images elif isinstance(content, list): # Handle content that is a list of parts (e.g., text and images) for part in content: if part.get("type") == "text": text_prompt = part.get("text", "")[:30000] elif part.get("type") == "image_url": image_url = part.get("image_url", {}).get("url") if image_url: images.append(image_url) # Once we process the last user message, we can stop return text_prompt, images return None, [] async def _emit_status( self, description: str, done: bool = False ) -> Awaitable[None]: """Send status updates""" if self.emitter: return await self.emitter( { "type": "status", "data": { "description": description, "done": done, }, } ) return None async def pipe( self, body: dict, __event_emitter__: Optional[Callable[[Dict[str, Any]], Awaitable[None]]] = None, ) -> AsyncGenerator[str, None]: """Generate images using Image Router API and display them in chat""" self.emitter = __event_emitter__ try: # Validate API key if not self.valves.IMAGE_ROUTER_API_KEY: yield json.dumps( {"error": "Image Router API key is not configured. Get an API key: https://imagerouter.io/api-keys. Enter your API key in the Admin Panel -> Functions -> ImageRouter.io settings."}, ensure_ascii=False ) return # Extract the prompt from the last user message last_user_message, images = self._get_last_user_message_and_images( body.get("messages", []) ) if not last_user_message and not images: yield json.dumps( {"error": "No user message with text or images found"}, ensure_ascii=False, ) return # Skip prompts that begin with the sentinel "### Task:" (often system-level instructions) if last_user_message and last_user_message.lstrip().startswith("### Task:"): # Silently ignore such prompts and exit early return # Send status update await self._emit_status( "Starting Image Router image generation..." ) # Log what we're about to do print(f"Preparing to generate image with prompt: '{last_user_message}'") # Prepare request parameters headers = { "Authorization": f"Bearer {self.valves.IMAGE_ROUTER_API_KEY}", "Content-Type": "application/json", } # Resolve the target model. # 1. The caller SHOULD pass a canonical model string like "google/gemini-2.0-flash-exp:free". # 2. For backwards-compatibility we also accept slug-ified pipe ids such as # "imagerouter-google-gemini-2.0-flash-exp-free" or those prefixed with # "ir1." (e.g. "ir1.imagerouter-google-gemini-2.0-flash-exp-free"). raw_model: Optional[str] = body.get("model") if not raw_model or not isinstance(raw_model, str): raise ValueError( "No 'model' field found in the request body. " "Please supply a valid model identifier (e.g. 
'google/gemini-2.0-flash-exp:free')." ) # If the supplied string contains a slug id, callers may prepend an arbitrary # prefix (e.g. "ir1.") before the actual slug that starts with "imagerouter-". # Remove anything before the first occurrence of "imagerouter-" so that we # are left with the pure slug id that exists in the cache. if "imagerouter-" in raw_model and not raw_model.startswith("imagerouter-"): raw_model = raw_model[raw_model.find("imagerouter-") :] if not hasattr(self, "_id_to_model") or not getattr(self, "_id_to_model", {}): # Attempt to populate the mapping; this can raise if the model list endpoint fails self.pipes() model_to_use = self._id_to_model.get(raw_model) if not model_to_use: raise ValueError( f"Unknown model or pipe id '{body.get('model')}'. " "Please provide a canonical model string or a recognised pipe id." ) data = { "prompt": last_user_message if last_user_message else " ", "model": model_to_use, "quality": self.valves.QUALITY.value, "response_format": "url", } # Log the request that's being sent (removing the API key) debug_headers = headers.copy() if "Authorization" in debug_headers: debug_headers["Authorization"] = "Bearer [REDACTED]" print( f"Sending request to Image Router API: {IMAGE_ROUTER_API_BASE_URL}/images/generations" ) print(f"Headers: {debug_headers}") print(f"Data: {data}") if images: print(f"Images: {len(images)} image(s) will be uploaded.") # Send request to Image Router API async with httpx.AsyncClient() as client: try: if images: files = [] for i, image_url in enumerate(images): try: header, encoded = image_url.split(",", 1) content_type = header.split(":")[1].split(";")[0] image_bytes = base64.b64decode(encoded) extension = mimetypes.guess_extension( content_type ) or ".png" filename = f"image_{i}{extension}" files.append( ( "image[]", (filename, image_bytes, content_type), ) ) except Exception as e: yield json.dumps( {"error": f"Failed to process image: {e}"}, ensure_ascii=False, ) return if "Content-Type" in headers: del headers["Content-Type"] response = await client.post( f"{IMAGE_ROUTER_API_BASE_URL}/images/generations", data=data, files=files, headers=headers, timeout=200.0, ) else: headers["Content-Type"] = "application/json" response = await client.post( f"{IMAGE_ROUTER_API_BASE_URL}/images/generations", json=data, headers=headers, timeout=200.0, ) print(f"Response status: {response.status_code}") if response.status_code != 200: error_text = response.text try: error_json = response.json() if "error" in error_json: error_info = error_json["error"] # Try to extract a more specific error message if isinstance(error_info, dict) and "message" in error_info: error_text = error_info["message"] else: error_text = json.dumps(error_info) except (json.JSONDecodeError, KeyError): # If parsing fails or keys are missing, use the raw text pass error_message = f"Error generating image: {error_text}" print(f"Error response: {error_message}") yield error_message await self._emit_status(error_message, done=True) return except httpx.TimeoutException: error_message = "Request to Image Router API timed out. Please try again." 
yield error_message await self._emit_status(error_message, done=True) return except httpx.RequestError as e: error_message = f"Request error: {str(e)}" yield error_message await self._emit_status(error_message, done=True) return response_data = response.json() # Check for API errors in the response body if "error" in response_data: error_message = response_data["error"] if isinstance(error_message, dict): error_message = error_message.get("message", json.dumps(error_message)) final_error_message = f"API Error: {error_message}" await self._emit_status(final_error_message, done=True) return # Ensure the 'data' field is present if "data" not in response_data or not response_data["data"]: error_message = f"Invalid API response: {json.dumps(response_data)}" yield error_message await self._emit_status(error_message, done=True) return # For URL responses (shouldn't happen with our configuration) image_url = response_data["data"][0]["url"] yield f"🎨 Image generated successfully'\n\n" yield f"\n\n" # yield f"Note: This URL will expire in 60 minutes.\n\n" # Final status update await self._emit_status("Image generation complete", done=True) except Exception as e: error_message = f"An error occurred: {str(e)}" error_details = f"Exception type: {type(e).__name__}" stack_trace = traceback.format_exc() print(f"Error in Image Router Image pipe: {error_message} - {error_details}") print(f"Stack trace: {stack_trace}") yield error_message await self._emit_status(error_message, done=True)
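Usage note: Open WebUI lists one pipe per model, with ids slug-ified from the canonical model strings (e.g. "google/gemini-2.0-flash-exp:free" becomes "imagerouter-google-gemini-2.0-flash-exp-free"); the pipe resolves that id back to the canonical string before calling the API. The request it ultimately issues is a plain OpenAI-style image-generation call. The sketch below reproduces that call outside Open WebUI for a text-only prompt; it is illustrative only and assumes the API key is available in an IMAGEROUTER_API_KEY environment variable (a name chosen for this example) and that the response carries the image URL in data[0].url, as the error handling above expects.

import os
import httpx

# Standalone sketch of the request built by pipe() above (text prompt, no input images).
# Endpoint, payload fields, auth header, and timeout mirror the function code.
API_BASE = "https://api.imagerouter.io/v1/openai"

payload = {
    "prompt": "a watercolor lighthouse at dusk",  # example prompt
    "model": "google/gemini-2.0-flash-exp:free",  # canonical model id, not the slug-ified pipe id
    "quality": "auto",
    "response_format": "url",
}

response = httpx.post(
    f"{API_BASE}/images/generations",
    json=payload,
    headers={"Authorization": f"Bearer {os.environ['IMAGEROUTER_API_KEY']}"},  # assumed env var
    timeout=200.0,
)
response.raise_for_status()
print(response.json()["data"][0]["url"])  # URL of the generated image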