"""
title: GPT-image-1 Azure Manifold
description: A manifold function to integrate Azure's GPT-image models into Open WebUI.
author: Pierre-Louis BESCOND ([email protected])
version: 0.0.1
license: MIT
requirements: pydantic
environment_variables: AZURE_OPENAI_API_KEY, AZURE_ENDPOINT
"""
import os
from typing import Iterator, List, Union
from open_webui.utils.misc import get_last_user_message
from pydantic import BaseModel, Field
import json
import requests
from openai import AzureOpenAI
# Code adapted/inspired from:
# https://openwebui.com/f/raskoll/dalle_3
# https://openwebui.com/f/spammenot/gpt_image_1
#
# The script only handles image generation, not editing.
# Feel free to send me any improvements you notice or develop.
class Pipe:
    class Valves(BaseModel):
        AZURE_ENDPOINT: str = Field(
            default="https://yourAzureEndpoint.openai.azure.com/",
            description="Azure OpenAI API Endpoint",
        )
        AZURE_OPENAI_API_KEY: str = Field(
            default="", description="Your Azure OpenAI API key"
        )
        OPENAI_API_VERSION: str = Field(
            default="2025-04-01-preview", description="Your Azure OpenAI API version"
        )
        DEPLOYMENT_NAME: str = Field(
            default="gpt-image-1", description="GPT model deployment name"
        )
        IMAGE_SIZE: str = Field(
            default="1024x1024", description="Generated image size"
        )
        NUM_IMAGES: int = Field(default=1, description="Number of images to generate")
        QUALITY: str = Field(default="medium", description="Selected quality level")
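        # NOTE (assumption; check your deployment and API version): gpt-image-1
        # typically accepts IMAGE_SIZE values of "1024x1024", "1024x1536", or
        # "1536x1024", and QUALITY values of "low", "medium", or "high".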

    def __init__(self):
        self.type = "manifold"
        self.id = "azure_gpt_image_1"
        self.name = "Azure - GPT-image-1"
        self.valves = self.Valves(
            AZURE_ENDPOINT=os.getenv(
                "AZURE_ENDPOINT", "https://yourAzureEndpoint.openai.azure.com/"
            ),
            AZURE_OPENAI_API_KEY=os.getenv("AZURE_OPENAI_API_KEY", ""),
        )

    def get_openai_assistants(self) -> List[dict]:
        # Expose the model only when an API key has been configured
        if self.valves.AZURE_OPENAI_API_KEY:
            self.client = AzureOpenAI(
                api_version=self.valves.OPENAI_API_VERSION,
                azure_endpoint=self.valves.AZURE_ENDPOINT,
                api_key=self.valves.AZURE_OPENAI_API_KEY,
            )
            return [{"id": "gpt-image-1", "name": ""}]
        return []

    def pipes(self) -> List[dict]:
        return self.get_openai_assistants()

    def pipe(self, body: dict) -> Union[str, Iterator[str]]:
        if not self.valves.AZURE_OPENAI_API_KEY:
            yield "Error: AZURE_OPENAI_API_KEY is not set"
            return
        # Extract the user message (prompt) from the body
        user_message = get_last_user_message(body["messages"])
        # Build the path and body of the request
        base_path = f"openai/deployments/{self.valves.DEPLOYMENT_NAME}/images"
        params = f"?api-version={self.valves.OPENAI_API_VERSION}"
        generation_url = f"{self.valves.AZURE_ENDPOINT}{base_path}/generations{params}"
        generation_body = {
            "prompt": user_message,
            "n": self.valves.NUM_IMAGES,
            "size": self.valves.IMAGE_SIZE,
            "quality": self.valves.QUALITY,
            "output_format": "png",
        }
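        # The generations endpoint is expected to return JSON shaped roughly like
        # {"created": ..., "data": [{"b64_json": "<base64-encoded PNG>"}, ...]}
        # (shape inferred from the fields read below; check against your API version).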
        try:
            # Send the request to the Azure GPT-image-1 API
            response = requests.post(
                generation_url,
                headers={
                    "Api-Key": self.valves.AZURE_OPENAI_API_KEY,
                    "Content-Type": "application/json",
                },
                json=generation_body,
            )
            payload = response.json()
            # Extract the generated images and display them in the UI as
            # Markdown-embedded base64 data URIs
            for idx, item in enumerate(payload["data"]):
                b64_img = item["b64_json"]
                yield f"![image_{idx}](data:image/png;base64,{b64_img})\n"
        except json.JSONDecodeError as e:
            yield f"Error parsing JSON response: {str(e)}. Raw response: {response.text}"
        except Exception as e:
            yield f"An error occurred: {str(e)}"