Function
pipe
v0.01
GLM BIG MODELS
Function ID
GLM_Models
Creator
@aa995
Downloads
117+
Function to access the BigModel (GLM-4) models.
README
No README available
Function Code
""" title: GLM-4 Pipe version: 0.01 Author : Aa995 Description :https://bigmodel.cn/ offers free credits on thier models which are comparable to GPT4 """ import json import requests from typing import List, Union, Iterator from pydantic import BaseModel, Field from open_webui.utils.misc import ( pop_system_message, ) # Adjust based on your OpenWebUI structure # Set DEBUG to True to enable detailed logging DEBUG = True class Pipe: class Valves(BaseModel): GLM4_API_KEY: str = Field(default="", description="Your GLM-4 API key.") GLM4_ENDPOINT: str = Field( default="https://open.bigmodel.cn/api/paas/v4/chat/completions", description="GLM-4 synchronous API endpoint.", ) def __init__(self): self.type = "glm4" self.id = "glmfunction" self.name = "GLM-4 Models" # Temporarily hardcode the API key for testing self.valves = self.Valves( GLM4_API_KEY="YOUR API KEY GOES HERE" ) if DEBUG: if self.valves.GLM4_API_KEY: print("GLM4_API_KEY is set.") else: print("GLM4_API_KEY is NOT set.") self.models = [ "glm-4-plus", "glm-4-0520", "glm-4", "glm-4-air", "glm-4-airx", "glm-4-long", "glm-4-flash", ] def get_glm4_models(self) -> List[dict]: """ Returns a list of available GLM-4 models without the Pipe ID prefix. OpenWebUI will handle prefixing based on the Pipe's ID. """ return [ {"id": model, "name": model.replace("glm-4", "GLM-4 ")} for model in self.models ] def pipes(self) -> List[dict]: """ OpenWebUI expects a pipes() method that returns available models. """ return self.get_glm4_models() def pipe(self, body: dict) -> Union[str, Iterator[str]]: """ Handles incoming requests and routes them to the appropriate handler (synchronous or streaming) based on the 'stream' flag. """ model_id = body.get("model") if not model_id: return "Error: No model specified." if DEBUG: print(f"Original model_id: {model_id}") # Remove the function ID prefix if present function_id_with_dot = f"{self.id}." if model_id.startswith(function_id_with_dot): model_id = model_id[len(function_id_with_dot) :] if DEBUG: print( f"Removed prefix '{function_id_with_dot}' from model_id, new model_id: {model_id}" ) else: if DEBUG: print("No prefix to remove from model_id.") if DEBUG: print(f"Using model_id: {model_id}") if model_id not in self.models: return f"Error: Unknown GLM-4 model '{model_id}'. Available models are: {', '.join(self.models)}." # Process messages system_message, messages = pop_system_message(body.get("messages", [])) processed_messages = [] # Add system message if it exists if system_message: processed_messages.append( {"role": "system", "content": str(system_message)} ) # Process user and assistant messages for message in messages: content = message.get("content", "") if isinstance(content, list): # Process multimedia content content_parts = [] for item in content: if item["type"] == "text": content_parts.append(item["text"]) elif item["type"] == "image_url": # Currently, GLM-4 does not process images; skip or handle accordingly if DEBUG: print( "Image URLs are not processed by GLM-4. Skipping image." 
) continue combined_content = " ".join(content_parts) if combined_content: processed_messages.append( {"role": message["role"], "content": combined_content} ) else: # Simple text content processed_messages.append({"role": message["role"], "content": content}) stream = body.get("stream", False) try: if stream: return self.call_glm4_stream(body, processed_messages, model_id) else: return self.call_glm4_sync(body, processed_messages, model_id) except Exception as e: if DEBUG: print(f"Error in GLM-4 pipe method: {e}") return f"Error: {e}" def call_glm4_sync(self, body: dict, messages: List[dict], model_id: str) -> str: """ Handles synchronous requests to GLM-4 models. """ api_key = self.valves.GLM4_API_KEY if not api_key: return "Error: GLM-4 API key is not set. Please configure it in OpenWebUI's settings." if DEBUG: print("GLM4_API_KEY retrieved successfully.") url = self.valves.GLM4_ENDPOINT headers = { "Content-Type": "application/json", "Authorization": f"Bearer {api_key}", } if DEBUG: print(f"Authorization header set to: Bearer {'*' * len(api_key)}") print(f"Sending request to URL: {url}") payload = { "model": model_id, "messages": messages, "temperature": body.get("temperature", 0.95), "top_p": body.get("top_p", 0.7), "max_tokens": body.get("max_tokens", 1024), "do_sample": body.get("do_sample", True), "stream": False, } # Include optional parameters from the body optional_fields = ["request_id", "stop", "tools", "tool_choice", "user_id"] for field in optional_fields: if field in body: payload[field] = body[field] if DEBUG: print("GLM-4 Sync Request Payload:") print(json.dumps(payload, indent=2)) response = requests.post(url, headers=headers, json=payload, timeout=60) if response.status_code == 200: response_data = response.json() if DEBUG: print("GLM-4 Sync Response Data:") print(json.dumps(response_data, indent=2)) choices = response_data.get("choices", []) if choices: generated_text = choices[0].get("message", {}).get("content", "") return generated_text else: if DEBUG: print("No choices found in the GLM-4 response.") return "No response generated by the GLM-4 model." else: if DEBUG: print(f"GLM-4 Sync Error: {response.status_code}") print("Response Headers:", response.headers) print("Response Content:", response.text) return f"Error: {response.status_code} {response.text}" def call_glm4_stream( self, body: dict, messages: List[dict], model_id: str ) -> Iterator[str]: """ Handles streaming (SSE) requests to GLM-4 models. """ api_key = self.valves.GLM4_API_KEY if not api_key: yield "Error: GLM-4 API key is not set. Please configure it in OpenWebUI's settings." 
return url = self.valves.GLM4_ENDPOINT headers = { "Content-Type": "application/json", "Authorization": f"Bearer {api_key}", } payload = { "model": model_id, "messages": messages, "temperature": body.get("temperature", 0.95), "top_p": body.get("top_p", 0.7), "max_tokens": body.get("max_tokens", 1024), "do_sample": body.get("do_sample", True), "stream": True, } # Include optional parameters from the body optional_fields = ["request_id", "stop", "tools", "tool_choice", "user_id"] for field in optional_fields: if field in body: payload[field] = body[field] if DEBUG: print("GLM-4 Stream Request Payload:") print(json.dumps(payload, indent=2)) try: with requests.post( url, headers=headers, json=payload, timeout=60, stream=True ) as response: if response.status_code != 200: if DEBUG: print(f"GLM-4 Stream Error: {response.status_code}") print("Response Headers:", response.headers) print("Response Content:", response.text) yield f"Error: {response.status_code} {response.text}" return for line in response.iter_lines(): if line: decoded_line = line.decode("utf-8") if decoded_line.startswith("data:"): content = decoded_line[5:].strip() if content == "[DONE]": break try: data = json.loads(content) choices = data.get("choices", []) if choices: delta = choices[0].get("delta", {}) if "content" in delta: yield delta["content"] except json.JSONDecodeError: if DEBUG: print(f"Failed to parse JSON: {content}") except KeyError as e: if DEBUG: print(f"Unexpected data structure: {e}") except requests.exceptions.RequestException as e: if DEBUG: print(f"GLM-4 Stream Request Exception: {e}") yield f"Error: {e}"
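
For reference, here is a minimal usage sketch showing how the pipe might be exercised outside of OpenWebUI. It assumes the code above is saved as glm4_pipe.py, that the open_webui package is importable, and that a real key replaces the placeholder; the module name, prompt, and parameter values are illustrative, not part of the function itself.

# Hypothetical standalone driver for the pipe above. Assumes the function code
# is saved as glm4_pipe.py and that open_webui is on the Python path.
from glm4_pipe import Pipe

pipe = Pipe()
pipe.valves.GLM4_API_KEY = "YOUR API KEY GOES HERE"  # placeholder; set a real key

# A request body in the shape OpenWebUI passes to pipe(). The "glmfunction."
# prefix is the Pipe ID that pipe() strips before dispatching to the API.
body = {
    "model": "glmfunction.glm-4-flash",
    "messages": [{"role": "user", "content": "Say hello in one sentence."}],
    "stream": True,
    "max_tokens": 128,
}

# With stream=True, pipe() returns an iterator of text deltas parsed from the
# SSE "data:" lines; with stream=False it returns the full reply as one string.
for chunk in pipe.pipe(body):
    print(chunk, end="", flush=True)

In OpenWebUI itself none of this is needed: the valve is set in the function's settings page, and the UI constructs the body and consumes the stream. Note that the hardcoded key in __init__ is for testing only and should be removed in favor of the valve before sharing the function.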