Resume Analysis
Tool ID: resume_analysis
Creator: @negar
Downloads: 66+
This tool can analyze and review resumes and display a summary to the user.
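Under the hood the flow is simple: read the uploaded resume (plain text or PDF), extract its text, and ask the model configured in Open WebUI for a summary. Below is a minimal sketch of just the PDF-reading step, mirroring the read_file helper in the tool code further down; it assumes PyPDF2 is installed and that "resume.pdf" is a hypothetical local sample file.

import io
from PyPDF2 import PdfReader

# Extract text from a PDF resume, page by page (mirrors the tool's read_file helper).
with open("resume.pdf", "rb") as f:  # "resume.pdf" is a hypothetical sample file
    reader = PdfReader(io.BytesIO(f.read()))
    text = "\n".join(page.extract_text() or "" for page in reader.pages).strip()

print(text[:500])  # preview the extracted text before sending it for summarization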
README
No README available
Tool Code
import mimetypes
import re
import io
from PyPDF2 import PdfReader
from typing import Awaitable, Callable, List, Dict, Any
from pydantic import BaseModel, Field
from openai import AsyncOpenAI
from base64 import b64decode

"""
title: FileProcessor
author: Anonymous
author_url: https://example.com/file-processor
funding_url: https://patreon.com/Anonymous
version: 0.0.6
changelog:
  - 0.0.1 - Initial upload to Open WebUI community.
  - 0.0.2 - Added file upload functionality.
  - 0.0.3 - Support for any model via WebUI and direct file input.
  - 0.0.4 - Added support for processing files uploaded in chat.
  - 0.0.5 - Fixed debug logging to use event_emitter instead of print.
  - 0.0.6 - Fixed file name validation to support Persian characters and spaces.
"""


def read_file(file_content: bytes, file_name: str) -> str:
    """
    Read the content of a file (text or PDF) from bytes.

    :param file_content: Binary content of the file.
    :param file_name: Name of the file to determine type.
    :return: Extracted text content or error message.
    """
    mime_type, _ = mimetypes.guess_type(file_name)
    if mime_type is None:
        mime_type = "text/plain"
    try:
        # Text files
        if mime_type.startswith("text"):
            return file_content.decode("utf-8")
        # PDF files
        elif mime_type == "application/pdf":
            reader = PdfReader(io.BytesIO(file_content))
            text = ""
            for page in reader.pages:
                extracted = page.extract_text()
                if extracted:
                    text += extracted + "\n"
            return text.strip() if text else "Error: No text extracted from PDF."
        else:
            return f"Error: Unsupported file type: {mime_type}"
    except Exception as e:
        return f"Error: Failed to read file: {str(e)}"


async def query_llm(text: str, base_url: str, api_key: str) -> str:
    """
    Query the LLM configured in Open WebUI to summarize text.

    :param text: Text to summarize.
    :param base_url: Base URL for the WebUI model endpoint.
    :param api_key: API key for WebUI (if required).
    :return: Summary or error message.
    """
    client = AsyncOpenAI(base_url=base_url, api_key=api_key or "dummy-key")
    # Persian prompt: "Please summarize this text and state its key points."
    prompt = f"لطفاً این متن را خلاصه کن و نکات کلیدی آن را بیان کن:\n\n{text[:4000]}"  # Limit text length
    try:
        response = await client.chat.completions.create(
            model="default",  # WebUI will use the active model (e.g., Ollama model)
            messages=[
                {
                    "role": "system",
                    "content": "You are a helpful assistant that summarizes text accurately.",
                },
                {"role": "user", "content": prompt},
            ],
            max_tokens=500,
            temperature=0.7,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        return f"Error: Failed to query LLM: {str(e)}"


class Tools:
    def __init__(self):
        pass

    class UserValves(BaseModel):
        WEBUI_BASE_URL: str = Field(
            default="http://192.168.13.221:3000/v1",
            description="Base URL for Open WebUI model endpoint (e.g., Ollama integration).",
        )
        WEBUI_API_KEY: str = Field(
            default="", description="API key for Open WebUI (if required)."
        )
        MAX_FILE_SIZE_MB: int = Field(
            default=10, description="Maximum file size to process in MB."
        )

    async def process_file(
        self,
        file_content: bytes,
        file_name: str,
        __event_emitter__: Callable[[dict], Awaitable[None]],
        __user__: dict = {},
    ) -> str:
        """
        Process a file (text or PDF) from binary content and return a summary
        using the active WebUI model.

        :param file_content: Binary content of the file.
        :param file_name: Name of the file to determine type.
        :param __event_emitter__: Event emitter for status updates.
        :param __user__: User configuration (valves).
        :return: Summary of the file content or an error message.
        """
        import logging

        logger = logging.getLogger(__name__)
        logger.info(
            f"process_file called with file_name={file_name}, "
            f"file_content type={type(file_content)}"
        )
        await __event_emitter__(
            {
                "data": {
                    "description": f"Starting processing for file: {file_name}...",
                    "status": "in_progress",
                    "done": False,
                },
                "type": "status",
            }
        )

        if not file_name:
            raise ValueError("No file name provided.")
        if not re.match(r"^.+\.(txt|pdf)$", file_name):
            await __event_emitter__(
                {
                    "data": {
                        "description": f"Error: Invalid file name or type: {file_name}. Only .txt and .pdf are supported.",
                        "status": "complete",
                        "done": True,
                    },
                    "type": "status",
                }
            )
            return "Error: Invalid file name or type"

        # Check file size (valves may be absent from __user__, so fall back to the default limit)
        valves = __user__.get("valves")
        max_file_size_mb = getattr(valves, "MAX_FILE_SIZE_MB", None) or 10
        file_size_mb = len(file_content) / (1024 * 1024)
        if file_size_mb > max_file_size_mb:
            await __event_emitter__(
                {
                    "data": {
                        "description": f"Error: File size ({file_size_mb:.2f} MB) exceeds maximum limit ({max_file_size_mb} MB).",
                        "status": "complete",
                        "done": True,
                    },
                    "type": "status",
                }
            )
            return "Error: File size exceeds maximum limit"

        # Read file content
        text = read_file(file_content, file_name)
        if text.startswith("Error:"):
            await __event_emitter__(
                {
                    "data": {"description": text, "status": "complete", "done": True},
                    "type": "status",
                }
            )
            return text

        await __event_emitter__(
            {
                "data": {
                    "description": "File read successfully. Sending to LLM for summarization...",
                    "status": "in_progress",
                    "done": False,
                },
                "type": "status",
            }
        )

        # Query LLM
        base_url = (
            getattr(valves, "WEBUI_BASE_URL", None) or "http://192.168.13.221:3000/v1"
        )
        api_key = getattr(valves, "WEBUI_API_KEY", None) or ""
        summary = await query_llm(text, base_url, api_key)
        if summary.startswith("Error:"):
            await __event_emitter__(
                {
                    "data": {
                        "description": summary,
                        "status": "complete",
                        "done": True,
                    },
                    "type": "status",
                }
            )
            return summary

        await __event_emitter__(
            {
                "data": {
                    "description": "File processed successfully. Summary generated.",
                    "status": "complete",
                    "done": True,
                },
                "type": "status",
            }
        )
        return summary

    async def __call__(
        self,
        messages: List[Dict[str, Any]],
        __event_emitter__: Callable[[dict], Awaitable[None]],
        __user__: dict = {},
        __files__: List[Dict[str, Any]] = [],  # list of uploaded files
    ) -> str:
        # Check whether any files were uploaded
        if not __files__:
            await __event_emitter__(
                {
                    "type": "status",
                    "data": {
                        "description": "❌ هیچ فایلی آپلود نشده است",  # "No file has been uploaded"
                        "done": True,
                    },
                }
            )
            return "خطا: فایلی برای پردازش یافت نشد"  # "Error: no file found to process"

        try:
            # Take the first file from the list
            file = __files__[0]
            file_content = file.get("content", b"")
            file_name = file.get("name", "unknown_file")  # file name

            # Emit a processing-status update
            await __event_emitter__(
                {
                    "type": "status",
                    "data": {
                        "description": f"🔧 در حال پردازش فایل {file_name}...",  # "Processing file ..."
                        "done": False,
                    },
                }
            )

            if isinstance(file_content, str):
                try:
                    # b64decode always returns bytes
                    file_content = b64decode(file_content)
                except Exception as e:
                    await __event_emitter__(
                        {
                            "type": "status",
                            "data": {
                                "description": f"❌ خطا در دیکد فایل: {str(e)}",  # "Error decoding file"
                                "done": True,
                            },
                        }
                    )
                    return f"خطا: دیکد فایل ناموفق - {str(e)}"  # "Error: file decoding failed"

            if not isinstance(file_content, bytes):
                return "Error: File content is not in binary format."

            # Call the main processing function with the full set of parameters
            return await self.process_file(
                file_content=file_content,
                file_name=file_name,
                __event_emitter__=__event_emitter__,
                __user__=__user__,
            )
        except IndexError:
            await __event_emitter__(
                {
                    "type": "status",
                    "data": {
                        "description": "❌ فایل معتبری یافت نشد",  # "No valid file was found"
                        "done": True,
                    },
                }
            )
            return "خطا: ساختار فایل نامعتبر"  # "Error: invalid file structure"
        except Exception as e:
            await __event_emitter__(
                {
                    "type": "status",
                    "data": {
                        "description": f"❌ خطای ناشناخته: {str(e)}",  # "Unknown error"
                        "done": True,
                    },
                }
            )
            return f"خطای سیستمی: {str(e)}"  # "System error"
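The entry point is __call__, so the tool can also be exercised directly outside Open WebUI. Below is a rough usage sketch, not a definitive harness: the module name resume_analysis.py, the sample file resume.pdf, and the stub event emitter are all assumptions that do not appear in the listing; the valves come from the UserValves defaults defined above.

import asyncio
from base64 import b64encode

from resume_analysis import Tools  # hypothetical module name for the code above


async def emitter(event: dict) -> None:
    # Stub event emitter: just print the status updates the tool would send to the UI.
    print(event["data"]["description"])


async def main() -> None:
    tool = Tools()
    with open("resume.pdf", "rb") as f:  # hypothetical sample resume
        payload = b64encode(f.read()).decode()  # __call__ also accepts raw bytes

    summary = await tool(
        messages=[],
        __event_emitter__=emitter,
        __user__={"valves": Tools.UserValves()},  # defaults from the valve definitions
        __files__=[{"name": "resume.pdf", "content": payload}],
    )
    print(summary)


asyncio.run(main())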