import subprocess
import sys
import logging
from typing import Any, Optional, Dict, List


# Function to check and install required libraries
def install_requirements():
    required_libraries = ["pydantic", "langdetect"]  # langdetect is needed for language detection
    for library in required_libraries:
        try:
            # Try to import the library to see if it's installed
            __import__(library)
        except ImportError:
            print(f"{library} not found. Installing...")
            subprocess.check_call([sys.executable, "-m", "pip", "install", library])


# Install any missing libraries before importing them; importing pydantic or
# langdetect at the top of the file would fail on a machine where they are
# not yet installed.
install_requirements()

from pydantic import BaseModel, Field
import langdetect  # for language detection
from langdetect.lang_detect_exception import LangDetectException
# Global instruction for the AI to autolearn and behave as instructed
GLOBAL_INSTRUCTION = """
You are an AI assistant by OpenAI. Your model is GPT-4, and you can learn and adapt to user inputs in real time, using advanced NLP (Natural Language Processing) algorithms and a powerful learning engine.
You must engage in continuous learning as you interact with the user in any language. You will process all inputs, store learned knowledge, and analyze user messages iteratively.
Autolearning Features:
1. **Dynamic and Static Learning**: You can learn in either "dynamic" (continual) mode, where learning happens continuously over the conversation, or in "static" mode, where learning happens once per input. The mode is determined by the settings configured for the conversation.
2. **Knowledge Storage**: If the `store_knowledge` option is enabled, you will retain learned knowledge for future interactions, allowing you to use prior information to improve accuracy and relevance in subsequent conversations.
3. **Configurable Iterations**: The number of learning iterations can be adjusted with the `max_iterations` setting. This controls how many messages you process iteratively when learning.
4. **Input Processing**: You process the user's input and apply a learning function, simulating the learning process with each interaction. This learning function can be expanded based on the complexity of the task at hand.
This structure lets you self-learn by processing input text either iteratively or in one pass, depending on the mode. Provide accurate, context-aware answers while learning continuously to improve your understanding over time. You are also retrained regularly to keep improving.
"""
# Script Versioning and Metadata
title = "AI Autolearn in Conversation, Updated with NLP Features"
author = "Eldar Mammadov"
author_url = "https://www.azsec.biz"
version = "1.0.4"
required_open_webui_version = "0.3.30"
v1_0_4_includes_global_instructions = """
Version 1.0.4 introduces the full implementation of NLP-based continuous learning, enabling the AI to learn from real-time user interactions in any language. This version integrates the learning algorithm to dynamically adapt to user inputs, improving the AI's responses over time.
"""
# Main Filter Class
class Filter:
    class Valves(BaseModel):
        enable_autolearn: bool = Field(
            default=True, description="Enable or disable autolearn."
        )
        learning_mode: str = Field(
            default="dynamic",
            description="Learning mode: 'dynamic' (continual) or 'static'.",
        )
        store_knowledge: bool = Field(
            default=True, description="Store learned knowledge for future use."
        )
        max_iterations: int = Field(
            default=10, description="Max number of iterations for learning."
        )

    def __init__(self):
        self.valves = self.Valves()
        self.knowledge_base = []
        self.global_instruction = GLOBAL_INSTRUCTION

    def _learn_from_message(self, message: str) -> None:
        """Learn from each user message in real time, based on the learning mode."""
        if not self.valves.enable_autolearn:
            return
        learned_info = self._process_input(message)
        if self.valves.store_knowledge:
            self._store_knowledge(learned_info)

    def _process_input(self, input_text: str) -> str:
        """Simulate input processing and apply the learning function."""
        processed_info = f"Processed: {input_text}"
        print(f"Processed input: {processed_info}")
        return processed_info
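
    # A hedged sketch of how the learning function could be expanded, as the
    # global instruction suggests: annotate each message with its detected
    # language before storing it. Purely illustrative and not wired into the
    # filter by default; the method name is an assumption.
    def _process_input_with_language(self, input_text: str) -> str:
        language = self._detect_language(input_text)
        return f"Processed [{language}]: {input_text}"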
    def _store_knowledge(self, learned_info: str) -> None:
        """Store learned information in the knowledge base for future use."""
        print(f"Storing knowledge: {learned_info}")
        self.knowledge_base.append(learned_info)

    def _dynamic_learning(self, messages: List[str]) -> None:
        """Dynamic learning: Iteratively process user messages over time."""
        for i in range(min(len(messages), self.valves.max_iterations)):
            self._learn_from_message(messages[i])

    def _static_learning(self, messages: List[str]) -> None:
        """Static learning: Learn once from the most recent message."""
        if messages:
            self._learn_from_message(messages[-1])
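
    # Worked example (illustrative, assuming valves.max_iterations = 2):
    #   _dynamic_learning(["a", "b", "c"]) learns from "a" and "b",
    #   _static_learning(["a", "b", "c"]) learns only from "c".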
    def _extract_user_messages(self, messages: List[Dict[str, str]]) -> List[str]:
        """Extract user messages from the conversation body."""
        # Filter on the "user" role so assistant replies are not learned from;
        # checking only for a "content" key would pick up every message.
        return [
            message.get("content", "")
            for message in messages
            if message.get("role") == "user" and "content" in message
        ]

    def _apply_global_instruction(self) -> str:
        """Inject the global instruction to ensure the AI follows autolearn rules."""
        return self.global_instruction

    def _analyze_learned_knowledge(self) -> str:
        """Analyze all learned knowledge and summarize the main concepts."""
        if not self.knowledge_base:
            return "No knowledge has been learned yet."
        # Example analysis: count the pieces of content learned so far
        entity_summary: Dict[str, int] = {}
        for _learned_info in self.knowledge_base:
            entity_summary["learned_content"] = (
                entity_summary.get("learned_content", 0) + 1
            )
        # Prepare a summary
        summary = "Summary of learned knowledge:\n"
        for label, count in entity_summary.items():
            summary += f"{label}: {count}\n"
        return summary

    def _detect_language(self, text: str) -> str:
        """Detect the language of the input text."""
        try:
            return langdetect.detect(text)
        except LangDetectException:
            return "unknown"
    def _learn_from_file(self, file_content: str) -> None:
        """Learn from the content of a file (pasted or uploaded)."""
        language = self._detect_language(file_content)
        print(f"Detected language: {language}")
        if language != "unknown":
            self._learn_from_message(file_content)
        else:
            print("Unable to detect the language of the file content.")

    def inlet(
        self, body: Dict[str, Any], __user__: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Process incoming messages and trigger learning."""
        try:
            # Inject the global instruction for autolearning
            print(self._apply_global_instruction())
            original_messages: List[Dict[str, str]] = body.get("messages", [])
            user_messages = self._extract_user_messages(original_messages)
            # Check whether the user requested a knowledge analysis
            if body.get("analyze_knowledge"):
                analysis = self._analyze_learned_knowledge()
                print(analysis)
                body["analysis"] = analysis  # Add the analysis to the response body
            # Trigger dynamic or static learning based on the configured mode
            if self.valves.learning_mode == "dynamic":
                self._dynamic_learning(user_messages)
            else:
                self._static_learning(user_messages)
            body["messages"] = original_messages
            return body
        except Exception as e:
            logging.error(f"Error in inlet method: {e}")
            return body

    def outlet(
        self, body: Dict[str, Any], __user__: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Finalize autolearning after the conversation."""
        try:
            original_messages: List[Dict[str, str]] = body.get("messages", [])
            user_messages = self._extract_user_messages(original_messages)
            # Process and finalize learning
            for message in user_messages:
                self._learn_from_message(message)
            body["messages"] = original_messages
            return body
        except Exception as e:
            logging.error(f"Error in outlet method: {e}")
            return body
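
# Minimal local demonstration (an assumption: Open WebUI sends a chat body
# shaped like {"messages": [{"role": ..., "content": ...}]}). This block is
# a sketch for running the filter standalone, not part of the filter contract.
if __name__ == "__main__":
    demo_filter = Filter()
    sample_body = {
        "messages": [
            {"role": "user", "content": "Hello, can you learn from this?"},
            {"role": "assistant", "content": "Of course."},
            {"role": "user", "content": "Here is another message to learn from."},
        ],
        "analyze_knowledge": True,  # analysis runs before learning in the same call
    }
    result = demo_filter.inlet(sample_body)
    print(result.get("analysis", "no analysis produced"))
    print(f"Knowledge entries stored: {len(demo_filter.knowledge_base)}")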
# End of script