import asyncio
import json
import logging
from typing import List, Dict, Any, Optional

from huggingface_hub import InferenceClient

from components.adaptive_learning import AdaptiveLearningEnvironment
from components.ai_driven_creativity import AIDrivenCreativity
from components.collaborative_ai import CollaborativeAI
from components.cultural_sensitivity import CulturalSensitivityEngine
from components.data_processing import AdvancedDataProcessor
from components.dynamic_learning import DynamicLearner
from components.ethical_governance import EthicalAIGovernance
from components.explainable_ai import ExplainableAI
from components.feedback_manager import ImprovedFeedbackManager
from components.multimodal_analyzer import MultimodalAnalyzer
from components.neuro_symbolic import NeuroSymbolicEngine
from components.quantum_optimizer import QuantumInspiredOptimizer
from components.real_time_data import RealTimeDataIntegrator
from components.self_improving_ai import SelfImprovingAI
from components.user_personalization import UserPersonalizer
from models.cognitive_engine import BroaderPerspectiveEngine
from models.elements import Element
from models.healing_system import SelfHealingSystem
from models.safety_system import SafetySystem
from models.user_profiles import UserProfile
from utils.database import Database
from utils.logger import logger


class AICore:
    """Enhanced AI Core System using open-source models"""

    def __init__(self, config_path: str = "config/ai_assistant_config.json"):
        self.config = self._load_config(config_path)

        # Hugging Face inference client and the open-source models used per task
        self.inference_client = InferenceClient()
        self.models = {
            'text-generation': "gpt2",
            'sentiment': "distilbert-base-uncased-finetuned-sst-2-english",
            'embeddings': "sentence-transformers/all-MiniLM-L6-v2"
        }

        # Cognition, resilience, and safety subsystems
        self.cognition = BroaderPerspectiveEngine()
        self.self_healing = SelfHealingSystem(self.config)
        self.safety_system = SafetySystem()
        self.elements = self._initialize_elements()

        # Persistence, personalization, and learning components
        self.database = Database()
        self.user_profiles = UserProfile(self.database)
        self.feedback_manager = ImprovedFeedbackManager(self.database)
        self.context_manager = AdaptiveLearningEnvironment()
        self.data_processor = AdvancedDataProcessor()
        self.dynamic_learner = DynamicLearner()

    def _load_config(self, config_path: str) -> Dict[str, Any]:
        """Load configuration from JSON file"""
        try:
            with open(config_path, 'r') as f:
                return json.load(f)
        except Exception as e:
            logger.error(f"Failed to load config: {e}")
            return {}

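    # Illustrative config shape (a sketch, not the project's authoritative
    # schema): only the 'elements' key is consumed in this section, and the
    # names below are placeholders.
    #
    # {
    #     "elements": ["perception", "reasoning", "memory"]
    # }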
    def _initialize_elements(self) -> List[Element]:
        """Initialize system elements"""
        return [Element(name) for name in self.config.get('elements', [])]

    async def _generate_huggingface_response(self, query: str, task: str = 'text-generation') -> Any:
        """Generate a response using Hugging Face models"""
        try:
            if task == 'text-generation':
                # InferenceClient.text_generation returns the generated string directly
                response = self.inference_client.text_generation(
                    query,
                    model=self.models['text-generation'],
                    max_new_tokens=100
                )
                return response

            elif task == 'sentiment':
                # text_classification returns classification elements with
                # .label and .score attributes; keep the top-ranked one
                response = self.inference_client.text_classification(
                    query,
                    model=self.models['sentiment']
                )
                return {
                    "score": response[0].score,
                    "label": response[0].label
                }

            elif task == 'embeddings':
                response = self.inference_client.feature_extraction(
                    query,
                    model=self.models['embeddings']
                )
                return response

            # Unrecognized task
            return None

        except Exception as e:
            logger.error(f"Hugging Face API error ({task}): {str(e)}")
            return None

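    # Note on the helper above: with the current huggingface_hub client it is
    # expected to return a plain string for 'text-generation', a {"score",
    # "label"} dict for 'sentiment' (POSITIVE/NEGATIVE for the SST-2 model),
    # and a numeric array for 'embeddings'; exact shapes may vary by version.
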
    async def process_input(self, text: str, context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Process input text through multiple perspectives"""
        try:
            # Sentiment of the raw input
            sentiment = await self._generate_huggingface_response(text, 'sentiment')

            perspectives = []

            # Text-generation perspective
            generated_text = await self._generate_huggingface_response(text)
            if generated_text:
                perspectives.append({
                    "type": "text-generation",
                    "content": generated_text
                })

            # Semantic (embedding) perspective
            embeddings = await self._generate_huggingface_response(text, 'embeddings')
            if embeddings is not None:
                perspectives.append({
                    "type": "semantic",
                    "content": "Semantic analysis complete"
                })

            # Cognitive perspective from the broader-perspective engine
            cognitive_result = self.cognition.process(text, context)
            perspectives.append({
                "type": "cognitive",
                "content": cognitive_result
            })

            # Final response conditioned on the gathered perspectives
            final_response = await self._generate_huggingface_response(
                text + "\n\nContext: " + str(perspectives)
            )

            explanation = "Response generated using ethical AI principles"

            return {
                "insights": perspectives,
                "response": final_response,
                "sentiment": sentiment,
                "explanation": explanation,
                "status": "success"
            }

        except Exception as e:
            logger.error(f"Error in process_input: {str(e)}")
            return {
                "error": str(e),
                "status": "error"
            }

    async def shutdown(self):
        """Cleanup resources"""
        try:
            await self.database.close()
        except Exception as e:
            logger.error(f"Error during shutdown: {e}")
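

# Minimal usage sketch (an illustrative example, not part of the class API):
# it assumes config/ai_assistant_config.json and the components/models/utils
# packages imported above exist in this project, and that Hugging Face
# inference access is configured (e.g. a token in the environment).
async def _demo() -> None:
    core = AICore()
    result = await core.process_input("Summarize the benefits of open-source AI models.")
    print(result.get("response"))
    print(result.get("sentiment"))
    await core.shutdown()


if __name__ == "__main__":
    asyncio.run(_demo())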