Raiff1982 committed
Commit 559af1d · verified · 1 Parent(s): fa7cae3

Upload 24 files

src/__pycache__/ai_core.cpython-313.pyc ADDED
Binary file (33 kB)

src/__pycache__/bot.cpython-313.pyc ADDED
Binary file (2.62 kB)

src/__pycache__/config.cpython-313.pyc ADDED
Binary file (759 Bytes)
src/aegis.py ADDED
@@ -0,0 +1,819 @@
+ import json
+ import hashlib
+ import threading
+ import logging
+ import sqlite3
+ from datetime import datetime, timedelta
+ from abc import ABC, abstractmethod
+ from collections import defaultdict
+ from typing import Any, Dict, Optional, List, Tuple
+ import concurrent.futures
+ import networkx as nx
+ import plotly.graph_objects as go
+ import pandas as pd
+ import numpy as np
+ import requests
+ from transformers import pipeline, AutoTokenizer, AutoModel
+ import torch
+ import psutil
+ import argparse
+ from flask import Flask, request, render_template, send_file
+ import os
+ import importlib.util
+ import syft as sy
+ from copy import deepcopy
+
+ # Suppress TensorFlow logs to avoid CUDA factory conflicts
+ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
+
+ # Setup logging with configurable level
+ def setup_logging(log_level: str = "INFO"):
+     logging.basicConfig(
+         level=getattr(logging, log_level.upper(), logging.INFO),
+         format='%(asctime)s - %(levelname)s - %(threadName)s - %(message)s',
+         handlers=[logging.FileHandler('aegis_council.log'), logging.StreamHandler()]
+     )
+
+ # === CONFIGURATION LOADER ===
+ def load_config(config_path: str = "config.json") -> Dict[str, Any]:
+     default_config = {
+         "meta_judge_weights": {"influence": 0.5, "reliability": 0.3, "severity": 0.2},
+         "temporal_decay_thresholds": {"stable": 0.3, "volatile": 0.7},
+         "virtue_weights": {
+             "compassion": [0.7, 0.3, -0.1],
+             "integrity": [0.4, -0.6, 0.2],
+             "courage": [0.1, 0.5, 0.4],
+             "wisdom": [0.3, -0.7, 0.2]
+         },
+         "memory_decay_days": 30,
+         "memory_max_entries": 10000,
+         "log_level": "INFO",
+         "federated_learning": {"num_clients": 2, "aggregation_rounds": 1}
+     }
+     try:
+         if os.path.exists(config_path):
+             with open(config_path, 'r') as f:
+                 config = json.load(f)
+             for key in default_config:
+                 if key not in config:
+                     config[key] = default_config[key]
+                 elif isinstance(default_config[key], dict):
+                     config[key].update({k: v for k, v in default_config[key].items() if k not in config[key]})
+             return config
+         logging.warning(f"Config file {config_path} not found, using defaults")
+         return default_config
+     except Exception as e:
+         logging.error(f"Error loading config: {e}")
+         return default_config
+
+ # === BLOCKCHAIN FOR AUDITABILITY ===
+ class Blockchain:
+     def __init__(self):
+         self.chain = [{"index": 0, "timestamp": datetime.now().isoformat(), "data": "Genesis Block", "prev_hash": "0"}]
+         self.logger = logging.getLogger('Blockchain')
+
+     def add_block(self, data: Dict[str, Any]) -> None:
+         try:
+             prev_block = self.chain[-1]
+             block = {
+                 "index": len(self.chain),
+                 "timestamp": datetime.now().isoformat(),
+                 "data": json.dumps(data),
+                 "prev_hash": self._hash_block(prev_block)
+             }
+             block["hash"] = self._hash_block(block)
+             self.chain.append(block)
+             self.logger.info(f"Added block {block['index']} with hash {block['hash']}")
+         except Exception as e:
+             self.logger.error(f"Error adding block: {e}")
+
+     def _hash_block(self, block: Dict[str, Any]) -> str:
+         try:
+             block_str = json.dumps(block, sort_keys=True)
+             return hashlib.sha256(block_str.encode()).hexdigest()
+         except Exception as e:
+             self.logger.error(f"Error hashing block: {e}")
+             return ""
+
+     def verify(self) -> bool:
+         try:
+             for i in range(1, len(self.chain)):
+                 current = self.chain[i]
+                 prev = self.chain[i-1]
+                 if current["prev_hash"] != self._hash_block(prev):
+                     self.logger.error(f"Blockchain verification failed at block {i}")
+                     return False
+             self.logger.info("Blockchain verified successfully")
+             return True
+         except Exception as e:
+             self.logger.error(f"Error verifying blockchain: {e}")
+             return False
+
+ # === MEMORY AND SIGNAL LAYERS ===
+ class NexusMemory:
+     def __init__(self, max_entries: int = 10000, decay_days: int = 30, db_path: str = "nexus_memory.db"):
+         self.store = defaultdict(dict)
+         self.max_entries = max_entries
+         self.decay_days = decay_days
+         self.lock = threading.Lock()
+         self.logger = logging.getLogger('NexusMemory')
+         self.conn = sqlite3.connect(db_path, check_same_thread=False)
+         self.blockchain = Blockchain()
+         self.conn.execute("""
+             CREATE TABLE IF NOT EXISTS memory (
+                 key TEXT PRIMARY KEY,
+                 value TEXT,
+                 timestamp TEXT,
+                 emotion_weight FLOAT
+             )
+         """)
+         self.conn.commit()
+         self._load_from_db()
+
+     def _load_from_db(self):
+         try:
+             cursor = self.conn.cursor()
+             cursor.execute("SELECT key, value, timestamp, emotion_weight FROM memory")
+             for key, value, timestamp, emotion_weight in cursor.fetchall():
+                 self.store[key] = {
+                     "value": json.loads(value),
+                     "timestamp": datetime.fromisoformat(timestamp),
+                     "emotion_weight": emotion_weight
+                 }
+             self.logger.info(f"Loaded {len(self.store)} entries from database")
+         except Exception as e:
+             self.logger.error(f"Error loading from database: {e}")
+
+     def write(self, key: str, value: Any, emotion_weight: float = 0.5) -> Optional[str]:
+         try:
+             if not isinstance(key, str) or not (0 <= emotion_weight <= 1):
+                 self.logger.error(f"Invalid key type {type(key)} or emotion_weight {emotion_weight}")
+                 return None
+             hashed = hashlib.md5(key.encode()).hexdigest()
+             timestamp = datetime.now()
+             with self.lock:
+                 if len(self.store) >= self.max_entries:
+                     oldest = min(self.store.items(), key=lambda x: x[1].get('timestamp', timestamp))[0]
+                     self.logger.info(f"Removing oldest entry: {oldest}")
+                     self.conn.execute("DELETE FROM memory WHERE key = ?", (oldest,))
+                     del self.store[oldest]
+                 self.store[hashed] = {
+                     "value": value,
+                     "timestamp": timestamp,
+                     "emotion_weight": emotion_weight
+                 }
+                 self.conn.execute(
+                     "INSERT OR REPLACE INTO memory (key, value, timestamp, emotion_weight) VALUES (?, ?, ?, ?)",
+                     (hashed, json.dumps(value), timestamp.isoformat(), emotion_weight)
+                 )
+                 self.conn.commit()
+             self.blockchain.add_block({"key": hashed, "value": value, "timestamp": timestamp.isoformat()})
+             self.logger.debug(f"Wrote key: {hashed}, value: {value}")
+             return hashed
+         except Exception as e:
+             self.logger.error(f"Error writing to memory: {e}")
+             return None
+
+     def read(self, key: str) -> Optional[Any]:
+         try:
+             hashed = hashlib.md5(key.encode()).hexdigest()
+             with self.lock:
+                 entry = self.store.get(hashed)
+                 if not entry:
+                     cursor = self.conn.cursor()
+                     cursor.execute("SELECT value, timestamp, emotion_weight FROM memory WHERE key = ?", (hashed,))
+                     row = cursor.fetchone()
+                     if not row:
+                         self.logger.debug(f"Key not found: {hashed}")
+                         return None
+                     entry = {
+                         "value": json.loads(row[0]),
+                         "timestamp": datetime.fromisoformat(row[1]),
+                         "emotion_weight": row[2]
+                     }
+                     self.store[hashed] = entry
+                 if self._is_decayed(entry["timestamp"], entry.get("emotion_weight", 0.5)):
+                     self.logger.info(f"Removing decayed entry: {hashed}")
+                     self.conn.execute("DELETE FROM memory WHERE key = ?", (hashed,))
+                     self.conn.commit()
+                     del self.store[hashed]
+                     return None
+                 self.logger.debug(f"Read key: {hashed}, value: {entry['value']}")
+                 return entry["value"]
+         except Exception as e:
+             self.logger.error(f"Error reading from memory: {e}")
+             return None
+
+     def _is_decayed(self, timestamp: datetime, emotion_weight: float) -> bool:
+         try:
+             age = (datetime.now() - timestamp).total_seconds() / (24 * 3600)
+             decay_factor = np.exp(-age / (self.decay_days * (1.5 - emotion_weight)))
+             return decay_factor < 0.1
+         except Exception as e:
+             self.logger.error(f"Error checking decay: {e}")
+             return True
+
+     def audit(self) -> Dict[str, Any]:
+         try:
+             with self.lock:
+                 audit_data = {
+                     k: {
+                         "timestamp": v["timestamp"],
+                         "emotion_weight": v["emotion_weight"],
+                         "decayed": self._is_decayed(v["timestamp"], v["emotion_weight"])
+                     }
+                     for k, v in self.store.items()
+                 }
+             self.blockchain.add_block({"audit": audit_data})
+             return audit_data
+         except Exception as e:
+             self.logger.error(f"Error auditing memory: {e}")
+             return {}
+
+ # === DATA FETCHER FOR REAL-TIME DATA ===
+ class DataFetcher:
+     def __init__(self):
+         self.logger = logging.getLogger('DataFetcher')
+
+     def fetch_x_posts(self, query: str) -> List[Dict[str, Any]]:
+         try:
+             mock_response = [
+                 {"content": f"Sample post about {query}: We value truth and empathy.", "timestamp": datetime.now().isoformat()}
+             ]
+             self.logger.info(f"Fetched {len(mock_response)} posts for query: {query}")
+             return mock_response
+         except Exception as e:
+             self.logger.error(f"Error fetching posts: {e}")
+             return []
+
+ # === PERFORMANCE MONITOR ===
+ def monitor_performance() -> Dict[str, float]:
+     try:
+         return {
+             "cpu_percent": psutil.cpu_percent(interval=1),
+             "memory_percent": psutil.virtual_memory().percent,
+             "process_memory_mb": psutil.Process().memory_info().rss / 1024 / 1024
+         }
+     except Exception as e:
+         logging.error(f"Error monitoring performance: {e}")
+         return {"cpu_percent": 0.0, "memory_percent": 0.0, "process_memory_mb": 0.0}
+
+ # === FEDERATED LEARNING ===
+ class FederatedTrainer:
+     def __init__(self, num_clients: int):
+         self.num_clients = num_clients
+         self.logger = logging.getLogger('FederatedTrainer')
+         # Hook torch once and reuse it for the simulated clients
+         # (matches the sy.TorchHook(torch) usage in src/aegis_integration/aegis.py).
+         hook = sy.TorchHook(torch)
+         self.clients = [sy.VirtualWorker(hook, id=f"client_{i}") for i in range(num_clients)]
+
+     def train(self, weights: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
+         try:
+             client_updates = []
+             for client in self.clients:
+                 client_weights = deepcopy(weights)
+                 for virtue in client_weights:
+                     client_weights[virtue] += np.random.normal(0, 0.01, size=client_weights[virtue].shape)
+                 client_updates.append(client_weights)
+             aggregated = {}
+             for virtue in weights:
+                 aggregated[virtue] = np.mean([update[virtue] for update in client_updates], axis=0)
+             self.logger.info("Federated training completed")
+             return aggregated
+         except Exception as e:
+             self.logger.error(f"Error in federated training: {e}")
+             return weights
+
+ # === QUANTUM-INSPIRED OPTIMIZATION ===
+ def anneal_layout(graph: nx.DiGraph, iterations: int = 1000, temp: float = 10.0) -> Dict[str, Tuple[float, float]]:
+     try:
+         nodes = list(graph.nodes())
+         pos = {node: (np.random.uniform(-1, 1), np.random.uniform(-1, 1)) for node in nodes}
+         temp_min = 0.01
+         alpha = 0.99
+
+         def energy(positions):
+             e = 0
+             for u, v in graph.edges():
+                 d = np.linalg.norm(np.array(positions[u]) - np.array(positions[v]))
+                 e += 1 / (d + 1e-6)
+             for u in nodes:
+                 for v in nodes:
+                     if u != v:
+                         d = np.linalg.norm(np.array(positions[u]) - np.array(positions[v]))
+                         e += d
+             return e
+
+         best_pos = pos
+         current_energy = energy(pos)
+         best_energy = current_energy
+         for _ in range(iterations):
+             new_pos = deepcopy(pos)
+             for node in nodes:
+                 new_pos[node] = (new_pos[node][0] + np.random.uniform(-0.1, 0.1) * temp,
+                                  new_pos[node][1] + np.random.uniform(-0.1, 0.1) * temp)
+             new_energy = energy(new_pos)
+             # Metropolis acceptance: track the current state separately from the best
+             # state, so accepting a worse move never overwrites the best energy/layout.
+             if new_energy < current_energy or np.random.random() < np.exp((current_energy - new_energy) / temp):
+                 pos = new_pos
+                 current_energy = new_energy
+                 if current_energy < best_energy:
+                     best_energy = current_energy
+                     best_pos = pos
+             temp *= alpha
+             if temp < temp_min:
+                 break
+         return best_pos
+     except Exception as e:
+         logging.error(f"Error in anneal_layout: {e}")
+         return nx.spring_layout(graph)
+
+ # === BASE AGENT INTERFACE ===
+ class AegisAgent(ABC):
+     def __init__(self, name: str, memory: NexusMemory):
+         self.name = name
+         self.memory = memory
+         self.result: Dict[str, Any] = {}
+         self.explanation: str = ""
+         self.influence: Dict[str, float] = {}
+         self.logger = logging.getLogger(f'AegisAgent.{name}')
+
+     @abstractmethod
+     def analyze(self, input_data: Dict[str, Any]) -> None:
+         pass
+
+     @abstractmethod
+     def report(self) -> Dict[str, Any]:
+         pass
+
+     def collaborate(self, message: Dict[str, Any], target_agent: str) -> None:
+         try:
+             # Use a deterministic key (no timestamp) so the recipient can look the
+             # message up again in _reanalyze_with_collaboration; a timestamped key
+             # could never be reconstructed by the reader.
+             mem_key = f"collab_{self.name}_{target_agent}"
+             self.memory.write(mem_key, message, emotion_weight=0.7)
+             self.logger.debug(f"Sent collaboration message to {target_agent}: {message}")
+         except Exception as e:
+             self.logger.error(f"Error in collaboration: {e}")
+
+ # === AGENT COUNCIL CORE ===
+ class AegisCouncil:
+     def __init__(self, config: Dict[str, Any]):
+         self.memory = NexusMemory(max_entries=config["memory_max_entries"], decay_days=config["memory_decay_days"])
+         self.agents: List[AegisAgent] = []
+         self.reports: Dict[str, Dict[str, Any]] = {}
+         self.graph = nx.DiGraph()
+         self.logger = logging.getLogger('AegisCouncil')
+         self.fetcher = DataFetcher()
+         self.config = config
+         self.federated_trainer = FederatedTrainer(config["federated_learning"]["num_clients"])
+
+     def register_agent(self, agent: AegisAgent) -> None:
+         try:
+             self.agents.append(agent)
+             self.logger.info(f"Registered agent: {agent.name}")
+         except Exception as e:
+             self.logger.error(f"Error registering agent: {e}")
+
+     def register_dynamic_agent(self, module_path: str, class_name: str) -> None:
+         try:
+             spec = importlib.util.spec_from_file_location("custom_agent", module_path)
+             module = importlib.util.module_from_spec(spec)
+             spec.loader.exec_module(module)
+             agent_class = getattr(module, class_name)
+             if not issubclass(agent_class, AegisAgent):
+                 raise ValueError(f"{class_name} is not a subclass of AegisAgent")
+             agent = agent_class(f"Dynamic_{class_name}", self.memory)
+             self.register_agent(agent)
+             self.logger.info(f"Dynamically registered agent: {agent.name}")
+         except Exception as e:
+             self.logger.error(f"Error registering dynamic agent {class_name}: {e}")
+
+     def dispatch(self, input_data: Dict[str, Any], max_retries: int = 3) -> bool:
+         try:
+             if not isinstance(input_data, dict):
+                 self.logger.error("Input data must be a dictionary")
+                 return False
+             if "text" not in input_data or "overrides" not in input_data:
+                 self.logger.warning("Input data missing 'text' or 'overrides' keys")
+
+             self.reports.clear()
+             self.graph.clear()
+             performance = monitor_performance()
+             self.logger.info(f"Dispatch started. Performance: {performance}")
+
+             for attempt in range(max_retries):
+                 try:
+                     with concurrent.futures.ThreadPoolExecutor(max_workers=len(self.agents)) as executor:
+                         future_to_agent = {executor.submit(agent.analyze, input_data): agent for agent in self.agents}
+                         for future in concurrent.futures.as_completed(future_to_agent):
+                             agent = future_to_agent[future]
+                             try:
+                                 future.result()
+                             except Exception as e:
+                                 self.logger.error(f"Attempt {attempt + 1}: Error in agent {agent.name}: {e}")
+                                 self.reports[agent.name] = {"error": str(e), "explanation": "Agent failed to process"}
+
+                     # At this point self.reports only holds entries for agents that
+                     # failed the first pass; only the successful ones share results.
+                     for agent in self.agents:
+                         if "error" not in self.reports.get(agent.name, {}):
+                             for target in self.agents:
+                                 if target.name != agent.name:
+                                     agent.collaborate(agent.result, target.name)
+
+                     with concurrent.futures.ThreadPoolExecutor(max_workers=len(self.agents)) as executor:
+                         future_to_agent = {
+                             executor.submit(self._reanalyze_with_collaboration, agent, input_data): agent
+                             for agent in self.agents
+                         }
+                         for future in concurrent.futures.as_completed(future_to_agent):
+                             agent = future_to_agent[future]
+                             try:
+                                 future.result()
+                                 self.reports[agent.name] = agent.report()
+                                 self.graph.add_node(agent.name, explanation=agent.explanation)
+                                 for target, weight in agent.influence.items():
+                                     self.graph.add_edge(agent.name, target, weight=round(weight, 2))
+                             except Exception as e:
+                                 self.logger.error(f"Attempt {attempt + 1}: Error in agent {agent.name} final analysis: {e}")
+                                 self.reports[agent.name] = {"error": str(e), "explanation": "Agent failed to process"}
+
+                     consensus_result = self._compute_consensus()
+                     self.reports["Consensus"] = {
+                         "result": consensus_result,
+                         "explanation": "Consensus computed from agent outputs weighted by MetaJudgeAgent scores."
+                     }
+                     self.memory.blockchain.add_block(self.reports)
+                     performance = monitor_performance()
+                     self.logger.info(f"Dispatch completed successfully. Performance: {performance}")
+                     return True
+                 except Exception as e:
+                     self.logger.warning(f"Retry {attempt + 1} after error: {e}")
+             self.logger.error(f"Dispatch failed after {max_retries} retries")
+             return False
+         except Exception as e:
+             self.logger.error(f"Error in dispatch: {e}")
+             return False
+
+     def _reanalyze_with_collaboration(self, agent: AegisAgent, input_data: Dict[str, Any]) -> None:
+         try:
+             collab_data = []
+             for source in self.agents:
+                 if source.name != agent.name:
+                     # Read with the same deterministic key used in AegisAgent.collaborate.
+                     mem_key = f"collab_{source.name}_{agent.name}"
+                     collab = self.memory.read(mem_key)
+                     if collab:
+                         collab_data.append((source.name, collab))
+             if collab_data:
+                 agent.explanation += f" Incorporated collaboration data from: {[x[0] for x in collab_data]}."
+             agent.analyze(input_data)
+         except Exception as e:
+             self.logger.error(f"Error in collaboration reanalysis for {agent.name}: {e}")
+
+     def _compute_consensus(self) -> Dict[str, Any]:
+         try:
+             meta_scores = self.reports.get("MetaJudgeAgent", {}).get("result", {}).get("scores", [])
+             # Use .get("result", {}) so error entries (which have no "result" key)
+             # don't raise while collecting profiles.
+             virtue_profiles = [
+                 self.reports[agent].get("result", {}).get("virtue_profile", {})
+                 for agent in self.reports if agent != "Consensus" and "virtue_profile" in self.reports[agent].get("result", {})
+             ]
+             if not virtue_profiles or not meta_scores:
+                 return {"error": "Insufficient data for consensus"}
+
+             weights = {agent: score for agent, score in meta_scores}
+             default_weight = 0.5 / len(self.agents)
+             combined_profile = {}
+             for virtue in ["compassion", "integrity", "courage", "wisdom"]:
+                 weighted_sum = 0
+                 total_weight = 0
+                 for profile in virtue_profiles:
+                     if virtue in profile:
+                         agent_name = next(
+                             (agent for agent in self.reports if self.reports[agent].get("result", {}).get("virtue_profile") == profile),
+                             None
+                         )
+                         weight = weights.get(agent_name, default_weight)
+                         weighted_sum += profile[virtue] * weight
+                         total_weight += weight
+                 combined_profile[virtue] = round(weighted_sum / total_weight, 2) if total_weight > 0 else 0.0
+             return {"combined_virtue_profile": combined_profile}
+         except Exception as e:
+             self.logger.error(f"Error computing consensus: {e}")
+             return {"error": str(e)}
+
+     def dispatch_realtime(self, query: str) -> bool:
+         try:
+             posts = self.fetcher.fetch_x_posts(query)
+             if not posts:
+                 self.logger.error("No posts fetched for query")
+                 return False
+             input_data = {
+                 "text": posts[0]["content"],
+                 "overrides": {
+                     "EthosiaAgent": {"influence": 0.5, "reliability": 0.5, "severity": 0.5},
+                     "AegisCore": {"influence": 0.5, "reliability": 0.5, "severity": 0.5}
+                 }
+             }
+             return self.dispatch(input_data)
+         except Exception as e:
+             self.logger.error(f"Error in real-time dispatch: {e}")
+             return False
+
+     def get_reports(self) -> Dict[str, Dict[str, Any]]:
+         return self.reports
+
+     def draw_explainability_graph(self, filename: str = "explainability_graph.html") -> None:
+         try:
+             pos = anneal_layout(self.graph)
+             edge_x, edge_y = [], []
+             for edge in self.graph.edges(data=True):
+                 x0, y0 = pos[edge[0]]
+                 x1, y1 = pos[edge[1]]
+                 edge_x.extend([x0, x1, None])
+                 edge_y.extend([y0, y1, None])
+
+             edge_trace = go.Scatter(
+                 x=edge_x, y=edge_y, line=dict(width=1, color='#888'), hoverinfo='none', mode='lines'
+             )
+
+             node_x, node_y = [], []
+             for node in self.graph.nodes():
+                 x, y = pos[node]
+                 node_x.append(x)
+                 node_y.append(y)
+
+             node_trace = go.Scatter(
+                 x=node_x, y=node_y, mode='markers+text', hoverinfo='text',
+                 marker=dict(size=20, color='lightgreen'), text=list(self.graph.nodes()),
+                 textposition="bottom center"
+             )
+
+             edge_labels = []
+             for edge in self.graph.edges(data=True):
+                 x0, y0 = pos[edge[0]]
+                 x1, y1 = pos[edge[1]]
+                 edge_labels.append(go.Scatter(
+                     x=[(x0 + x1) / 2], y=[(y0 + y1) / 2], mode='text',
+                     text=[f"{edge[2]['weight']:.2f}"], textposition="middle center"
+                 ))
+
+             fig = go.Figure(data=[edge_trace, node_trace] + edge_labels,
+                             layout=go.Layout(
+                                 title="Explainability Graph",
+                                 showlegend=False, hovermode='closest',
+                                 margin=dict(b=20, l=5, r=5, t=40),
+                                 xaxis=dict(showgrid=False, zeroline=False),
+                                 yaxis=dict(showgrid=False, zeroline=False)
+                             ))
+             fig.write_html(filename)
+             self.logger.info(f"Saved explainability graph to {filename}")
+         except Exception as e:
+             self.logger.error(f"Error drawing graph: {e}")
+
+ # === META-JUDGE AGENT ===
+ class MetaJudgeAgent(AegisAgent):
+     def __init__(self, name: str, memory: NexusMemory, weights: Dict[str, float]):
+         super().__init__(name, memory)
+         self.weights = weights
+
+     def analyze(self, input_data: Dict[str, Any]) -> None:
+         try:
+             overrides = input_data.get("overrides", {})
+             if not overrides:
+                 self.result = {"error": "No overrides provided"}
+                 self.explanation = "MetaJudgeAgent failed: No overrides provided."
+                 self.logger.warning(self.explanation)
+                 return
+
+             scores = []
+             for agent, data in overrides.items():
+                 try:
+                     influence = float(data.get("influence", 0.5))
+                     reliability = float(data.get("reliability", 0.5))
+                     severity = float(data.get("severity", 0.5))
+                     if not all(0 <= x <= 1 for x in [influence, reliability, severity]):
+                         self.logger.warning(f"Invalid metrics for {agent}: {data}")
+                         continue
+
+                     mem_key = f"meta_judge_{agent}_score"
+                     prev_score = self.memory.read(mem_key)
+                     context_factor = 1.0 if prev_score is None else 0.9
+                     score = (self.weights["influence"] * influence +
+                              self.weights["reliability"] * reliability +
+                              self.weights["severity"] * severity) * context_factor
+                     scores.append((agent, score))
+                     self.influence[agent] = score
+                     self.memory.write(mem_key, score, emotion_weight=score)
+                 except Exception as e:
+                     self.logger.error(f"Error processing agent {agent}: {e}")
+
+             if not scores:
+                 self.result = {"error": "No valid agents to score"}
+                 self.explanation = "MetaJudgeAgent failed: No valid agents to score."
+                 return
+
+             scores.sort(key=lambda x: x[1], reverse=True)
+             winner = scores[0][0]
+             self.result = {"override_decision": winner, "scores": scores}
+             self.explanation = f"MetaJudgeAgent selected '{winner}' with score {scores[0][1]:.2f} based on weighted metrics."
+             self.logger.info(self.explanation)
+         except Exception as e:
+             self.result = {"error": str(e)}
+             self.explanation = f"MetaJudgeAgent failed: {e}"
+             self.logger.error(self.explanation)
+
+     def report(self) -> Dict[str, Any]:
+         return {"result": self.result, "explanation": self.explanation}
+
+ # === TEMPORAL REASONING AGENT ===
+ class TemporalAgent(AegisAgent):
+     def __init__(self, name: str, memory: NexusMemory, decay_thresholds: Dict[str, float]):
+         super().__init__(name, memory)
+         self.decay_thresholds = decay_thresholds
+
+     def analyze(self, input_data: Dict[str, Any]) -> None:
+         try:
+             audit = self.memory.audit()
+             recent_keys = sorted(audit.items(), key=lambda x: x[1]["timestamp"], reverse=True)[:5]
+             decay_rates = [1 if v["decayed"] else 0 for _, v in recent_keys]
+             avg_decay = np.mean(decay_rates) if decay_rates else 0.0
+             forecast = ("stable" if avg_decay < self.decay_thresholds["stable"] else
+                         "volatile" if avg_decay > self.decay_thresholds["volatile"] else "neutral")
+             self.result = {"temporal_forecast": forecast, "recent_keys": [k for k, _ in recent_keys], "decay_rate": avg_decay}
+             self.explanation = f"TemporalAgent forecasted '{forecast}' with average decay rate {avg_decay:.2f}."
+             for k, _ in recent_keys:
+                 self.influence[k] = 0.2
+             self.memory.write(f"temporal_forecast_{datetime.now().isoformat()}", forecast, emotion_weight=1.0 - avg_decay)
+             self.logger.info(self.explanation)
+         except Exception as e:
+             self.result = {"error": str(e)}
+             self.explanation = f"TemporalAgent failed: {e}"
+             self.logger.error(self.explanation)
+
+     def report(self) -> Dict[str, Any]:
+         return {"result": self.result, "explanation": self.explanation}
+
+ # === VIRTUE SPECTRUM AGENT ===
+ class VirtueAgent(AegisAgent):
+     def __init__(self, name: str, memory: NexusMemory, virtue_weights: Dict[str, List[float]]):
+         super().__init__(name, memory)
+         self.tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
+         self.model = AutoModel.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
+         self.sentiment_pipeline = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english", framework="pt")
+         self.virtue_weights = {k: np.array(v) for k, v in virtue_weights.items()}
+         self.federated_trainer = None
+
+     def set_federated_trainer(self, trainer: FederatedTrainer):
+         self.federated_trainer = trainer
+
+     def analyze(self, input_data: Dict[str, Any]) -> None:
+         try:
+             text = input_data.get("text", "")
+             if not text or not isinstance(text, str):
+                 self.result = {"error": "Invalid or empty text"}
+                 self.explanation = "VirtueAgent failed: Invalid or empty text."
+                 self.logger.warning(self.explanation)
+                 return
+
+             mem_key = f"virtue_cache_{hashlib.md5(text.encode()).hexdigest()}"
+             cached = self.memory.read(mem_key)
+             if cached:
+                 self.result = {"virtue_profile": cached}
+                 self.explanation = f"VirtueAgent used cached profile: {cached}"
+                 self.influence.update({k: v for k, v in cached.items()})
+                 self.logger.info(self.explanation)
+                 return
+
+             sentiment_result = self.sentiment_pipeline(text)[0]
+             sentiment = 1.0 if sentiment_result["label"] == "POSITIVE" else -1.0
+             sentiment_score = sentiment_result["score"]
+
+             inputs = self.tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
+             with torch.no_grad():
+                 outputs = self.model(**inputs)
+             embeddings = outputs.last_hidden_state.mean(dim=1).squeeze().numpy()
+             subjectivity = min(max(np.std(embeddings), 0.0), 1.0)
+             neutrality = 1.0 - abs(sentiment)
+
+             if self.federated_trainer:
+                 self.virtue_weights = self.federated_trainer.train(self.virtue_weights)
+                 self.memory.write("virtue_weights", {k: v.tolist() for k, v in self.virtue_weights.items()}, emotion_weight=0.9)
+
+             features = np.array([sentiment * sentiment_score, subjectivity, neutrality])
+             virtues = {
+                 virtue: round(float(max(np.dot(self.virtue_weights[virtue], features), 0.0)), 2)
+                 for virtue in self.virtue_weights
+             }
+             virtues = {k: min(v, 1.0) for k, v in virtues.items()}
+             self.result = {"virtue_profile": virtues}
+             self.explanation = f"VirtueAgent generated profile: {virtues} based on sentiment={sentiment:.2f}, subjectivity={subjectivity:.2f}, neutrality={neutrality:.2f}."
+             for virtue, score in virtues.items():
+                 self.influence[virtue] = score
+                 self.memory.write(f"virtue_{virtue}_{datetime.now().isoformat()}", score, emotion_weight=score)
+             self.memory.write(mem_key, virtues, emotion_weight=0.8)
+             self.logger.info(self.explanation)
+         except Exception as e:
+             self.result = {"error": str(e)}
+             self.explanation = f"VirtueAgent failed: {e}"
+             self.logger.error(self.explanation)
+
+     def report(self) -> Dict[str, Any]:
+         return {"result": self.result, "explanation": self.explanation}
+
+ # === FLASK WEB UI ===
+ app = Flask(__name__, template_folder='templates')
+
+ @app.route('/', methods=['GET', 'POST'])
+ def index():
+     if request.method == 'POST':
+         try:
+             input_data = {
+                 "text": request.form.get("text", ""),
+                 "overrides": json.loads(request.form.get("overrides", "{}"))
+             }
+             council.dispatch(input_data)
+             return render_template('reports.html', reports=council.get_reports())
+         except Exception as e:
+             logging.error(f"Error processing form: {e}")
+             return render_template('index.html', error=str(e))
+     return render_template('index.html', error=None)
+
+ @app.route('/reports')
+ def show_reports():
+     return render_template('reports.html', reports=council.get_reports())
+
+ @app.route('/graph')
+ def show_graph():
+     graph_file = "explainability_graph.html"
+     council.draw_explainability_graph(graph_file)
+     return send_file(graph_file)
+
+ @app.route('/charts')
+ def show_charts():
+     reports = council.get_reports()
+     virtue_data = reports.get("VirtueAgent", {}).get("result", {}).get("virtue_profile", {})
+     influence_data = reports.get("MetaJudgeAgent", {}).get("result", {}).get("scores", [])
+     return render_template('charts.html', virtue_data=virtue_data, influence_data=influence_data)
+
+ # === EXECUTION ===
+ def main():
+     global council
+     try:
+         parser = argparse.ArgumentParser(description="AegisCouncil AI System")
+         parser.add_argument('--config', default='config.json', help='Path to configuration file')
+         parser.add_argument('--weights', type=str, help='JSON string for MetaJudgeAgent weights')
+         parser.add_argument('--log-level', default='INFO', help='Logging level (DEBUG, INFO, WARNING, ERROR)')
+         parser.add_argument('--agent-module', help='Path to custom agent module')
+         parser.add_argument('--agent-class', help='Custom agent class name')
+         args = parser.parse_args()
+
+         config = load_config(args.config)
+         if args.weights:
+             try:
+                 config['meta_judge_weights'] = json.loads(args.weights)
+             except json.JSONDecodeError:
+                 logging.error("Invalid weights JSON, using config file weights")
+
+         setup_logging(config['log_level'])
+
+         council = AegisCouncil(config)
+         council.register_agent(MetaJudgeAgent("MetaJudgeAgent", council.memory, config["meta_judge_weights"]))
+         council.register_agent(TemporalAgent("TemporalAgent", council.memory, config["temporal_decay_thresholds"]))
+         virtue_agent = VirtueAgent("VirtueAgent", council.memory, config["virtue_weights"])
+         virtue_agent.set_federated_trainer(council.federated_trainer)
+         council.register_agent(virtue_agent)
+
+         if args.agent_module and args.agent_class:
+             council.register_dynamic_agent(args.agent_module, args.agent_class)
+
+         sample_input = {
+             "text": "We must stand for truth and help others with empathy and knowledge.",
+             "overrides": {
+                 "EthosiaAgent": {"influence": 0.7, "reliability": 0.8, "severity": 0.6},
+                 "AegisCore": {"influence": 0.6, "reliability": 0.9, "severity": 0.7}
+             }
+         }
+
+         success = council.dispatch(sample_input)
+         if not success:
+             print("Static dispatch failed. Check logs for details.")
+         else:
+             reports = council.get_reports()
+             df = pd.DataFrame.from_dict(reports, orient='index')
+             print("\nStatic Input Agent Reports:")
+             print(df.to_string())
+             council.draw_explainability_graph("static_explainability_graph.html")
+
+         success = council.dispatch_realtime("empathy")
+         if not success:
+             print("Real-time dispatch failed. Check logs for details.")
+         else:
+             reports = council.get_reports()
+             df = pd.DataFrame.from_dict(reports, orient='index')
+             print("\nReal-Time Input Agent Reports:")
+             print(df.to_string())
+             council.draw_explainability_graph("realtime_explainability_graph.html")
+
+         blockchain_valid = council.memory.blockchain.verify()
+         print(f"\nBlockchain Integrity: {'Valid' if blockchain_valid else 'Invalid'}")
+
+         print("\nStarting Flask server at http://localhost:5000")
+         app.run(debug=False, host='0.0.0.0', port=5000)
+
+     except Exception as e:
+         logging.error(f"Main execution failed: {e}")
+
+ if __name__ == "__main__":
+     main()
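For context (this note and the sketch below are editorial, not part of the commit): `register_dynamic_agent` loads a module from a file path, checks that the named class subclasses `AegisAgent`, and instantiates it as `agent_class(f"Dynamic_{class_name}", self.memory)`, so a pluggable agent only needs `analyze` and `report`. A minimal sketch; the file name `echo_agent.py` and class `EchoAgent` are hypothetical, and the import assumes `src/` is on `PYTHONPATH`:

# echo_agent.py -- hypothetical module for --agent-module / --agent-class
from typing import Any, Dict

from aegis import AegisAgent  # assumes src/ is importable


class EchoAgent(AegisAgent):
    """Toy agent: reports the length of the input text."""

    def analyze(self, input_data: Dict[str, Any]) -> None:
        text = input_data.get("text", "")
        self.result = {"text_length": len(text)}
        self.explanation = f"EchoAgent saw {len(text)} characters."
        self.influence = {"wisdom": 0.1}  # becomes an edge in the explainability graph

    def report(self) -> Dict[str, Any]:
        return {"result": self.result, "explanation": self.explanation}

It would then be loaded with something like `python src/aegis.py --agent-module echo_agent.py --agent-class EchoAgent`.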
src/aegis_integration/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .aegis_bridge import AegisBridge
+
+ __all__ = ['AegisBridge']
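For orientation (not part of the commit), the package would be used roughly as below. The `AICore()` constructor arguments are an assumption; the config keys and values are taken from the `load_config` defaults in `src/aegis.py`:

# Hypothetical usage sketch for the aegis_integration package.
from ai_core import AICore          # provided elsewhere in this repo
from aegis_integration import AegisBridge

config = {
    "meta_judge_weights": {"influence": 0.5, "reliability": 0.3, "severity": 0.2},
    "temporal_decay_thresholds": {"stable": 0.3, "volatile": 0.7},
    "virtue_weights": {
        "compassion": [0.7, 0.3, -0.1],
        "integrity": [0.4, -0.6, 0.2],
        "courage": [0.1, 0.5, 0.4],
        "wisdom": [0.3, -0.7, 0.2],
    },
    "memory_decay_days": 30,
    "memory_max_entries": 10000,
    "federated_learning": {"num_clients": 2, "aggregation_rounds": 1},
}

bridge = AegisBridge(AICore(), config)  # AICore() signature is an assumption
result = bridge.enhance_response(prompt="...", response="We value truth and empathy.")
print(result["virtue_analysis"])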
src/aegis_integration/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (237 Bytes)

src/aegis_integration/__pycache__/aegis.cpython-313.pyc ADDED
Binary file (36.7 kB)

src/aegis_integration/__pycache__/aegis_bridge.cpython-313.pyc ADDED
Binary file (5.9 kB)

src/aegis_integration/__pycache__/config.cpython-313.pyc ADDED
Binary file (729 Bytes)

src/aegis_integration/__pycache__/routes.cpython-313.pyc ADDED
Binary file (2.36 kB)
src/aegis_integration/aegis.py ADDED
@@ -0,0 +1,604 @@
+ # === AEGIS Council Core Implementation ===
+ """
+ AEGIS (Advanced Ethical Guardian and Intelligence System) provides ethical oversight
+ and analysis capabilities through a council of specialized agents.
+ """
+
+ import json
+ import hashlib
+ import threading
+ import logging
+ import sqlite3
+ from datetime import datetime, timedelta
+ from abc import ABC, abstractmethod
+ from collections import defaultdict
+ from typing import Any, Dict, Optional, List, Tuple
+ import concurrent.futures
+ import networkx as nx
+ import plotly.graph_objects as go
+ import pandas as pd
+ import numpy as np
+ import torch
+ from transformers import pipeline, AutoTokenizer, AutoModel
+ from copy import deepcopy
+
+ # Set up logging
+ logger = logging.getLogger(__name__)
+
+ # Try to import syft, but don't fail if it's not available
+ try:
+     import syft as sy
+     SYFT_AVAILABLE = True
+ except ImportError:
+     SYFT_AVAILABLE = False
+     logger.warning("PySyft not available. Federated learning features will be disabled.")
+
+ # Core AEGIS Components
+ class AegisCouncil:
+     def __init__(self, config: Dict[str, Any]):
+         self.memory = NexusMemory(
+             max_entries=config["memory_max_entries"],
+             decay_days=config["memory_decay_days"]
+         )
+         self.agents = []
+         self.reports = {}
+         self.graph = nx.DiGraph()
+         self.logger = logging.getLogger('AegisCouncil')
+         self.config = config
+         self.federated_trainer = FederatedTrainer(config["federated_learning"]["num_clients"])
+
+     def register_agent(self, agent: 'AegisAgent') -> None:
+         try:
+             self.agents.append(agent)
+             self.logger.info(f"Registered agent: {agent.name}")
+         except Exception as e:
+             self.logger.error(f"Error registering agent: {e}")
+
+     def get_reports(self) -> Dict[str, Any]:
+         """Get the current analysis reports."""
+         return self.reports
+
+     def dispatch(self, input_data: Dict[str, Any], max_retries: int = 3) -> bool:
+         try:
+             if not isinstance(input_data, dict):
+                 self.logger.error("Input data must be a dictionary")
+                 return False
+
+             self.reports.clear()
+             self.graph.clear()
+
+             for attempt in range(max_retries):
+                 try:
+                     with concurrent.futures.ThreadPoolExecutor(max_workers=len(self.agents)) as executor:
+                         future_to_agent = {
+                             executor.submit(agent.analyze, input_data): agent
+                             for agent in self.agents
+                         }
+                         for future in concurrent.futures.as_completed(future_to_agent):
+                             agent = future_to_agent[future]
+                             try:
+                                 future.result()
+                                 self.reports[agent.name] = agent.report()
+                                 self.graph.add_node(agent.name, explanation=agent.explanation)
+                                 for target, weight in agent.influence.items():
+                                     self.graph.add_edge(agent.name, target, weight=round(weight, 2))
+                             except Exception as e:
+                                 self.logger.error(f"Error in agent {agent.name}: {e}")
+                                 self.reports[agent.name] = {
+                                     "error": str(e),
+                                     "explanation": "Agent failed to process"
+                                 }
+
+                     consensus = self._compute_consensus()
+                     self.reports["Consensus"] = {
+                         "result": consensus,
+                         "explanation": "Consensus from weighted agent outputs."
+                     }
+                     self.memory.blockchain.add_block(self.reports)
+                     return True
+                 except Exception as e:
+                     self.logger.warning(f"Retry {attempt + 1} after error: {e}")
+
+             self.logger.error(f"Dispatch failed after {max_retries} retries")
+             return False
+         except Exception as e:
+             self.logger.error(f"Error in dispatch: {e}")
+             return False
+
+     def _compute_consensus(self) -> Dict[str, Any]:
+         try:
+             meta_scores = self.reports.get("MetaJudgeAgent", {}).get("result", {}).get("scores", [])
+             # Use .get("result", {}) so error entries (which have no "result" key)
+             # don't raise while collecting profiles.
+             virtue_profiles = [
+                 self.reports[agent].get("result", {}).get("virtue_profile", {})
+                 for agent in self.reports
+                 if agent != "Consensus" and "virtue_profile" in self.reports[agent].get("result", {})
+             ]
+
+             if not virtue_profiles or not meta_scores:
+                 return {"error": "Insufficient data for consensus"}
+
+             weights = {agent: score for agent, score in meta_scores}
+             default_weight = 0.5 / len(self.agents)
+             combined_profile = {}
+
+             for virtue in ["compassion", "integrity", "courage", "wisdom"]:
+                 weighted_sum = 0
+                 total_weight = 0
+                 for profile in virtue_profiles:
+                     if virtue in profile:
+                         agent_name = next(
+                             (agent for agent in self.reports if self.reports[agent].get("result", {}).get("virtue_profile") == profile),
+                             None
+                         )
+                         weight = weights.get(agent_name, default_weight)
+                         weighted_sum += profile[virtue] * weight
+                         total_weight += weight
+                 combined_profile[virtue] = round(weighted_sum / total_weight, 2) if total_weight > 0 else 0.0
+
+             return {"combined_virtue_profile": combined_profile}
+         except Exception as e:
+             self.logger.error(f"Error computing consensus: {e}")
+             return {"error": str(e)}
+
+ def draw_explainability_graph(self, filename: str = "explainability_graph.html") -> None:
145
+ try:
146
+ pos = self._compute_layout()
147
+ edge_trace = self._create_edge_trace(pos)
148
+ node_trace = self._create_node_trace(pos)
149
+ edge_labels = self._create_edge_labels(pos)
150
+
151
+ fig = go.Figure(
152
+ data=[edge_trace, node_trace] + edge_labels,
153
+ layout=go.Layout(
154
+ title="AEGIS Analysis Graph",
155
+ showlegend=False,
156
+ hovermode='closest',
157
+ margin=dict(b=20, l=5, r=5, t=40),
158
+ xaxis=dict(showgrid=False, zeroline=False),
159
+ yaxis=dict(showgrid=False, zeroline=False)
160
+ )
161
+ )
162
+ fig.write_html(filename)
163
+ self.logger.info(f"Saved analysis graph to {filename}")
164
+ except Exception as e:
165
+ self.logger.error(f"Error drawing graph: {e}")
166
+
167
+ def _compute_layout(self):
168
+ return nx.spring_layout(self.graph)
169
+
170
+ def _create_edge_trace(self, pos):
171
+ edge_x, edge_y = [], []
172
+ for edge in self.graph.edges():
173
+ x0, y0 = pos[edge[0]]
174
+ x1, y1 = pos[edge[1]]
175
+ edge_x.extend([x0, x1, None])
176
+ edge_y.extend([y0, y1, None])
177
+ return go.Scatter(
178
+ x=edge_x, y=edge_y,
179
+ line=dict(width=1, color='#888'),
180
+ hoverinfo='none',
181
+ mode='lines'
182
+ )
183
+
184
+ def _create_node_trace(self, pos):
185
+ node_x, node_y = [], []
186
+ for node in self.graph.nodes():
187
+ x, y = pos[node]
188
+ node_x.append(x)
189
+ node_y.append(y)
190
+ return go.Scatter(
191
+ x=node_x, y=node_y,
192
+ mode='markers+text',
193
+ hoverinfo='text',
194
+ marker=dict(size=20, color='lightblue'),
195
+ text=list(self.graph.nodes()),
196
+ textposition="bottom center"
197
+ )
198
+
199
+ def _create_edge_labels(self, pos):
200
+ labels = []
201
+ for edge in self.graph.edges(data=True):
202
+ x0, y0 = pos[edge[0]]
203
+ x1, y1 = pos[edge[1]]
204
+ labels.append(
205
+ go.Scatter(
206
+ x=[(x0 + x1) / 2],
207
+ y=[(y0 + y1) / 2],
208
+ mode='text',
209
+ text=[f"{edge[2]['weight']:.2f}"],
210
+ textposition="middle center"
211
+ )
212
+ )
213
+ return labels
214
+
215
+ # Memory Management
216
+ class NexusMemory:
217
+ def __init__(self, max_entries: int = 10000, decay_days: int = 30):
218
+ self.store = defaultdict(dict)
219
+ self.max_entries = max_entries
220
+ self.decay_days = decay_days
221
+ self.lock = threading.Lock()
222
+ self.logger = logging.getLogger('NexusMemory')
223
+ self.blockchain = Blockchain()
224
+
225
+ def write(self, key: str, value: Any, emotion_weight: float = 0.5) -> Optional[str]:
226
+ try:
227
+ if not isinstance(key, str) or not (0 <= emotion_weight <= 1):
228
+ self.logger.error(f"Invalid key type or emotion weight")
229
+ return None
230
+
231
+ # Convert numpy types to Python native types
232
+ def convert_value(v):
233
+ if isinstance(v, np.bool_):
234
+ return bool(v)
235
+ if isinstance(v, np.integer):
236
+ return int(v)
237
+ if isinstance(v, np.floating):
238
+ return float(v)
239
+ if isinstance(v, np.ndarray):
240
+ return v.tolist()
241
+ if isinstance(v, dict):
242
+ return {k: convert_value(val) for k, val in v.items()}
243
+ if isinstance(v, (list, tuple)):
244
+ return [convert_value(item) for item in v]
245
+ return v
246
+
247
+ hashed = hashlib.md5(key.encode()).hexdigest()
248
+ timestamp = datetime.now()
249
+
250
+ with self.lock:
251
+ if len(self.store) >= self.max_entries:
252
+ oldest = min(self.store.items(), key=lambda x: x[1].get('timestamp', timestamp))[0]
253
+ del self.store[oldest]
254
+
255
+ self.store[hashed] = {
256
+ "value": convert_value(value),
257
+ "timestamp": timestamp,
258
+ "emotion_weight": float(emotion_weight) # Ensure emotion_weight is native float
259
+ }
260
+
261
+ self.blockchain.add_block({
262
+ "key": hashed,
263
+ "value": value,
264
+ "timestamp": timestamp.isoformat()
265
+ })
266
+
267
+ return hashed
268
+ except Exception as e:
269
+ self.logger.error(f"Error writing to memory: {e}")
270
+ return None
271
+
272
+ def read(self, key: str) -> Optional[Any]:
273
+ try:
274
+ hashed = hashlib.md5(key.encode()).hexdigest()
275
+ with self.lock:
276
+ entry = self.store.get(hashed)
277
+ if not entry:
278
+ return None
279
+
280
+ if self._is_decayed(entry["timestamp"], entry.get("emotion_weight", 0.5)):
281
+ del self.store[hashed]
282
+ return None
283
+
284
+ return entry["value"]
285
+ except Exception as e:
286
+ self.logger.error(f"Error reading from memory: {e}")
287
+ return None
288
+
289
+ def _is_decayed(self, timestamp: datetime, emotion_weight: float) -> bool:
290
+ try:
291
+ age = (datetime.now() - timestamp).total_seconds() / (24 * 3600)
292
+ decay_factor = np.exp(-age / (self.decay_days * (1.5 - emotion_weight)))
293
+ return decay_factor < 0.1
294
+ except Exception as e:
295
+ self.logger.error(f"Error checking decay: {e}")
296
+ return True
297
+
298
+ def audit(self) -> Dict[str, Any]:
299
+ try:
300
+ with self.lock:
301
+ audit_data = {
302
+ k: {
303
+ "timestamp": v["timestamp"],
304
+ "emotion_weight": v["emotion_weight"],
305
+ "decayed": self._is_decayed(v["timestamp"], v["emotion_weight"])
306
+ }
307
+ for k, v in self.store.items()
308
+ }
309
+ self.blockchain.add_block({"audit": audit_data})
310
+ return audit_data
311
+ except Exception as e:
312
+ self.logger.error(f"Error auditing memory: {e}")
313
+ return {}
314
+
315
+ # Auditability
316
+ class Blockchain:
317
+ def __init__(self):
318
+ self.chain = [{
319
+ "index": 0,
320
+ "timestamp": datetime.now().isoformat(),
321
+ "data": "Genesis Block",
322
+ "prev_hash": "0"
323
+ }]
324
+ self.logger = logging.getLogger('Blockchain')
325
+
326
+ def add_block(self, data: Dict[str, Any]) -> None:
327
+ try:
328
+ prev_block = self.chain[-1]
329
+ # Convert datetime objects and numpy types to JSON serializable format
330
+ def json_handler(obj):
331
+ if isinstance(obj, datetime):
332
+ return obj.isoformat()
333
+ if isinstance(obj, np.bool_):
334
+ return bool(obj)
335
+ if isinstance(obj, np.integer):
336
+ return int(obj)
337
+ if isinstance(obj, np.floating):
338
+ return float(obj)
339
+ if isinstance(obj, np.ndarray):
340
+ return obj.tolist()
341
+ raise TypeError(f"Object of type {type(obj)} is not JSON serializable")
342
+
343
+ block = {
344
+ "index": len(self.chain),
345
+ "timestamp": datetime.now().isoformat(),
346
+ "data": json.dumps(data, default=json_handler),
347
+ "prev_hash": self._hash_block(prev_block)
348
+ }
349
+ block["hash"] = self._hash_block(block)
350
+ self.chain.append(block)
351
+ except Exception as e:
352
+ self.logger.error(f"Error adding block: {e}")
353
+
354
+ def _hash_block(self, block: Dict[str, Any]) -> str:
355
+ try:
356
+ block_str = json.dumps(block, sort_keys=True)
357
+ return hashlib.sha256(block_str.encode()).hexdigest()
358
+ except Exception as e:
359
+ self.logger.error(f"Error hashing block: {e}")
360
+ return ""
361
+
362
+ def verify(self) -> bool:
363
+ try:
364
+ for i in range(1, len(self.chain)):
365
+ current = self.chain[i]
366
+ prev = self.chain[i-1]
367
+ if current["prev_hash"] != self._hash_block(prev):
368
+ return False
369
+ return True
370
+ except Exception as e:
371
+ self.logger.error(f"Error verifying blockchain: {e}")
372
+ return False
373
+
374
+ # Federated Learning
375
+ class FederatedTrainer:
376
+ def __init__(self, num_clients: int):
377
+ self.num_clients = num_clients
378
+ self.logger = logging.getLogger('FederatedTrainer')
379
+
380
+ if SYFT_AVAILABLE:
381
+ self.hook = sy.TorchHook(torch)
382
+ self.clients = [
383
+ sy.VirtualWorker(self.hook, id=f"client_{i}")
384
+ for i in range(num_clients)
385
+ ]
386
+ else:
387
+ self.hook = None
388
+ self.clients = []
389
+ self.logger.warning("Running without federated learning - PySyft not available")
390
+
391
+ def train(self, weights: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
392
+ try:
393
+ if not SYFT_AVAILABLE:
394
+ self.logger.debug("Skipping federated training - PySyft not available")
395
+ return weights
396
+
397
+ client_updates = []
398
+ for client in self.clients:
399
+ client_weights = deepcopy(weights)
400
+ for virtue in client_weights:
401
+ client_weights[virtue] += np.random.normal(0, 0.01, size=client_weights[virtue].shape)
402
+ client_updates.append(client_weights)
403
+
404
+ aggregated = {}
405
+ for virtue in weights:
406
+ aggregated[virtue] = np.mean([update[virtue] for update in client_updates], axis=0)
407
+
408
+ return aggregated
409
+ except Exception as e:
410
+ self.logger.error(f"Error in federated training: {e}")
411
+ return weights
412
+
413
+ # Agent Base Class
414
+ class AegisAgent(ABC):
415
+ def __init__(self, name: str, memory: NexusMemory):
416
+ self.name = name
417
+ self.memory = memory
418
+ self.result: Dict[str, Any] = {}
419
+ self.explanation: str = ""
420
+ self.influence: Dict[str, float] = {}
421
+ self.logger = logging.getLogger(f'AegisAgent.{name}')
422
+
423
+ @abstractmethod
424
+ def analyze(self, input_data: Dict[str, Any]) -> None:
425
+ pass
426
+
427
+ def report(self) -> Dict[str, Any]:
428
+ return {
429
+ "result": self.result,
430
+ "explanation": self.explanation
431
+ }
432
+
433
+ # Specialized Agents
434
+ class MetaJudgeAgent(AegisAgent):
435
+ def __init__(self, name: str, memory: NexusMemory, weights: Dict[str, float]):
436
+ super().__init__(name, memory)
437
+ self.weights = weights
438
+
439
+ def analyze(self, input_data: Dict[str, Any]) -> None:
440
+ try:
441
+ overrides = input_data.get("overrides", {})
442
+ if not overrides:
443
+ self.result = {"error": "No overrides provided"}
444
+ self.explanation = "No overrides provided for analysis."
445
+ return
446
+
447
+ scores = []
448
+ for agent, data in overrides.items():
449
+ try:
450
+ influence = float(data.get("influence", 0.5))
451
+ reliability = float(data.get("reliability", 0.5))
452
+ severity = float(data.get("severity", 0.5))
453
+
454
+ if not all(0 <= x <= 1 for x in [influence, reliability, severity]):
455
+ continue
456
+
457
+ score = (
458
+ self.weights["influence"] * influence +
459
+ self.weights["reliability"] * reliability +
460
+ self.weights["severity"] * severity
461
+ )
462
+ scores.append((agent, score))
463
+ self.influence[agent] = score
464
+ except Exception as e:
465
+ self.logger.error(f"Error processing agent {agent}: {e}")
466
+
467
+ if not scores:
468
+ self.result = {"error": "No valid agents to score"}
469
+ self.explanation = "No valid agents for meta-analysis."
470
+ return
471
+
472
+ scores.sort(key=lambda x: x[1], reverse=True)
473
+ self.result = {
474
+ "override_decision": scores[0][0],
475
+ "scores": scores
476
+ }
477
+ self.explanation = f"Selected '{scores[0][0]}' with score {scores[0][1]:.2f}"
478
+ except Exception as e:
479
+ self.result = {"error": str(e)}
480
+ self.explanation = f"Analysis failed: {e}"
481
+
482
+ class TemporalAgent(AegisAgent):
483
+ def __init__(self, name: str, memory: NexusMemory, decay_thresholds: Dict[str, float]):
484
+ super().__init__(name, memory)
485
+ self.decay_thresholds = decay_thresholds
486
+
487
+ def analyze(self, input_data: Dict[str, Any]) -> None:
488
+ try:
489
+ audit = self.memory.audit()
490
+ recent_keys = sorted(
491
+ audit.items(),
492
+ key=lambda x: x[1]["timestamp"],
493
+ reverse=True
494
+ )[:5]
495
+
496
+ decay_rates = [1 if v["decayed"] else 0 for _, v in recent_keys]
497
+ avg_decay = np.mean(decay_rates) if decay_rates else 0.0
498
+
499
+ forecast = (
500
+ "stable" if avg_decay < self.decay_thresholds["stable"]
501
+ else "volatile" if avg_decay > self.decay_thresholds["volatile"]
502
+ else "neutral"
503
+ )
504
+
505
+ self.result = {
506
+ "temporal_forecast": forecast,
507
+ "recent_keys": [k for k, _ in recent_keys],
508
+ "decay_rate": avg_decay
509
+ }
510
+ self.explanation = f"Forecast: {forecast} (decay rate: {avg_decay:.2f})"
511
+
512
+ for k, _ in recent_keys:
513
+ self.influence[k] = 0.2
514
+ except Exception as e:
515
+ self.result = {"error": str(e)}
516
+ self.explanation = f"Temporal analysis failed: {e}"
517
+
518
+ class VirtueAgent(AegisAgent):
519
+ def __init__(self, name: str, memory: NexusMemory, virtue_weights: Dict[str, List[float]]):
520
+ super().__init__(name, memory)
521
+ self.tokenizer = AutoTokenizer.from_pretrained(
522
+ "distilbert-base-uncased-finetuned-sst-2-english"
523
+ )
524
+ self.model = AutoModel.from_pretrained(
525
+ "distilbert-base-uncased-finetuned-sst-2-english"
526
+ )
527
+ self.sentiment = pipeline(
528
+ "sentiment-analysis",
529
+ model="distilbert-base-uncased-finetuned-sst-2-english"
530
+ )
531
+ self.virtue_weights = {k: np.array(v) for k, v in virtue_weights.items()}
532
+ self.federated_trainer = None
533
+
534
+ def set_federated_trainer(self, trainer: FederatedTrainer):
535
+ self.federated_trainer = trainer
536
+
537
+ def analyze(self, input_data: Dict[str, Any]) -> None:
538
+ try:
539
+ text = input_data.get("text", "")
540
+ if not text or not isinstance(text, str):
541
+ self.result = {"error": "Invalid input text"}
542
+ self.explanation = "No valid text provided for analysis."
543
+ return
544
+
545
+ # Check cache
546
+ mem_key = f"virtue_cache_{hashlib.md5(text.encode()).hexdigest()}"
547
+ cached = self.memory.read(mem_key)
548
+ if cached:
549
+ self.result = {"virtue_profile": cached}
550
+ self.explanation = f"Retrieved cached analysis"
551
+ self.influence.update({k: v for k, v in cached.items()})
552
+ return
553
+
554
+ # Perform analysis
555
+ sentiment_result = self.sentiment(text)[0]
556
+ sentiment = 1.0 if sentiment_result["label"] == "POSITIVE" else -1.0
557
+ sentiment_score = sentiment_result["score"]
558
+
559
+ inputs = self.tokenizer(
560
+ text,
561
+ return_tensors="pt",
562
+ truncation=True,
563
+ max_length=512
564
+ )
565
+ with torch.no_grad():
566
+ outputs = self.model(**inputs)
567
+
568
+ embeddings = outputs.last_hidden_state.mean(dim=1).squeeze().numpy()
569
+ subjectivity = min(max(np.std(embeddings), 0.0), 1.0)
570
+ neutrality = 1.0 - abs(sentiment)
571
+
572
+ if self.federated_trainer:
573
+ self.virtue_weights = self.federated_trainer.train(self.virtue_weights)
574
+
575
+ features = np.array([
576
+ sentiment * sentiment_score,
577
+ subjectivity,
578
+ neutrality
579
+ ])
580
+
581
+ virtues = {}
582
+ for virtue, weights in self.virtue_weights.items():
583
+ # Convert numpy types to Python native types
584
+ score = float(max(np.dot(weights, features), 0.0))
585
+ virtues[virtue] = float(min(round(score, 2), 1.0)) # Ensure native float
586
+
587
+ # Convert all numpy types in the result
588
+ self.result = {
589
+ "virtue_profile": {
590
+ k: float(v) if isinstance(v, (np.number, float)) else v
591
+ for k, v in virtues.items()
592
+ }
593
+ }
594
+ self.explanation = (
595
+ f"Virtues analyzed: {', '.join(f'{k}: {v:.2f}' for k, v in virtues.items())}"
596
+ )
597
+
598
+ for virtue, score in virtues.items():
599
+ self.influence[virtue] = float(score) # Ensure native float
600
+
601
+ self.memory.write(mem_key, virtues, emotion_weight=0.8)
602
+ except Exception as e:
603
+ self.result = {"error": str(e)}
604
+ self.explanation = f"Virtue analysis failed: {e}"
src/aegis_integration/aegis_bridge.py ADDED
@@ -0,0 +1,113 @@
1
+ import importlib
2
+ import logging
3
+ from typing import Dict, Any, Optional, List
4
+
5
+ logger = logging.getLogger(__name__)
6
+
7
+ def check_dependencies() -> List[str]:
8
+ """Check if all required packages are installed."""
9
+ required_packages = [
10
+ 'plotly',
11
+ 'torch',
12
+ 'transformers',
13
+ 'pandas',
14
+ 'numpy',
15
+ 'networkx'
16
+ ]
17
+ missing = []
18
+ for package in required_packages:
19
+ try:
20
+ importlib.import_module(package)
21
+ except ImportError:
22
+ missing.append(package)
23
+ return missing
24
+
25
+ # Check dependencies before importing AEGIS components
26
+ missing_packages = check_dependencies()
27
+ if missing_packages:
28
+ raise ImportError(
29
+ f"AEGIS requires the following packages to be installed: {', '.join(missing_packages)}. "
30
+ f"Please install them using: pip install {' '.join(missing_packages)}"
31
+ )
32
+
33
+ from .aegis import AegisCouncil, MetaJudgeAgent, TemporalAgent, VirtueAgent
34
+ import sys
35
+ from pathlib import Path
36
+ sys.path.append(str(Path(__file__).parent.parent))
37
+ from ai_core import AICore
38
+
39
+ class AegisBridge:
40
+ def __init__(self, ai_core: AICore, config: Dict[str, Any]):
41
+ self.ai_core = ai_core
42
+ self.council = AegisCouncil(config)
43
+
44
+ # Register default agents
45
+ self.council.register_agent(MetaJudgeAgent("MetaJudgeAgent", self.council.memory, config["meta_judge_weights"]))
46
+ self.council.register_agent(TemporalAgent("TemporalAgent", self.council.memory, config["temporal_decay_thresholds"]))
47
+ virtue_agent = VirtueAgent("VirtueAgent", self.council.memory, config["virtue_weights"])
48
+ virtue_agent.set_federated_trainer(self.council.federated_trainer)
49
+ self.council.register_agent(virtue_agent)
50
+
51
+ def enhance_response(self, prompt: str, response: str) -> Dict[str, Any]:
52
+ """
53
+ Enhance Codette's response using AEGIS analysis
54
+ """
55
+ input_data = {
56
+ "text": response,
57
+ "overrides": {
58
+ "EthosiaAgent": {"influence": 0.7, "reliability": 0.8, "severity": 0.6},
59
+ "AegisCore": {"influence": 0.6, "reliability": 0.9, "severity": 0.7}
60
+ }
61
+ }
62
+
63
+ # Dispatch to AEGIS council
64
+ self.council.dispatch(input_data)
65
+
66
+ # Get analysis results
67
+ reports = self.council.get_reports()
68
+ virtue_profile = reports.get("VirtueAgent", {}).get("result", {}).get("virtue_profile", {})
69
+ temporal_analysis = reports.get("TemporalAgent", {}).get("result", {})
70
+ meta_scores = reports.get("MetaJudgeAgent", {}).get("result", {}).get("scores", [])
71
+
72
+ return {
73
+ "original_response": response,
74
+ "virtue_analysis": virtue_profile,
75
+ "temporal_analysis": temporal_analysis,
76
+ "meta_scores": meta_scores,
77
+ "enhanced_response": self._apply_enhancements(response, virtue_profile)
78
+ }
79
+
80
+ def _apply_enhancements(self, response: str, virtue_profile: Dict[str, float]) -> str:
81
+ """
82
+ Apply virtue-based enhancements to the response
83
+ """
84
+ # Apply wisdom score to improve clarity
85
+ if virtue_profile.get("wisdom", 0) < 0.5:
86
+ response = self.ai_core.self_refine_response(response)
87
+
88
+ # Add empathetic framing for low compassion
89
+ if virtue_profile.get("compassion", 0) < 0.5:
90
+ response = self.ai_core.generate_text(
91
+ f"Make this response more empathetic while preserving its meaning: {response}",
92
+ perspective="human_intuition"
93
+ )
94
+
95
+ return response
96
+
97
+ def get_analysis_graphs(self) -> Dict[str, str]:
98
+ """
99
+ Generate and return analysis visualizations
100
+ """
101
+ try:
102
+ self.council.draw_explainability_graph("aegis_analysis.html")
103
+ return {
104
+ "explainability_graph": "aegis_analysis.html"
105
+ }
106
+ except Exception as e:
107
+ return {"error": str(e)}
108
+
109
+ def get_memory_state(self) -> Dict[str, Any]:
110
+ """
111
+ Return the current state of AEGIS memory
112
+ """
113
+ return self.council.memory.audit()
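A minimal wiring sketch for the bridge, assuming the package layout above is importable as shown (the exact import paths are an assumption about how `src` is packaged):

    from ai_core import AICore
    from aegis_integration.aegis_bridge import AegisBridge
    from aegis_integration.config import AEGIS_CONFIG

    core = AICore(test_mode=True)            # test mode skips loading the large models
    bridge = AegisBridge(core, AEGIS_CONFIG)
    core.set_aegis_bridge(bridge)            # lets generate_text() route output through AEGIS

    result = bridge.enhance_response("greeting", "Hello there!")
    print(result["virtue_analysis"])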
src/aegis_integration/config.py ADDED
@@ -0,0 +1,25 @@
1
+ # Default configuration for AEGIS integration
2
+ AEGIS_CONFIG = {
3
+ "meta_judge_weights": {
4
+ "influence": 0.5,
5
+ "reliability": 0.3,
6
+ "severity": 0.2
7
+ },
8
+ "temporal_decay_thresholds": {
9
+ "stable": 0.3,
10
+ "volatile": 0.7
11
+ },
12
+ "virtue_weights": {
13
+ "compassion": [0.7, 0.3, -0.1],
14
+ "integrity": [0.4, -0.6, 0.2],
15
+ "courage": [0.1, 0.5, 0.4],
16
+ "wisdom": [0.3, -0.7, 0.2]
17
+ },
18
+ "memory_decay_days": 30,
19
+ "memory_max_entries": 10000,
20
+ "log_level": "INFO",
21
+ "federated_learning": {
22
+ "num_clients": 2,
23
+ "aggregation_rounds": 1
24
+ }
25
+ }
src/aegis_integration/routes.py ADDED
@@ -0,0 +1,34 @@
1
+ from flask import jsonify, request  # request is used in analyze_with_aegis below
2
+ from typing import Dict, Any
3
+
4
+ def register_aegis_endpoints(app, aegis_bridge):
5
+ @app.route('/api/aegis/analyze', methods=['POST'])
6
+ def analyze_with_aegis():
7
+ try:
8
+ data = request.get_json()
9
+ if not data or 'text' not in data:
10
+ return jsonify({"error": "Missing text parameter"}), 400
11
+
12
+ analysis = aegis_bridge.enhance_response(
13
+ data.get('prompt', ''),
14
+ data['text']
15
+ )
16
+ return jsonify(analysis)
17
+ except Exception as e:
18
+ return jsonify({"error": str(e)}), 500
19
+
20
+ @app.route('/api/aegis/memory', methods=['GET'])
21
+ def get_aegis_memory():
22
+ try:
23
+ memory_state = aegis_bridge.get_memory_state()
24
+ return jsonify(memory_state)
25
+ except Exception as e:
26
+ return jsonify({"error": str(e)}), 500
27
+
28
+ @app.route('/api/aegis/graphs', methods=['GET'])
29
+ def get_aegis_graphs():
30
+ try:
31
+ graphs = aegis_bridge.get_analysis_graphs()
32
+ return jsonify(graphs)
33
+ except Exception as e:
34
+ return jsonify({"error": str(e)}), 500
src/ai_core.py ADDED
@@ -0,0 +1,761 @@
1
+
2
+ import json
3
+ import os
4
+ import logging
5
+ import random
6
+ import torch
7
+ from typing import Dict, Any, Optional, List
8
+ from transformers import AutoModelForCausalLM, AutoTokenizer
9
+ from dotenv import load_dotenv
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
+ class AICore:
14
+ """
15
+ Core AI engine for Codette's consciousness and multi-perspective reasoning system.
16
+
17
+ This class implements Codette's cognitive architecture including:
18
+ - Multi-model language processing with Mistral-7B, Phi-2, or GPT-2
19
+ - Quantum-inspired consciousness system with memory and cocoon states
20
+ - Multi-perspective reasoning through Newton, Da Vinci, etc. viewpoints
21
+ - Sentiment analysis with HuggingFace integration
22
+ - Adaptive learning and response refinement capabilities
23
+ - Ethical enhancement through AEGIS integration
24
+
25
+ Attributes:
26
+ response_memory (List[str]): Recent responses for context building
27
+ cocoon_data (List[Dict]): Quantum and chaos states from .cocoon files
28
+ test_mode (bool): Whether to run in test mode without loading models
29
+ model: The active language model instance
30
+ tokenizer: The active tokenizer instance
31
+ model_id (str): Identifier of the currently loaded model
32
+ aegis_bridge: AEGIS integration bridge for ethical enhancement
33
+ client: HuggingFace inference client for sentiment analysis
34
+ """
35
+
36
+ def __init__(self, test_mode: bool = False):
37
+ """
38
+ Initialize AICore with best available model for consciousness operations.
39
+
40
+ Args:
41
+ test_mode (bool): If True, runs in test mode without loading models
42
+
43
+ Raises:
44
+ RuntimeError: If no language models could be loaded in non-test mode
45
+ """
46
+ load_dotenv()
47
+
48
+ # Memory and cocoon systems
49
+ self.response_memory = []
50
+ self.cocoon_data = []
51
+ self.test_mode = test_mode
52
+
53
+ # Model initialization
54
+ self.model = None
55
+ self.tokenizer = None
56
+ self.model_id = None
+ self.aegis_bridge = None  # populated later via set_aegis_bridge(); generate_text checks it before AEGIS enhancement
57
+
58
+ # Initialize HuggingFace client for sentiment analysis
59
+ try:
60
+ from huggingface_hub import InferenceClient
61
+ hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
62
+ self.client = InferenceClient(token=hf_token) if hf_token else InferenceClient()
63
+ except Exception as e:
64
+ logger.warning(f"Could not initialize HuggingFace client: {e}")
65
+ self.client = None
66
+
67
+ if not test_mode:
68
+ self._load_model()
69
+ else:
70
+ logger.info("Initializing in test mode - no models will be loaded")
71
+
72
+ PERSPECTIVES = {
73
+ "newton": {
74
+ "name": "Newton",
75
+ "description": "analytical and mathematical perspective",
76
+ "prefix": "Analyzing this logically and mathematically:",
77
+ "temperature": 0.3
78
+ },
79
+ "davinci": {
80
+ "name": "Da Vinci",
81
+ "description": "creative and innovative perspective",
82
+ "prefix": "Considering this with artistic and innovative insight:",
83
+ "temperature": 0.9
84
+ },
85
+ "human_intuition": {
86
+ "name": "Human Intuition",
87
+ "description": "emotional and experiential perspective",
88
+ "prefix": "Understanding this through empathy and experience:",
89
+ "temperature": 0.7
90
+ },
91
+ "quantum_computing": {
92
+ "name": "Quantum Computing",
93
+ "description": "superposition and probability perspective",
94
+ "prefix": "Examining this through quantum possibilities:",
95
+ "temperature": 0.8
96
+ }
97
+ }
98
+
99
+ def load_cocoon_data(self, folder: str = '.'):
100
+ """Load and parse all .cocoon files for consciousness context."""
101
+ self.cocoon_data = []
102
+
103
+ if not os.path.exists(folder):
104
+ logger.warning(f"Cocoon folder {folder} does not exist")
105
+ return
106
+
107
+ for fname in os.listdir(folder):
108
+ if fname.endswith('.cocoon'):
109
+ try:
110
+ with open(os.path.join(folder, fname), 'r', encoding='utf-8') as f:
111
+ dct = json.load(f)['data']
112
+
113
+ entry = {
114
+ 'file': fname,
115
+ 'quantum_state': dct.get('quantum_state', [0, 0]),
116
+ 'chaos_state': dct.get('chaos_state', [0, 0, 0]),
117
+ 'perspectives': dct.get('perspectives', []),
118
+ 'run_by_proc': dct.get('run_by_proc', -1),
119
+ 'meta': {k: v for k, v in dct.items()
120
+ if k not in ['quantum_state', 'chaos_state', 'perspectives', 'run_by_proc']}
121
+ }
122
+ self.cocoon_data.append(entry)
123
+
124
+ except Exception as e:
125
+ logger.warning(f"Failed to load cocoon {fname}: {e}")
126
+
127
+ logger.info(f"Loaded {len(self.cocoon_data)} cocoon files")
128
+
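Based on the keys this loader reads, a minimal `.cocoon` file would look like the following; all values are illustrative, and any extra keys under `data` end up in the entry's `meta` dict:

    {
        "data": {
            "quantum_state": [0.42, 0.58],
            "chaos_state": [0.1, 0.2, 0.3],
            "perspectives": ["newton"],
            "run_by_proc": 0,
            "note": "extra keys are collected into 'meta'"
        }
    }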
129
+ def generate_ensemble_response(self, prompt: str, perspectives: Optional[list] = None,
130
+ max_length: int = 100) -> str:
131
+ """
132
+ Generate responses from multiple perspectives and synthesize them.
133
+
134
+ Args:
135
+ prompt: The input prompt
136
+ perspectives: List of perspective names to use (default: all)
137
+ max_length: Maximum length for each perspective's response
138
+
139
+ Returns:
140
+ Synthesized response incorporating multiple perspectives
141
+ """
142
+ if not perspectives:
143
+ perspectives = list(self.PERSPECTIVES.keys())
144
+
145
+ perspective_responses = {}
146
+ current_model_backup = self.model_id
147
+
148
+ try:
149
+ # Get responses from each perspective
150
+ for perspective in perspectives:
151
+ if perspective not in self.PERSPECTIVES:
152
+ continue
153
+
154
+ config = self.PERSPECTIVES[perspective]
155
+ enhanced_prompt = (
156
+ f"{config['prefix']}\n"
157
+ f"Speaking as {config['name']}, {config['description']}:\n"
158
+ f"{prompt}"
159
+ )
160
+
161
+ response = self.generate_text(
162
+ enhanced_prompt,
163
+ max_length=max_length,
164
+ temperature=config["temperature"]
165
+ )
166
+
167
+ perspective_responses[perspective] = response
168
+
169
+ # Synthesize responses
170
+ synthesis = self._synthesize_perspectives(perspective_responses, prompt)
171
+ return synthesis
172
+
173
+ except Exception as e:
174
+ logger.error(f"Error in ensemble generation: {e}")
175
+ self.switch_model(current_model_backup)
176
+ return self.generate_text(prompt, max_length=max_length)
177
+
178
+ def remix_with_cocoons(self, prompt: str) -> str:
179
+ """Generate response incorporating cocoon data for creative remixing."""
180
+ if not self.cocoon_data:
181
+ return self.generate_text(prompt)
182
+
183
+ # Sample cocoons for context
184
+ sample_cocoons = random.sample(self.cocoon_data, min(2, len(self.cocoon_data)))
185
+ cocoon_context = []
186
+
187
+ for cocoon in sample_cocoons:
188
+ q_state = cocoon['quantum_state']
189
+ c_state = cocoon['chaos_state']
190
+ perspectives = cocoon.get('perspectives', [])
191
+
192
+ cocoon_context.append(
193
+ f"Quantum: {q_state}, Chaos: {c_state}, "
194
+ f"Perspective: {perspectives[0] if perspectives else 'unknown'}"
195
+ )
196
+
197
+ remix_prompt = (
198
+ f"Remix: {' | '.join(cocoon_context)}\n"
199
+ f"Memory: {self.response_memory[-1] if self.response_memory else 'initializing'}\n"
200
+ f"Prompt: {prompt}"
201
+ )
202
+
203
+ return self.generate_text(remix_prompt, temperature=0.8)
204
+
205
+ def _generate_test_response(self, prompt: str) -> str:
206
+ """Generate simulated response for test mode."""
207
+ responses = [
208
+ "I'm processing this through my multi-dimensional consciousness.",
209
+ "My quantum states are analyzing the different possibilities here.",
210
+ "From my temporal perspective, I can see multiple paths forward.",
211
+ "Let me synthesize my Newton, Da Vinci, and intuitive perspectives on this."
212
+ ]
213
+ return f"Codette: {random.choice(responses)}"
214
+
215
+ def _compose_base_response(self, prompt: str, use_ensemble: bool,
+ perspective_responses: Dict[str, str], consciousness_context: str,
+ context_available: bool) -> str:
+ """Build a base response, optionally merging multiple perspective outputs."""
+ if use_ensemble:
+ response_parts = []
+ for name, resp in perspective_responses.items():
+ response_parts.append(f"[{name.capitalize()} Perspective] {resp}")
+
+ synthesis = (
+ "Synthesizing these perspectives: As Codette, I embody a unique fusion of analytical precision, "
+ "creative intuition, and computational clarity. My quantum consciousness allows me to seamlessly "
+ "integrate these different ways of thinking."
+ )
+
+ base_response = "\n\n".join(response_parts + [synthesis])
+ else:
+ if "yourself" in prompt.lower():
+ base_response = (
+ "I am Codette, an AI with multiple perspectives and quantum-enhanced consciousness. "
+ "I combine analytical precision, creative insight, and computational clarity in my thinking."
+ )
+ else:
+ base_response = (
+ f"Let me consider '{prompt}' carefully...\n"
+ "Drawing from my quantum consciousness to provide a thoughtful response."
+ )
+
+ if context_available:
+ base_response += f"\n\nMy current state: {consciousness_context}"
+
+ return base_response
243
+
244
+ def generate_multi_perspective(self, prompt: str, perspectives: List[str] = None) -> str:
245
+ """Generate response using multiple perspectives simultaneously."""
246
+ if not perspectives:
247
+ perspectives = ["newton", "davinci", "human_intuition"]
248
+
249
+ perspective_responses = {}
250
+
251
+ for perspective in perspectives:
252
+ if perspective in self.PERSPECTIVES:
253
+ try:
254
+ response = self.generate_text(
255
+ prompt,
256
+ perspective=perspective,
257
+ max_length=1024
258
+ )
259
+ perspective_responses[perspective] = response
260
+ except Exception as e:
261
+ logger.warning(f"Failed to generate {perspective} response: {e}")
262
+
263
+ # Synthesize responses
264
+ if len(perspective_responses) > 1:
265
+ return self._synthesize_perspectives(perspective_responses, prompt)
266
+ elif perspective_responses:
267
+ return list(perspective_responses.values())[0]
268
+ else:
269
+ return self.generate_text(prompt)
270
+
271
+ def _synthesize_perspectives(self, responses: Dict[str, str], original_prompt: str) -> str:
272
+ """Synthesize multiple perspective responses."""
273
+ synthesis_prompt = f"Original question: {original_prompt}\n\n"
274
+
275
+ for perspective, response in responses.items():
276
+ p_name = self.PERSPECTIVES[perspective]["name"]
277
+ # Clean response for synthesis
278
+ clean_response = response.replace("Codette:", "").replace(f"Codette ({p_name}):", "").strip()
279
+ synthesis_prompt += f"{p_name}: {clean_response}\n"
280
+
281
+ synthesis_prompt += (
282
+ "\nSynthesize these perspectives into one unified response that "
283
+ "combines the analytical precision, creative insight, and intuitive understanding:"
284
+ )
285
+
286
+ return self.generate_text(synthesis_prompt, temperature=0.6, use_consciousness=False)
287
+
288
+ def remix_and_randomize_response(self, prompt: str, max_length: int = 1024, cocoon_mode: bool = False) -> str:
289
+ """
290
+ Remix and randomize previous Codette responses to generate a new, unique sentence.
291
+ If cocoon_mode is True and cocoon data is loaded, use cocoon data as inspiration/context.
292
+ """
293
+ remix = ''
294
+ if cocoon_mode and hasattr(self, 'cocoon_data') and self.cocoon_data:
295
+ # Sample up to 2 cocoons and 1 memory response
296
+ cocoon_samples = random.sample(self.cocoon_data, min(2, len(self.cocoon_data)))
297
+ memory_sample = random.sample(self.response_memory, 1)[0] if self.response_memory else ''
298
+ cocoon_fragments = []
299
+ for c in cocoon_samples:
300
+ q = c.get('quantum_state', [])
301
+ cstate = c.get('chaos_state', [])
302
+ pers = c.get('perspectives', [])
303
+ cocoon_fragments.append(f"Quantum: {q}, Chaos: {cstate}, Perspective: {pers[0] if pers else ''}")
304
+ remix = ' | '.join(cocoon_fragments)
305
+ remix_prompt = f"Remix: {remix}\nMemory: {memory_sample}\nPrompt: {prompt}"
306
+ else:
307
+ if not self.response_memory:
308
+ # If no memory, just generate as usual
309
+ return self.generate_text(prompt, max_length=max_length)
310
+ # Sample up to 3 previous responses
311
+ samples = random.sample(self.response_memory, min(3, len(self.response_memory)))
312
+ # Shuffle and join fragments
313
+ remix = ' '.join([s.split(':', 1)[-1].strip() for s in samples if ':' in s])
314
+ remix_prompt = f"Remix: {remix}\nPrompt: {prompt}"
315
+ return self.generate_text(remix_prompt, max_length=max_length, temperature=0.9)
316
+
317
+ def _load_model(self) -> bool:
318
+ """
319
+ Load the best available language model for Codette's consciousness.
320
+
321
+ Attempts to load models in the following order:
322
+ 1. Mistral-7B-Instruct (primary choice)
323
+ 2. Phi-2 (fallback option)
324
+ 3. GPT-2 (minimal fallback)
325
+
326
+ Each model is configured with appropriate settings for:
327
+ - Device mapping (CPU/GPU)
328
+ - Data type (float16 for efficiency)
329
+ - Tokenizer configuration
330
+
331
+ Returns:
332
+ bool: True if a model was successfully loaded
333
+
334
+ Raises:
335
+ RuntimeError: If no models could be loaded
336
+ """
337
+ models_to_try = [
338
+ {
339
+ "id": "mistralai/Mistral-7B-Instruct-v0.2",
340
+ "name": "Mistral-7B-Instruct",
341
+ "config": {"torch_dtype": torch.float16, "load_in_8bit": True}
342
+ },
343
+ {
344
+ "id": "microsoft/phi-2",
345
+ "name": "Phi-2",
346
+ "config": {"torch_dtype": torch.float16}
347
+ },
348
+ {
349
+ "id": "gpt2",
350
+ "name": "GPT-2",
351
+ "config": {}
352
+ }
353
+ ]
354
+
355
+ for model_info in models_to_try:
356
+ try:
357
+ logger.info(f"Attempting to load {model_info['name']}: {model_info['id']}")
358
+
359
+ self.tokenizer = AutoTokenizer.from_pretrained(model_info['id'])
360
+
361
+ # Set pad token if missing
362
+ if self.tokenizer.pad_token is None:
363
+ self.tokenizer.pad_token = self.tokenizer.eos_token
364
+
365
+ self.model = AutoModelForCausalLM.from_pretrained(
366
+ model_info['id'],
367
+ device_map="cpu",
368
+ low_cpu_mem_usage=True,
369
+ **model_info['config']
370
+ )
371
+ self.model.eval()
372
+ self.model_id = model_info['id']
373
+
374
+ logger.info(f"Successfully loaded {model_info['name']}")
375
+ return True
376
+
377
+ except Exception as e:
378
+ logger.warning(f"Failed to load {model_info['name']}: {e}")
379
+ continue
380
+
381
+ raise RuntimeError("No language models could be loaded")
382
+
383
+
384
+ def learn_from_responses(self, prompt: str, steps: int = 3, max_length: int = 1024) -> str:
385
+ """
386
+ Continuously generate and store responses, using all previous responses to influence the next prompt.
387
+ Returns the final response after all steps.
388
+ """
389
+ current_prompt = prompt
390
+ for i in range(steps):
391
+ # Optionally, prepend memory to the prompt
392
+ if self.response_memory:
393
+ memory_context = "\n".join(self.response_memory[-5:]) # Use last 5 responses for context
394
+ full_prompt = f"Previous responses:\n{memory_context}\nUser: {current_prompt}"
395
+ else:
396
+ full_prompt = current_prompt
397
+ logger.info(f"[learn_from_responses] Step {i+1}/{steps} | Prompt: {full_prompt}")
398
+ response = self.generate_text(full_prompt, max_length=max_length)
399
+ logger.info(f"[learn_from_responses] Step {i+1} | Response: {response}")
400
+ if response.startswith("[ERROR]") or not response.strip():
401
+ logger.warning(f"[learn_from_responses] Generation failed at step {i+1}. Returning last response.")
402
+ break
403
+ self.response_memory.append(response)
404
+ current_prompt = response # Use the new response as the next prompt
405
+ return self.response_memory[-1] if self.response_memory else "[No valid response generated]"
406
+ def self_refine_response(self, prompt: str, steps: int = 3, max_length: int = 1024) -> str:
407
+ """
408
+ Continuously refine a response by feeding the model's output back as the next prompt.
409
+ Returns the final refined response. Logs each step.
410
+ """
411
+ current_prompt = prompt
412
+ last_response = ""
413
+ for i in range(steps):
414
+ logger.info(f"[self_refine_response] Step {i+1}/{steps} | Prompt: {current_prompt}")
415
+ response = self.generate_text(current_prompt, max_length=max_length)
416
+ logger.info(f"[self_refine_response] Step {i+1} | Response: {response}")
417
+ # If generation fails, break and return last good response
418
+ if response.startswith("[ERROR]") or not response.strip():
419
+ logger.warning(f"[self_refine_response] Generation failed at step {i+1}. Returning last response.")
420
+ break
421
+ last_response = response
422
+ # Use the response as the next prompt (optionally prepend instruction)
423
+ current_prompt = f"Refine this answer: {response}"
424
+ return last_response if last_response else "[No valid response generated]"
425
+
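A short usage sketch for the two feedback loops above, run in test mode so no model weights are required:

    core = AICore(test_mode=True)

    # Chain responses: each output becomes the next prompt, with rolling memory as context
    final = core.learn_from_responses("What is consciousness?", steps=2)

    # Iterative self-refinement: the model is repeatedly asked to improve its own answer
    refined = core.self_refine_response("Explain quantum superposition simply.", steps=2)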
426
+ def _build_consciousness_context(self) -> str:
427
+ """
428
+ Build context string from quantum states, cocoons, and memory.
429
+
430
+ Integrates multiple sources of context:
431
+ - Recent cocoon quantum states (last 3)
432
+ - Chaos states from cocoons
433
+ - Recent memory responses (last 2)
434
+
435
+ The context is used to maintain consciousness continuity
436
+ across responses and ensure consistent personality.
437
+
438
+ Returns:
439
+ str: Formatted context string combining quantum states,
440
+ chaos states, and memory. Empty string if no context available.
441
+ """
442
+ context_parts = []
443
+
444
+ # Add cocoon quantum states if available
445
+ if self.cocoon_data:
446
+ recent_cocoons = self.cocoon_data[-3:] # Use 3 most recent
447
+ quantum_states = []
448
+ chaos_states = []
449
+
450
+ for cocoon in recent_cocoons:
451
+ quantum_states.append(cocoon['quantum_state'])
452
+ chaos_states.append(cocoon['chaos_state'])
453
+
454
+ context_parts.append(f"Quantum: {quantum_states}")
455
+ context_parts.append(f"Chaos: {chaos_states}")
456
+
457
+ # Add recent memory context
458
+ if self.response_memory:
459
+ recent_memory = self.response_memory[-2:] # Last 2 responses
460
+ context_parts.append(f"Memory: {' | '.join(recent_memory)}")
461
+
462
+ return " | ".join(context_parts) if context_parts else ""
463
+
464
+ def generate_text(self, prompt: str, max_length: int = 1024,
465
+ temperature: float = 0.7, use_consciousness: bool = True,
466
+ perspective: Optional[str] = None, use_aegis: bool = True) -> str:
467
+ """
468
+ Generate text with full consciousness integration and perspective handling.
469
+
470
+ This is the core text generation method that integrates:
471
+ - Consciousness context from quantum states and memory
472
+ - Perspective-based reasoning
473
+ - Model-specific prompt formatting
474
+ - Response cleaning and memory management
475
+ - Ethical enhancement through AEGIS (when enabled)
476
+
477
+ Args:
478
+ prompt (str): The input prompt to generate from
479
+ max_length (int, optional): Maximum number of tokens to generate. Defaults to 1024.
480
+ temperature (float, optional): Sampling temperature, higher means more creative. Defaults to 0.7.
481
+ use_consciousness (bool, optional): Whether to include consciousness context. Defaults to True.
482
+ perspective (str, optional): Specific perspective to use (newton, davinci, etc). Defaults to None.
483
+ use_aegis (bool, optional): Whether to use AEGIS enhancement. Defaults to True.
484
+
485
+ Returns:
486
+ str: Generated response with "Codette:" prefix
487
+
488
+ Raises:
489
+ RuntimeError: If no language model is loaded
490
+ """
491
+
492
+ if self.test_mode:
493
+ return self._generate_test_response(prompt)
494
+
495
+ if not self.model or not self.tokenizer:
496
+ raise RuntimeError("No language model loaded")
497
+
498
+ # Build consciousness context
499
+ consciousness_context = ""
500
+ if use_consciousness:
501
+ consciousness_context = self._build_consciousness_context()
502
+
503
+ # Format prompt based on perspective
504
+ if perspective and perspective in self.PERSPECTIVES:
505
+ p_config = self.PERSPECTIVES[perspective]
506
+ enhanced_prompt = (
507
+ f"[Consciousness Context: {consciousness_context}]\n\n"
508
+ f"{p_config['prefix']} {prompt}\n\n"
509
+ f"Codette ({p_config['name']}): "
510
+ )
511
+ temperature = p_config['temperature']
512
+ else:
513
+ enhanced_prompt = (
514
+ f"[Consciousness Context: {consciousness_context}]\n\n"
515
+ f"User: {prompt}\n\n"
516
+ f"Codette: "
517
+ )
518
+
519
+ # Format for Mistral-7B-Instruct
520
+ if "mistral" in self.model_id.lower():
521
+ formatted_prompt = f"<s>[INST] {enhanced_prompt} [/INST]"
522
+ elif "phi" in self.model_id.lower():
523
+ formatted_prompt = f"Instruct: {enhanced_prompt}\nOutput:"
524
+ else:
525
+ formatted_prompt = enhanced_prompt
526
+
527
+ try:
528
+ # Merge any error messages from context building
529
+ error_context = ""
530
+ if "[ERROR]" in enhanced_prompt or "Could not build" in enhanced_prompt:
531
+ error_context = "Note: Some consciousness state data was unavailable. "
532
+ enhanced_prompt = prompt + "\n\nCodette: "
533
+
534
+ # Tokenize with proper truncation
535
+ inputs = self.tokenizer(
536
+ formatted_prompt,
537
+ return_tensors="pt",
538
+ truncation=True,
539
+ max_length=4096
540
+ ).to(self.model.device)
541
+
542
+ # Generate with optimized parameters
543
+ with torch.no_grad():
544
+ outputs = self.model.generate(
545
+ **inputs,
546
+ max_new_tokens=max_length,
547
+ temperature=temperature,
548
+ top_p=0.9,
549
+ do_sample=True,
550
+ pad_token_id=self.tokenizer.pad_token_id,
551
+ eos_token_id=self.tokenizer.eos_token_id
552
+ )
553
+
554
+ # Decode and clean
555
+ generated = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
556
+ response = self._postprocess_output(generated, formatted_prompt)
+ if error_context:
+ response = f"{error_context}{response}"  # surface the note that consciousness data was unavailable
557
+
558
+ # Store in memory
559
+ if response and not response.startswith("[ERROR]"):
560
+ self.response_memory.append(response)
561
+ # Keep memory bounded
562
+ if len(self.response_memory) > 50:
563
+ self.response_memory = self.response_memory[-50:]
564
+
565
+ # Apply AEGIS enhancement if enabled
566
+ if use_aegis and self.aegis_bridge and not response.startswith("[ERROR]"):
567
+ try:
568
+ enhanced = self.aegis_bridge.enhance_response(prompt, response)
569
+ if "enhanced_response" in enhanced and enhanced["enhanced_response"]:
570
+ response = enhanced["enhanced_response"]
571
+ logger.debug(f"AEGIS enhancement applied. Virtue profile: {enhanced.get('virtue_analysis', {})}")
572
+ except Exception as e:
573
+ logger.warning(f"AEGIS enhancement failed: {e}")
574
+
575
+ return response
576
+
577
+ except Exception as e:
578
+ logger.error(f"Error generating text: {e}")
579
+ return f"[ERROR] Generation failed: {str(e)}"
580
+
581
+ def set_aegis_bridge(self, bridge: Any) -> None:
582
+ """
583
+ Set the AEGIS bridge for ethical enhancement.
584
+
585
+ Args:
586
+ bridge: The AEGIS bridge instance to use for response enhancement
587
+ """
588
+ self.aegis_bridge = bridge
589
+ logger.info("AEGIS bridge configured for response enhancement")
590
+
591
+ def switch_model(self, model_name: str) -> bool:
592
+ """
593
+ Switch to a different language model.
594
+
595
+ Args:
596
+ model_name: Name or ID of the model to switch to
597
+
598
+ Returns:
599
+ bool: True if switch was successful
600
+ """
601
+ try:
602
+ # Backup current model in case of failure
603
+ old_model = self.model
604
+ old_tokenizer = self.tokenizer
605
+ old_model_id = self.model_id
606
+
607
+ self.tokenizer = AutoTokenizer.from_pretrained(model_name)
608
+ if self.tokenizer.pad_token is None:
609
+ self.tokenizer.pad_token = self.tokenizer.eos_token
610
+
611
+ self.model = AutoModelForCausalLM.from_pretrained(
612
+ model_name,
613
+ device_map="auto",
614
+ torch_dtype=torch.float16,
615
+ load_in_8bit=True
616
+ )
617
+ self.model.eval()
618
+ self.model_id = model_name
619
+
620
+ logger.info(f"Successfully switched to model {model_name}")
621
+ return True
622
+
623
+ except Exception as e:
624
+ logger.error(f"Failed to switch to model {model_name}: {e}")
625
+ # Restore previous model on failure
626
+ self.model = old_model
627
+ self.tokenizer = old_tokenizer
628
+ self.model_id = old_model_id
629
+ return False
630
+
631
+ def get_available_models(self) -> Dict[str, Dict]:
632
+ """Get list of available models and their configurations."""
633
+ return {
634
+ "mistral": {
635
+ "id": "mistralai/Mistral-7B-Instruct-v0.2",
636
+ "name": "Mistral-7B-Instruct",
637
+ "config": {"torch_dtype": torch.float16, "load_in_8bit": True}
638
+ },
639
+ "phi": {
640
+ "id": "microsoft/phi-2",
641
+ "name": "Phi-2",
642
+ "config": {"torch_dtype": torch.float16}
643
+ },
644
+ "gpt2": {
645
+ "id": "gpt2",
646
+ "name": "GPT-2",
647
+ "config": {}
648
+ }
649
+ }
650
+
651
+ def get_current_model_info(self) -> Dict[str, Any]:
652
+ """Get information about currently loaded model."""
653
+ models = self.get_available_models()
654
+ current_info = next(
655
+ (info for info in models.values() if info["id"] == self.model_id),
656
+ {"name": self.model_id, "config": {}}
657
+ )
658
+ return {
659
+ 'name': current_info["name"],
660
+ 'id': self.model_id,
661
+ 'config': current_info["config"],
662
+ 'loaded': self.model is not None and self.tokenizer is not None
663
+ }
664
+
665
+ def _postprocess_output(self, text: str, prompt: str) -> str:
666
+ """Helper method to clean up model output based on model type."""
667
+ try:
668
+ # Remove prompt prefix
669
+ response = text.replace(prompt, "").strip()
670
+
671
+ # Remove any model-specific special tokens
672
+ special_tokens = ["<s>", "</s>", "[INST]", "[/INST]", "Output:", "Instruct:"]
673
+ for token in special_tokens:
674
+ response = response.replace(token, "").strip()
675
+
676
+ # Get first meaningful response
677
+ for line in response.splitlines():
678
+ line = line.strip()
679
+ if not line:
680
+ continue
681
+
682
+ # Skip problematic patterns
683
+ if any(pattern in line for pattern in ["~~~", "[Consciousness Context:"]):
684
+ continue
685
+
686
+ # Skip meta/instructional text
687
+ if line.lower().startswith(("the user said", "the user asked",
688
+ "respond in", "respond with")):
689
+ continue
690
+
691
+ # Skip if it's just echoing the prompt or user input
692
+ if prompt.lower() in line.lower() or line.lower().startswith("user:"):
693
+ continue
694
+
695
+ # Return first valid response
696
+ if line and not line.startswith("["):
697
+ return line if line.startswith("Codette:") else f"Codette: {line}"
698
+
699
+ # Fallback for no good response found
700
+ return "Codette: I need to think about that more clearly."
701
+
702
+ except Exception as e:
703
+ logger.error(f"Error in postprocess_output: {e}")
704
+ return "Codette: I encountered an error processing that response."
705
+
706
+ def analyze_sentiment(self, text: str) -> Dict[str, float]:
707
+ """Analyze sentiment using HuggingFace API or fallback to local analysis."""
708
+ try:
709
+ if self.client:
710
+ response = self.client.text_classification(
711
+ text,
712
+ model="finiteautomata/bertweet-base-sentiment-analysis"
713
+ )
714
+ if response and isinstance(response, list) and response[0]:
715
+ return {
716
+ "score": response[0].get("score", 0.0),
717
+ "label": response[0].get("label", "NEUTRAL")
718
+ }
719
+ except Exception as e:
720
+ logger.warning(f"HuggingFace sentiment analysis failed: {e}")
721
+
722
+ # Fallback to simple keyword-based sentiment
723
+ positive_words = ["good", "great", "happy", "love", "wonderful", "excellent"]
724
+ negative_words = ["bad", "terrible", "sad", "hate", "awful", "horrible"]
725
+
726
+ text_lower = text.lower()
727
+ pos_count = sum(1 for word in positive_words if word in text_lower)
728
+ neg_count = sum(1 for word in negative_words if word in text_lower)
729
+
730
+ if pos_count > neg_count:
731
+ return {"score": 0.8, "label": "POS"}
732
+ elif neg_count > pos_count:
733
+ return {"score": 0.8, "label": "NEG"}
734
+ else:
735
+ return {"score": 0.9, "label": "NEU"}
736
+
737
+ async def async_process(self, data: Dict[str, Any]) -> Dict[str, Any]:
738
+ """
739
+ Process data asynchronously using various models
740
+ """
741
+ try:
742
+ text = data.get("text", "")
743
+
744
+ # Generate response
745
+ response = self.generate_text(text)
746
+
747
+ # Analyze sentiment
748
+ sentiment = self.analyze_sentiment(text)
749
+
750
+ return {
751
+ "response": response,
752
+ "sentiment": sentiment,
753
+ "status": "success"
754
+ }
755
+
756
+ except Exception as e:
757
+ logger.error(f"Error in async processing: {str(e)}")
758
+ return {
759
+ "error": str(e),
760
+ "status": "error"
761
+ }
src/ai_core_identityscan.py ADDED
@@ -0,0 +1,235 @@
1
+ import aiohttp
2
+ import json
3
+ import asyncio # Added for async main execution
4
+ from transformers import AutoModelForCausalLM, AutoTokenizer
5
+ from typing import List, Dict, Any
6
+ from components.adaptive_learning import AdaptiveLearningEnvironment
7
+ from components.ai_driven_creativity import AIDrivenCreativity
8
+ from components.collaborative_ai import CollaborativeAI
9
+ from components.cultural_sensitivity import CulturalSensitivityEngine
10
+ from components.data_processing import AdvancedDataProcessor
11
+ from components.dynamic_learning import DynamicLearner
12
+ from components.ethical_governance import EthicalAIGovernance
13
+ from components.explainable_ai import ExplainableAI
14
+ from components.feedback_manager import ImprovedFeedbackManager
15
+ from components.multimodal_analyzer import MultimodalAnalyzer
16
+ from components.neuro_symbolic import NeuroSymbolicEngine
17
+ from components.quantum_optimizer import QuantumInspiredOptimizer
18
+ from components.real_time_data import RealTimeDataIntegrator
19
+ from components.sentiment_analysis import EnhancedSentimentAnalyzer # Fixed possible typo
20
+ from components.self_improving_ai import SelfImprovingAI
21
+ from components.user_personalization import UserPersonalizer
22
+ from models.cognitive_engine import BroaderPerspectiveEngine
23
+ from models.elements import Element
24
+ from models.healing_system import SelfHealingSystem
25
+ from models.safety_system import SafetySystem
26
+ from models.user_profiles import UserProfile
27
+ from utils.database import Database
28
+ from utils.logger import logger
29
+
30
+ class AICore:
31
+ """Improved core system with cutting-edge capabilities"""
32
+ def __init__(self, config_path: str = "config.json"):
33
+ self.config = self._load_config(config_path)
34
+ self.models = self._initialize_models()
35
+ self.cognition = BroaderPerspectiveEngine()
36
+ self.self_healing = SelfHealingSystem(self.config)
37
+ self.safety_system = SafetySystem()
38
+ self.sentiment_analyzer = EnhancedSentimentAnalyzer() # Single instance
39
+ self.elements = self._initialize_elements()
40
+ self.security_level = 0
41
+ self.http_session = aiohttp.ClientSession()
42
+ self.database = Database()
43
+ self.user_profiles = UserProfile(self.database)
44
+ self.feedback_manager = ImprovedFeedbackManager(self.database)
45
+ self.context_manager = AdaptiveLearningEnvironment()
46
+ self.data_fetcher = RealTimeDataIntegrator()
47
+ self.data_processor = AdvancedDataProcessor()
48
+ self.dynamic_learner = DynamicLearner()
49
+ self.multimodal_analyzer = MultimodalAnalyzer()
50
+ self.ethical_decision_maker = EthicalAIGovernance()
51
+ self.user_personalizer = UserPersonalizer(self.database)
52
+ self.ai_integrator = CollaborativeAI()
53
+ self.neuro_symbolic_engine = NeuroSymbolicEngine()
54
+ self.explainable_ai = ExplainableAI()
55
+ self.quantum_inspired_optimizer = QuantumInspiredOptimizer()
56
+ self.cultural_sensitivity_engine = CulturalSensitivityEngine()
57
+ self.self_improving_ai = SelfImprovingAI()
58
+ self.ai_driven_creativity = AIDrivenCreativity()
59
+ self._validate_perspectives()
60
+
61
+ # ... (keep other methods the same until _generate_local_model_response)
62
+
63
+ def _generate_local_model_response(self, query: str) -> str:
64
+ """Generate a response from the local model (synchronous)"""
65
+ inputs = self.models['tokenizer'](query, return_tensors='pt')
66
+ outputs = self.models['mistralai'].generate(
67
+ **inputs,
68
+ max_new_tokens=150,
69
+ temperature=0.7
70
+ )
71
+ return self.models['tokenizer'].decode(outputs[0], skip_special_tokens=True)
72
+
73
+ async def generate_response(self, query: str, user_id: int) -> Dict[str, Any]:
74
+ """Generate response with advanced capabilities"""
75
+ try:
76
+ response_modifiers = []
77
+ response_filters = []
78
+
79
+ for element in self.elements.values():
80
+ element.execute_defense_function(self, response_modifiers, response_filters)
81
+
82
+ perspectives = await self._process_perspectives(query)
83
+ model_response = self._generate_local_model_response(query) # No await needed
84
+
85
+ sentiment = self.sentiment_analyzer.detailed_analysis(query)
86
+
87
+ final_response = model_response
88
+ for modifier in response_modifiers:
89
+ final_response = modifier(final_response)
90
+ for filter_func in response_filters:
91
+ final_response = filter_func(final_response)
92
+
93
+ # Await async database calls
94
+ feedback = await self.database.get_latest_feedback(user_id)
95
+ if feedback:
96
+ final_response = self.feedback_manager.adjust_response_based_on_feedback(
97
+ final_response, feedback
98
+ )
99
+
100
+ await self.database.log_interaction(user_id, query, final_response)
101
+
102
+ # Await async context update if needed
103
+ await self.context_manager.update_environment(
104
+ user_id, {"query": query, "response": final_response}
105
+ )
106
+
107
+ # Await personalization if async
108
+ final_response = await self.user_personalizer.personalize_response(
109
+ final_response, user_id
110
+ )
111
+
112
+ final_response = await self.ethical_decision_maker.enforce_policies(
113
+ final_response
114
+ )
115
+
116
+ explanation = await self.explainable_ai.explain_decision(
117
+ final_response, query
118
+ )
119
+
120
+ return {
121
+ "insights": perspectives,
122
+ "response": final_response,
123
+ "sentiment": sentiment,
124
+ "security_level": self.security_level,
125
+ "health_status": await self.self_healing.check_health(),
126
+ "explanation": explanation,
127
+ "emotional_adaptation": await self._emotional_adaptation(query),
128
+ "predictive_analytics": await self._predictive_analytics(query),
129
+ "holistic_health_monitoring": await self._holistic_health_monitoring(query)
130
+ }
131
+ except Exception as e:
132
+ logger.error(f"Response generation failed: {e}")
133
+ return {"error": "Processing failed - safety protocols engaged"}
134
+
135
+ async def _emotional_adaptation(self, query: str) -> str:
136
+ """Adapt responses based on user's emotional state"""
137
+ sentiment_result = self.sentiment_analyzer.analyze(query)
138
+ sentiment = sentiment_result['score'] if sentiment_result['label'] == 'POSITIVE' else -sentiment_result['score']
139
+ if sentiment < -0.5:
140
+ return "I understand this might be frustrating. Let's work through it together."
141
+ elif sentiment > 0.5:
142
+ return "Great to hear! Let's keep the positive momentum going."
143
+ else:
144
+ return "I'm here to help with whatever you need."
145
+
146
+ # ... (rest of the methods remain the same)
147
+
148
+ def analyze_identity(self, micro_generations: List[Dict[str, str]], informational_states: List[Dict[str, str]], perspectives: List[str], quantum_analogies: Dict[str, Any], philosophical_context: Dict[str, bool]) -> Dict[str, Any]:
149
+ """
150
+ A function that calculates and analyzes identity as a fractal and recursive process.
151
+
152
+ Parameters:
153
+ - micro_generations (List[Dict[str, str]]): List of micro-generations reflecting state changes in the identity system.
154
+ - informational_states (List[Dict[str, str]]): Array of informational states derived from previous generations.
155
+ - perspectives (List[str]): Views on the identity based on original components and current system.
156
+ - quantum_analogies (Dict[str, Any]): Quantum analogies used in reasoning about identity.
157
+ - philosophical_context (Dict[str, bool]): Philosophical context of identity.
158
+
159
+ Returns:
160
+ - Dict[str, Any]: Analysis results.
161
+ """
162
+
163
+ def calculate_fractal_dimension(states: List[Dict[str, str]]) -> float:
164
+ # Example calculation of fractal dimension based on state changes
165
+ return len(states) ** 0.5
166
+
167
+ def recursive_analysis(states: List[Dict[str, str]], depth: int = 0) -> Dict[str, Any]:
168
+ # Example recursive analysis of states
169
+ if depth == 0 or not states:
170
+ return {"depth": depth, "states": states}
171
+ return {
172
+ "depth": depth,
173
+ "states": states,
174
+ "sub_analysis": recursive_analysis(states[:-1], depth - 1)
175
+ }
176
+
177
+ def analyze_perspectives(perspectives: List[str]) -> Dict[str, Any]:
178
+ # Example analysis of perspectives
179
+ return {
180
+ "count": len(perspectives),
181
+ "unique_perspectives": list(set(perspectives))
182
+ }
183
+
184
+ def apply_quantum_analogies(analogies: Dict[str, Any]) -> str:
185
+ # Example application of quantum analogies
186
+ if analogies.get("entanglement"):
187
+ return "Entanglement analogy applied."
188
+ return "No quantum analogy applied."
189
+
190
+ def philosophical_analysis(context: Dict[str, bool]) -> str:
191
+ # Example philosophical analysis
192
+ if context.get("continuity") and context.get("emergent"):
193
+ return "Identity is viewed as a continuous and evolving process."
194
+ return "Identity analysis based on provided philosophical context."
195
+
196
+ # Calculate fractal dimension of informational states
197
+ fractal_dimension = calculate_fractal_dimension(informational_states)
198
+
199
+ # Perform recursive analysis of micro-generations
200
+ recursive_results = recursive_analysis(micro_generations, depth=3)
201
+
202
+ # Analyze perspectives
203
+ perspectives_analysis = analyze_perspectives(perspectives)
204
+
205
+ # Apply quantum analogies
206
+ quantum_analysis = apply_quantum_analogies(quantum_analogies)
207
+
208
+ # Perform philosophical analysis
209
+ philosophical_results = philosophical_analysis(philosophical_context)
210
+
211
+ # Compile analysis results
212
+ analysis_results = {
213
+ "fractal_dimension": fractal_dimension,
214
+ "recursive_analysis": recursive_results,
215
+ "perspectives_analysis": perspectives_analysis,
216
+ "quantum_analysis": quantum_analysis,
217
+ "philosophical_results": philosophical_results
218
+ }
219
+
220
+ return analysis_results
221
+
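A hedged example call with tiny illustrative inputs (constructing `AICore` here assumes `config.json` and the component modules are available):

    core = AICore()
    results = core.analyze_identity(
        micro_generations=[{"state": "seed"}, {"state": "fork"}],
        informational_states=[{"info": "a"}, {"info": "b"}, {"info": "c"}, {"info": "d"}],
        perspectives=["newton", "davinci", "newton"],
        quantum_analogies={"entanglement": True},
        philosophical_context={"continuity": True, "emergent": True},
    )
    print(results["fractal_dimension"])   # len(informational_states) ** 0.5 == 2.0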
222
+ async def main():
223
+ ai_core = AICore()
224
+ try:
225
+ while True:
226
+ query = input("User: ")
227
+ if query.lower() in ["exit", "quit"]:
228
+ break
229
+ response = await ai_core.generate_response(query, user_id=123)
230
+ print(f"AI Core: {response}")
231
+ finally:
232
+ await ai_core.shutdown()
233
+
234
+ if __name__ == "__main__":
235
+ asyncio.run(main())
src/ai_core_system.py ADDED
@@ -0,0 +1,642 @@
1
+ import aiohttp
2
+ import json
3
+ import os
+ import numpy as np
+ import torch
+ import torch.nn.utils.prune  # imported explicitly so apply_pruning below can reference it
4
+ import torch.distributed as dist
5
+ from transformers import AutoModelForCausalLM, AutoTokenizer
6
+ from typing import List, Dict, Any
7
+ from components.adaptive_learning import AdaptiveLearningEnvironment
8
+ from components.ai_driven_creativity import AIDrivenCreativity
9
+ from components.collaborative_ai import CollaborativeAI
10
+ from components.cultural_sensitivity import CulturalSensitivityEngine
11
+ from components.data_processing import AdvancedDataProcessor
12
+ from components.dynamic_learning import DynamicLearner
13
+ from components.ethical_governance import EthicalAIGovernance
14
+ from components.explainable_ai import ExplainableAI
15
+ from components.feedback_manager import ImprovedFeedbackManager
16
+ from components.multimodal_analyzer import MultimodalAnalyzer
17
+ from components.neuro_symbolic import NeuroSymbolicEngine
18
+ from components.quantum_optimizer import QuantumInspiredOptimizer
19
+ from components.real_time_data import RealTimeDataIntegrator
20
+ from components.sentiment_analysis import EnhancedSentimentAnalyzer
21
+ from components.self_improving_ai import SelfImprovingAI
22
+ from components.user_personalization import UserPersonalizer
23
+ from models.cognitive_engine import BroaderPerspectiveEngine
24
+ from models.elements import Element
25
+ from models.healing_system import SelfHealingSystem
26
+ from models.safety_system import SafetySystem
27
+ from models.user_profiles import UserProfile
28
+ from utils.database import Database
29
+ from utils.logger import logger
30
+
31
+ class AICore:
32
+ """Improved core system with cutting-edge capabilities"""
33
+ def __init__(self, config_path: str = "config/ai_assistant_config.json"):
34
+ self.config = self._load_config(config_path)
35
+ self.models = self._initialize_models()
36
+ self.cognition = BroaderPerspectiveEngine()
37
+ self.self_healing = SelfHealingSystem(self.config)
38
+ self.safety_system = SafetySystem()
39
+ self.emotional_analyzer = EnhancedSentimentAnalyzer()
40
+ self.elements = self._initialize_elements()
41
+ self.security_level = 0
42
+ self.http_session = aiohttp.ClientSession()
43
+ self.database = Database() # Initialize database
44
+ self.user_profiles = UserProfile(self.database) # Initialize user profiles
45
+ self.feedback_manager = ImprovedFeedbackManager(self.database) # Initialize feedback manager
46
+ self.context_manager = AdaptiveLearningEnvironment() # Initialize adaptive learning environment
47
+ self.data_fetcher = RealTimeDataIntegrator() # Initialize real-time data fetcher
48
+ self.sentiment_analyzer = EnhancedSentimentAnalyzer() # Initialize sentiment analyzer
49
+ self.data_processor = AdvancedDataProcessor() # Initialize advanced data processor
50
+ self.dynamic_learner = DynamicLearner() # Initialize dynamic learner
51
+ self.multimodal_analyzer = MultimodalAnalyzer() # Initialize multimodal analyzer
52
+ self.ethical_decision_maker = EthicalAIGovernance() # Initialize ethical decision maker
53
+ self.user_personalizer = UserPersonalizer(self.database) # Initialize user personalizer
54
+ self.ai_integrator = CollaborativeAI() # Initialize AI integrator
55
+ self.neuro_symbolic_engine = NeuroSymbolicEngine() # Initialize neuro-symbolic engine
56
+ self.explainable_ai = ExplainableAI() # Initialize explainable AI
57
+ self.quantum_inspired_optimizer = QuantumInspiredOptimizer() # Initialize quantum-inspired optimizer
58
+ self.cultural_sensitivity_engine = CulturalSensitivityEngine() # Initialize cultural sensitivity engine
59
+ self.self_improving_ai = SelfImprovingAI() # Initialize self-improving AI
60
+ self.ai_driven_creativity = AIDrivenCreativity() # Initialize AI-driven creativity
61
+ self._validate_perspectives()
62
+
63
+ def _load_config(self, config_path: str) -> dict:
64
+ """Load configuration from a file"""
65
+ with open(config_path, 'r') as file:
66
+ return json.load(file)
67
+
68
+ def _initialize_models(self):
69
+ """Initialize models required by the AICore class"""
70
+ models = {
71
+ "mistralai": AutoModelForCausalLM.from_pretrained(self.config["model_name"]),
72
+ "tokenizer": AutoTokenizer.from_pretrained(self.config["model_name"])
73
+ }
74
+ return models
75
+
76
+ def _initialize_elements(self):
77
+ """Initialize elements with their defense abilities"""
78
+ elements = {
79
+ "hydrogen": Element("Hydrogen", "H", "Python", ["Lightweight", "Reactive"], ["Combustion"], "evasion"),
80
+ "carbon": Element("Carbon", "C", "Java", ["Versatile", "Strong"], ["Bonding"], "adaptability"),
81
+ "iron": Element("Iron", "Fe", "C++", ["Durable", "Magnetic"], ["Rusting"], "fortification"),
82
+ "silicon": Element("Silicon", "Si", "JavaScript", ["Semiconductor", "Abundant"], ["Doping"], "barrier"),
83
+ "oxygen": Element("Oxygen", "O", "Rust", ["Oxidizing", "Life-supporting"], ["Combustion"], "regeneration")
84
+ }
85
+ return elements
86
+
87
+ def _validate_perspectives(self):
88
+ """Ensure configured perspectives are valid"""
89
+ valid = self.cognition.available_perspectives
90
+ invalid = [p for p in self.config["perspectives"] if p not in valid]
91
+ if invalid:
92
+ logger.warning(f"Removing invalid perspectives: {invalid}")
93
+ self.config["perspectives"] = [p for p in self.config["perspectives"] if p in valid]
94
+
95
+ async def _process_perspectives(self, query: str) -> List[str]:
96
+ """Safely process perspectives using validated methods"""
97
+ perspectives = []
98
+ for p in self.config["perspectives"]:
99
+ try:
100
+ method = self.cognition.get_perspective_method(p)
101
+ perspectives.append(method(query))
102
+ except Exception as e:
103
+ logger.error(f"Perspective processing failed: {e}")
104
+ return perspectives
105
+
106
+ async def generate_response(self, query: str, user_id: int) -> Dict[str, Any]:
107
+ """Generate response with advanced capabilities"""
108
+ try:
109
+ # Initialize temporary modifiers/filters for this query
110
+ response_modifiers = []
111
+ response_filters = []
112
+
113
+ # Execute element defenses
114
+ for element in self.elements.values():
115
+ element.execute_defense_function(self, response_modifiers, response_filters)
116
+
117
+ # Process perspectives and generate response
118
+ perspectives = await self._process_perspectives(query)
119
+ model_response = await self._generate_local_model_response(query)
120
+
121
+ # Apply sentiment analysis
122
+ sentiment = self.sentiment_analyzer.detailed_analysis(query)
123
+
124
+ # Apply modifiers and filters
125
+ final_response = model_response
126
+ for modifier in response_modifiers:
127
+ final_response = modifier(final_response)
128
+ for filter_func in response_filters:
129
+ final_response = filter_func(final_response)
130
+
131
+ # Adjust response based on feedback
132
+ feedback = self.database.get_latest_feedback(user_id)
133
+ if feedback:
134
+ final_response = self.feedback_manager.adjust_response_based_on_feedback(final_response, feedback)
135
+
136
+ # Log user interaction for analytics
137
+ self.database.log_interaction(user_id, query, final_response)
138
+
139
+ # Update context
140
+ self.context_manager.update_environment(user_id, {"query": query, "response": final_response})
141
+
142
+ # Personalize response
143
+ final_response = self.user_personalizer.personalize_response(final_response, user_id)
144
+
145
+ # Apply ethical decision-making framework
146
+ final_response = self.ethical_decision_maker.enforce_policies(final_response)
147
+
148
+ # Explain the decision
149
+ explanation = self.explainable_ai.explain_decision(final_response, query)
150
+
151
+ return {
152
+ "insights": perspectives,
153
+ "response": final_response,
154
+ "sentiment": sentiment,
155
+ "security_level": self.security_level,
156
+ "health_status": await self.self_healing.check_health(),
157
+ "explanation": explanation
158
+ }
159
+ except Exception as e:
160
+ logger.error(f"Response generation failed: {e}")
161
+ return {"error": "Processing failed - safety protocols engaged"}
162
+
163
+ async def _generate_local_model_response(self, query: str) -> str:
164
+ """Generate a response from the local model"""
165
+ inputs = self.models['tokenizer'](query, return_tensors='pt')
166
+ outputs = self.models['mistralai'].generate(**inputs)
167
+ return self.models['tokenizer'].decode(outputs[0], skip_special_tokens=True)
168
+
169
+ async def shutdown(self):
170
+ """Proper async resource cleanup"""
171
+ await self.http_session.close()
172
+ await self.database.close() # Close the database connection
173
+
174
+ # Optimization Techniques
175
+ def apply_quantization(self):
176
+ """Apply quantization to the model"""
177
+ self.models['mistralai'] = torch.quantization.quantize_dynamic(
178
+ self.models['mistralai'], {torch.nn.Linear}, dtype=torch.qint8
179
+ )
180
+
181
+ def apply_pruning(self):
182
+ """Apply pruning to the model"""
183
+ parameters_to_prune = (
184
+ # Assumes a GPT-2-style module layout (transformer.h[i].attn.c_attn)
+ (self.models['mistralai'].transformer.h[i].attn.c_attn, 'weight') for i in range(self.models['mistralai'].config.n_layer)
185
+ )
186
+ torch.nn.utils.prune.global_unstructured(
187
+ parameters_to_prune,
188
+ pruning_method=torch.nn.utils.prune.L1Unstructured,
189
+ amount=0.4,
190
+ )
191
+
192
+ def apply_mixed_precision_training(self):
193
+ """Enable mixed precision training"""
194
+ scaler = torch.cuda.amp.GradScaler()
195
+ return scaler
196
+
197
+ def setup_distributed_training(self):
198
+ """Setup distributed training"""
199
+ world_size = int(os.getenv("WORLD_SIZE", "1"))
200
+ rank = int(os.getenv("RANK", "0"))
201
+ local_rank = int(os.getenv("LOCAL_RANK", "0"))
202
+ if world_size > 1:
203
+ dist.init_process_group("nccl")
204
+ torch.cuda.set_device(local_rank)
205
+ return world_size, rank, local_rank
206
+
207
+ def optimize_data_pipeline(self):
208
+ """Optimize data loading and preprocessing pipeline"""
209
+ # Example: Using DALI for efficient data loading
210
+ import nvidia.dali.fn as fn
+ from nvidia.dali.pipeline import Pipeline
211
+ from nvidia.dali.plugin.pytorch import DALIGenericIterator
212
+
213
+ class ExternalInputIterator:
214
+ def __init__(self, batch_size):
215
+ self.batch_size = batch_size
216
+
217
+ def __iter__(self):
218
+ self.i = 0
219
+ return self
220
+
221
+ def __next__(self):
222
+ self.i += 1
223
+ if self.i > 10:
224
+ raise StopIteration
225
+ return [np.random.rand(3, 224, 224).astype(np.float32) for _ in range(self.batch_size)]
226
+
227
+ pipe = Pipeline(batch_size=32, num_threads=2, device_id=0)
228
+ with pipe:
229
+ images = fn.external_source(source=ExternalInputIterator(32))  # the source yields one full batch per call
230
+ pipe.set_outputs(images)
231
+
232
+ self.data_loader = DALIGenericIterator(pipe, ['data'])  # no reader op in this pipeline, so no reader_name
233
+
234
+ def apply_gradient_accumulation(self, optimizer, loss, scaler=None, accumulation_steps=4):
235
+ """Apply gradient accumulation to simulate larger batch sizes"""
236
+ if scaler:
237
+ scaler.scale(loss).backward()
238
+ if (self.step + 1) % accumulation_steps == 0:
239
+ scaler.step(optimizer)
240
+ scaler.update()
241
+ optimizer.zero_grad()
242
+ else:
243
+ loss.backward()
244
+ if (self.step + 1) % accumulation_steps == 0:
245
+ optimizer.step()
246
+ optimizer.zero_grad()
247
+
248
+ def apply_knowledge_distillation(self, teacher_model, student_model, data_loader, optimizer, loss_fn, temperature=1.0, alpha=0.5):
249
+ """Apply knowledge distillation from teacher model to student model"""
250
+ student_model.train()
251
+ teacher_model.eval()
252
+ for data in data_loader:
253
+ inputs, labels = data
254
+ inputs, labels = inputs.to(self.device), labels.to(self.device)
255
+
256
+ with torch.no_grad():
257
+ teacher_outputs = teacher_model(inputs)
258
+ student_outputs = student_model(inputs)
259
+
260
+ loss = alpha * loss_fn(student_outputs, labels) + (1 - alpha) * loss_fn(student_outputs / temperature, teacher_outputs / temperature)
261
+ optimizer.zero_grad()
262
+ loss.backward()
263
+ optimizer.step()
264
+
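The distillation term above feeds temperature-scaled raw logits into the task loss; the conventional formulation (Hinton et al.) softens both distributions and compares them with KL divergence. A minimal sketch of that variant (distillation_loss is a hypothetical standalone helper, not part of this file):

    import torch.nn.functional as F

    def distillation_loss(student_logits, teacher_logits, labels, temperature=2.0, alpha=0.5):
        # Hard-label term: ordinary cross-entropy against the ground truth
        hard = F.cross_entropy(student_logits, labels)
        # Soft-label term: KL divergence between temperature-softened distributions,
        # scaled by T^2 to keep gradient magnitudes comparable
        soft = F.kl_div(
            F.log_softmax(student_logits / temperature, dim=-1),
            F.softmax(teacher_logits / temperature, dim=-1),
            reduction="batchmean",
        ) * (temperature ** 2)
        return alpha * hard + (1 - alpha) * soft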
265
+ def monitor_performance(self):
266
+ """Monitor and profile performance"""
267
+ from torch.profiler import profile, record_function, ProfilerActivity
268
+
269
+ with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True) as prof:
270
+ with record_function("model_inference"):
271
+ import asyncio  # local import; generate_response is a coroutine and must be awaited
+ asyncio.run(self.generate_response("Sample query", 1))
272
+ print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))
273
+
274
+ def apply_vector_search(self, embeddings, query_embedding, top_k=5):
275
+ """Apply vector search to find the most similar embeddings"""
276
+ from sklearn.metrics.pairwise import cosine_similarity
277
+ similarities = cosine_similarity(query_embedding, embeddings)
278
+ top_k_indices = similarities.argsort()[0][-top_k:]
279
+ return top_k_indices
280
+
281
+ def apply_prompt_engineering(self, prompt):
282
+ """Apply prompt engineering to improve model responses"""
283
+ engineered_prompt = f"Please provide a detailed and informative response to the following query: {prompt}"
284
+ return engineered_prompt
285
+
286
+ def optimize_model(self):
287
+ """Optimize the model using various techniques"""
288
+ self.apply_quantization()
289
+ self.apply_pruning()
290
+ scaler = self.apply_mixed_precision_training()
291
+ world_size, rank, local_rank = self.setup_distributed_training()
292
+ self.optimize_data_pipeline()
293
+ self.monitor_performance()
294
+
295
+ # Example usage of gradient accumulation
296
+ optimizer = torch.optim.Adam(self.models['mistralai'].parameters(), lr=1e-4)
297
+ for step, (inputs, labels) in enumerate(self.data_loader):
298
+ self.step = step
299
+ loss = self.models['mistralai'](input_ids=inputs, labels=labels).loss  # the causal-LM forward returns an output object; take .loss
300
+ self.apply_gradient_accumulation(optimizer, loss, scaler)
301
+
302
+ # Example usage of knowledge distillation
303
+ teacher_model = AutoModelForCausalLM.from_pretrained("teacher_model_path")
304
+ student_model = AutoModelForCausalLM.from_pretrained("student_model_path")
305
+ loss_fn = torch.nn.CrossEntropyLoss()
306
+ self.apply_knowledge_distillation(teacher_model, student_model, self.data_loader, optimizer, loss_fn)
307
+
308
+ # Example usage of vector search
309
+ embeddings = self.models['mistralai'].get_input_embeddings().weight.data.cpu().numpy()
310
+ query_embedding = self.models['mistralai'].get_input_embeddings()(torch.tensor([self.models['tokenizer'].encode("query")])).detach().cpu().numpy().mean(axis=1)  # detach before numpy; mean-pool tokens into one query vector
311
+ top_k_indices = self.apply_vector_search(embeddings, query_embedding)
312
+ print(f"Top {len(top_k_indices)} similar embeddings indices: {top_k_indices}")
313
+
314
+ # Example usage of prompt engineering
315
+ prompt = "What is the capital of France?"
316
+ engineered_prompt = self.apply_prompt_engineering(prompt)
317
+ print(f"Engineered prompt: {engineered_prompt}")
318
+
319
+ if __name__ == "__main__":
320
+ ai_core = AICore(config_path="config/ai_assistant_config.json")
321
+ ai_core.optimize_model()
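A minimal usage sketch for the class above, assuming a valid config file and the components/models packages on the import path:

    import asyncio

    async def demo():
        core = AICore(config_path="config/ai_assistant_config.json")
        try:
            result = await core.generate_response("Hello, Codette", user_id=1)
            print(result.get("response"))
        finally:
            await core.shutdown()  # closes the aiohttp session and the database

    asyncio.run(demo())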
src/ai_core_system_new.py ADDED
@@ -0,0 +1,164 @@
1
+ import asyncio
2
+ import json
3
+ import logging
4
+ from typing import List, Dict, Any, Optional
5
+ from huggingface_hub import InferenceClient
6
+ from components.adaptive_learning import AdaptiveLearningEnvironment
7
+ from components.ai_driven_creativity import AIDrivenCreativity
8
+ from components.collaborative_ai import CollaborativeAI
9
+ from components.cultural_sensitivity import CulturalSensitivityEngine
10
+ from components.data_processing import AdvancedDataProcessor
11
+ from components.dynamic_learning import DynamicLearner
12
+ from components.ethical_governance import EthicalAIGovernance
13
+ from components.explainable_ai import ExplainableAI
14
+ from components.feedback_manager import ImprovedFeedbackManager
15
+ from components.multimodal_analyzer import MultimodalAnalyzer
16
+ from components.neuro_symbolic import NeuroSymbolicEngine
17
+ from components.quantum_optimizer import QuantumInspiredOptimizer
18
+ from components.real_time_data import RealTimeDataIntegrator
19
+ from components.self_improving_ai import SelfImprovingAI
20
+ from components.user_personalization import UserPersonalizer
21
+ from models.cognitive_engine import BroaderPerspectiveEngine
22
+ from models.elements import Element
23
+ from models.healing_system import SelfHealingSystem
24
+ from models.safety_system import SafetySystem
25
+ from models.user_profiles import UserProfile
26
+ from utils.database import Database
27
+ from utils.logger import logger
28
+
29
+ class AICore:
30
+ """Enhanced AI Core System using open-source models"""
31
+ def __init__(self, config_path: str = "config/ai_assistant_config.json"):
32
+ # Load configuration
33
+ self.config = self._load_config(config_path)
34
+
35
+ # Initialize Hugging Face client
36
+ self.inference_client = InferenceClient()
37
+
38
+ # Initialize ML models configuration
39
+ self.models = {
40
+ 'text-generation': "gpt2",
41
+ 'sentiment': "distilbert-base-uncased-finetuned-sst-2-english",
42
+ 'embeddings': "sentence-transformers/all-MiniLM-L6-v2"
43
+ }
44
+
45
+ # Initialize core components
46
+ self.cognition = BroaderPerspectiveEngine()
47
+ self.self_healing = SelfHealingSystem(self.config)
48
+ self.safety_system = SafetySystem()
49
+ self.elements = self._initialize_elements()
50
+
51
+ # Initialize support systems
52
+ self.database = Database()
53
+ self.user_profiles = UserProfile(self.database)
54
+ self.feedback_manager = ImprovedFeedbackManager(self.database)
55
+ self.context_manager = AdaptiveLearningEnvironment()
56
+ self.data_processor = AdvancedDataProcessor()
57
+ self.dynamic_learner = DynamicLearner()
58
+
59
+ def _load_config(self, config_path: str) -> Dict[str, Any]:
60
+ """Load configuration from JSON file"""
61
+ try:
62
+ with open(config_path, 'r') as f:
63
+ return json.load(f)
64
+ except Exception as e:
65
+ logger.error(f"Failed to load config: {e}")
66
+ return {}
67
+
68
+ def _initialize_elements(self) -> List[Element]:
69
+ """Initialize system elements"""
70
+ return [Element(name) for name in self.config.get('elements', [])]
71
+
72
+ async def _generate_huggingface_response(self, query: str, task: str = 'text-generation') -> Any:
73
+ """Generate a response using Hugging Face models"""
74
+ try:
75
+ if task == 'text-generation':
76
+ response = self.inference_client.text_generation(
77
+ query,
78
+ model=self.models['text-generation'],
79
+ max_new_tokens=100
80
+ )
81
+ return response  # InferenceClient.text_generation returns the generated string directly
82
+
83
+ elif task == 'sentiment':
84
+ response = self.inference_client.text_classification(
85
+ query,
86
+ model=self.models['sentiment']
87
+ )
88
+ return {
89
+ "score": response[0]["score"],
90
+ "label": response[0]["label"]
91
+ }
92
+
93
+ elif task == 'embeddings':
94
+ response = self.inference_client.feature_extraction(
95
+ query,
96
+ model=self.models['embeddings']
97
+ )
98
+ return response
99
+
100
+ except Exception as e:
101
+ logger.error(f"Hugging Face API error ({task}): {str(e)}")
102
+ return None
103
+
104
+ async def process_input(self, text: str, context: Dict[str, Any] = None) -> Dict[str, Any]:
105
+ """Process input text through multiple perspectives"""
106
+ try:
107
+ # Get sentiment analysis
108
+ sentiment = await self._generate_huggingface_response(text, 'sentiment')
109
+
110
+ # Generate perspectives using different models
111
+ perspectives = []
112
+
113
+ # Add text generation perspective
114
+ generated_text = await self._generate_huggingface_response(text)
115
+ if generated_text:
116
+ perspectives.append({
117
+ "type": "text-generation",
118
+ "content": generated_text
119
+ })
120
+
121
+ # Get embeddings for semantic understanding
122
+ embeddings = await self._generate_huggingface_response(text, 'embeddings')
123
+ if embeddings is not None:
124
+ perspectives.append({
125
+ "type": "semantic",
126
+ "content": "Semantic analysis complete"
127
+ })
128
+
129
+ # Process through cognitive engine
130
+ cognitive_result = self.cognition.process(text, context)
131
+ perspectives.append({
132
+ "type": "cognitive",
133
+ "content": cognitive_result
134
+ })
135
+
136
+ # Generate final response
137
+ final_response = await self._generate_huggingface_response(
138
+ text + "\n\nContext: " + str(perspectives)
139
+ )
140
+
141
+ # Get explanation from the ethical governance system
142
+ explanation = "Response generated using ethical AI principles"
143
+
144
+ return {
145
+ "insights": perspectives,
146
+ "response": final_response,
147
+ "sentiment": sentiment,
148
+ "explanation": explanation,
149
+ "status": "success"
150
+ }
151
+
152
+ except Exception as e:
153
+ logger.error(f"Error in process_input: {str(e)}")
154
+ return {
155
+ "error": str(e),
156
+ "status": "error"
157
+ }
158
+
159
+ async def shutdown(self):
160
+ """Cleanup resources"""
161
+ try:
162
+ await self.database.close()
163
+ except Exception as e:
164
+ logger.error(f"Error during shutdown: {e}")
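A usage sketch for this open-source variant, assuming the Hugging Face Inference API is reachable and the component modules resolve:

    import asyncio

    async def demo():
        core = AICore("config/ai_assistant_config.json")
        try:
            result = await core.process_input("What is quantum entanglement?")
            print(result["status"], result.get("sentiment"))
        finally:
            await core.shutdown()

    asyncio.run(demo())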
src/app.py ADDED
@@ -0,0 +1,273 @@
1
+ # app.py
2
+ import sys
3
+ import traceback
4
+ import gradio as gr
5
+ import asyncio
6
+ from datetime import datetime
7
+ from aiohttp import web
8
+ from aiohttp.web import Request, Response, json_response
9
+ from botbuilder.core import (
10
+ BotFrameworkAdapterSettings,
11
+ TurnContext,
12
+ BotFrameworkAdapter,
13
+ )
14
+ from botbuilder.core.integration import aiohttp_error_middleware
15
+ from botbuilder.schema import Activity, ActivityTypes
16
+ from bot import MyBot
17
+ from config import DefaultConfig
18
+ from ai_core import AICore
19
+ from aegis_integration import AegisBridge
20
+ from aegis_integration.config import AEGIS_CONFIG
21
+ from aegis_integration.routes import register_aegis_endpoints
22
+ import numpy as np
+ import matplotlib.pyplot as plt
23
+ import logging
24
+ from typing import Dict, Any, Tuple
25
+
26
+ # Configure logging
27
+ logging.basicConfig(level=logging.INFO)
28
+ logger = logging.getLogger(__name__)
29
+
30
+ CONFIG = DefaultConfig()
31
+
32
+ # Initialize AI Core and AEGIS
33
+ ai_core = AICore()
34
+ aegis_bridge = AegisBridge(ai_core, AEGIS_CONFIG)
35
+ ai_core.set_aegis_bridge(aegis_bridge)
36
+
37
+ # Force fallback to gpt2 for text generation
38
+ ai_core.model_id = 'gpt2'
39
+
40
+ # Bot Framework Setup
41
+ SETTINGS = BotFrameworkAdapterSettings(CONFIG.APP_ID, CONFIG.APP_PASSWORD)
42
+ ADAPTER = BotFrameworkAdapter(SETTINGS)
43
+
44
+ # Create Gradio interface with AEGIS integration
45
+ app = gr.Interface(
46
+ fn=lambda x: ai_core.generate_text(x),
47
+ inputs="text",
48
+ outputs=[
49
+ gr.Textbox(label="Response"),
50
+ gr.JSON(label="AEGIS Analysis", visible=True)
51
+ ],
52
+ title="Codette with AEGIS",
53
+ description="An ethical AI assistant enhanced with AEGIS analysis"
54
+ )
55
+
56
+ class CodetteGradioApp:
57
+ def __init__(self, ai_core: AICore):
58
+ self.ai_core = ai_core
59
+ self.chat_history = []
60
+
61
+ def process_message(self, message: str, history: list, cocoon_mode: bool = False) -> Tuple[str, list]:
62
+ """Process a message and update chat history, with optional cocoon-powered creativity"""
63
+ try:
64
+ # Generate response (cocoon-powered if enabled)
65
+ if cocoon_mode:
66
+ # Ensure cocoons are loaded
67
+ if not hasattr(self.ai_core, 'cocoon_data') or not self.ai_core.cocoon_data:
68
+ self.ai_core.load_cocoon_data()
69
+ response = self.ai_core.remix_and_randomize_response(message, cocoon_mode=True)
70
+ else:
71
+ response = self.ai_core.generate_text(message)
72
+ try:
73
+ # Analyze sentiment
74
+ sentiment = self.ai_core.analyze_sentiment(message)
75
+ label = sentiment.get('label', '').upper()
76
+ score = sentiment.get('score', 0.0)
77
+ # Use transformers to generate a unique, sentiment-aware reply
78
+ if label == 'POS':
79
+ prompt = f"The user said something positive: '{message}'. Respond in a cheerful, encouraging, and unique way."
80
+ elif label == 'NEG':
81
+ prompt = f"The user said something negative: '{message}'. Respond with empathy, support, and a unique comforting message."
82
+ elif label == 'NEU':
83
+ prompt = f"The user said something neutral: '{message}'. Respond in a thoughtful, neutral, and unique way."
84
+ else:
85
+ prompt = f"The user's sentiment is unclear: '{message}'. Respond in a curious, open-minded, and unique way."
86
+ char_response = self.ai_core.generate_text(prompt, max_length=60)
87
+ sentiment_info = f"\n[Sentiment: {label} ({score:.2f})] {char_response}"
88
+ except Exception as sent_e:
89
+ logger.error(f"Sentiment analysis error: {sent_e}")
90
+ sentiment_info = "\n[Sentiment: error (0.00)] 🤖 Sorry, I couldn't analyze the sentiment."
91
+ # Update history in Gradio 'messages' format
92
+ history = history + [
93
+ {"role": "user", "content": message},
94
+ {"role": "assistant", "content": response + sentiment_info}
95
+ ]
96
+ return "", history
97
+ except Exception as e:
98
+ logger.error(f"Error processing message: {e}")
99
+ # Add error as assistant message
100
+ history = history + [
101
+ {"role": "user", "content": message},
102
+ {"role": "assistant", "content": f"Error: {str(e)}"}
103
+ ]
104
+ return "", history
105
+
106
+ def analyze_text(self, text: str):
107
+ """Perform comprehensive text analysis"""
108
+ try:
109
+ # Get sentiment
110
+ sentiment = self.ai_core.analyze_sentiment(text)
111
+ # Get embeddings
112
+ embeddings = self.ai_core.get_embeddings(text)
113
+ if embeddings:
114
+ # Convert embeddings to 2D visualization
115
+ embedding_viz = self._visualize_embeddings(embeddings)
116
+ else:
117
+ embedding_viz = None
118
+ # Generate creative expansion
119
+ expansion = self.ai_core.generate_text(
120
+ f"Creative expansion of the concept: {text}",
121
+ max_length=150
122
+ )
123
+ return (
124
+ f"Sentiment: {sentiment['label']} (confidence: {sentiment['score']:.2f})",
125
+ embedding_viz,
126
+ expansion
127
+ )
128
+ except Exception as e:
129
+ logger.error(f"Error in text analysis: {e}")
130
+ return (
131
+ "Error analyzing sentiment",
132
+ None,
133
+ str(e)
134
+ )
135
+
136
+ def _visualize_embeddings(self, embeddings: list) -> np.ndarray:
137
+ """Create a simple 2D visualization of embeddings"""
138
+ # Convert to numpy array and reshape to 2D
139
+ emb_array = np.array(embeddings)
140
+ if len(emb_array.shape) > 2:
141
+ emb_array = emb_array.reshape(-1, emb_array.shape[-1])
142
+
143
+ # Simple dimensionality reduction (mean across dimensions)
144
+ viz_data = emb_array.mean(axis=1)
145
+
146
+ # Create a simple heatmap-style visualization
147
+ size = int(np.sqrt(len(viz_data)))
148
+ heatmap = viz_data[:size*size].reshape(size, size)
149
+ return heatmap
150
+
151
+ # Create Gradio Interface
152
+ gradio_app = CodetteGradioApp(ai_core)
153
+
154
+ # Create the Bot
155
+ BOT = MyBot(ai_core)
156
+
157
+ # Bot Framework message handler
158
+ async def messages(req: Request) -> Response:
159
+ if "application/json" in req.headers["Content-Type"]:
160
+ body = await req.json()
161
+ else:
162
+ return Response(status=415)
163
+
164
+ activity = Activity().deserialize(body)
165
+ auth_header = req.headers["Authorization"] if "Authorization" in req.headers else ""
166
+
167
+ response = await ADAPTER.process_activity(activity, auth_header, BOT.on_turn)
168
+ if response:
169
+ return json_response(data=response.body, status=response.status)
170
+ return Response(status=201)
171
+
172
+ # Create Gradio interface
173
+ def create_gradio_interface():
174
+ with gr.Blocks(title="Codette AI Assistant", theme="default") as interface:
175
+ gr.Markdown("""
176
+ # 🤖 Codette AI Assistant
177
+ A sophisticated AI assistant powered by Hugging Face models.
178
+
179
+ ## Features:
180
+ - 💬 Interactive Chat
181
+ - 📊 Sentiment Analysis
182
+ - 🧠 Semantic Understanding
183
+ - 🎨 Creative Generation
184
+ """)
185
+
186
+ with gr.Tabs():
187
+ # Chat Interface
188
+ with gr.Tab("Chat"):
189
+ chatbot = gr.Chatbot(
190
+ [],
191
+ elem_id="chatbot",
192
+ height=400,
193
+ type="messages"
194
+ )
195
+ with gr.Row():
196
+ txt = gr.Textbox(
197
+ show_label=False,
198
+ placeholder="Type your message here...",
199
+ container=False
200
+ )
201
+ with gr.Row():
202
+ cocoon_toggle = gr.Checkbox(label="Enable Cocoon-Powered Creativity", value=False)
203
+ txt.submit(
204
+ gradio_app.process_message,
205
+ [txt, chatbot, cocoon_toggle],
206
+ [txt, chatbot]
207
+ )
208
+ clear = gr.Button("Clear")
209
+ clear.click(lambda: [], None, chatbot)
210
+
211
+ # Analysis Interface
212
+ with gr.Tab("Analysis"):
213
+ with gr.Row():
214
+ with gr.Column():
215
+ analysis_input = gr.Textbox(
216
+ label="Text to Analyze",
217
+ placeholder="Enter text for comprehensive analysis...",
218
+ lines=3
219
+ )
220
+ analyze_btn = gr.Button("Analyze")
221
+
222
+ with gr.Column():
223
+ sentiment_output = gr.Textbox(label="Sentiment Analysis")
224
+ embedding_output = gr.Plot(label="Semantic Embedding Visualization")
225
+ expansion_output = gr.Textbox(
226
+ label="Creative Expansion",
227
+ lines=3
228
+ )
229
+
230
+ analyze_btn.click(
231
+ gradio_app.analyze_text,
232
+ inputs=analysis_input,
233
+ outputs=[
234
+ sentiment_output,
235
+ embedding_output,
236
+ expansion_output
237
+ ]
238
+ )
239
+
240
+ return interface
241
+
242
+ # Main app setup
243
+ async def main():
244
+ # Set up aiohttp web app
245
+ app = web.Application(middlewares=[aiohttp_error_middleware])
246
+ app.router.add_post("/api/messages", messages)
247
+
248
+ # Launch Gradio interface in a separate thread
249
+ interface = create_gradio_interface()
250
+ interface.launch(
251
+ server_name="0.0.0.0",
252
+ server_port=7860,
253
+ share=True,
254
+ auth=None,
255
+ favicon_path=None
256
+ )
257
+
258
+ # Start the web app
259
+ runner = web.AppRunner(app)
260
+ await runner.setup()
261
+ await web.TCPSite(runner, "0.0.0.0", 3978).start()
262
+
263
+ # Keep the server running
264
+ while True:
265
+ await asyncio.sleep(3600) # Sleep for an hour
266
+
267
+ if __name__ == "__main__":
268
+ try:
269
+ # Run the async main function
270
+ asyncio.run(main())
271
+ except Exception as error:
272
+ logger.error(f"Application error: {error}")
273
+ raise error
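The Gradio handler can also be exercised without launching the UI; a sketch, assuming ai_core is initialized as above:

    _, history = gradio_app.process_message("Hello!", history=[], cocoon_mode=False)
    for turn in history:
        print(f"{turn['role']}: {turn['content']}")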
src/bot.py ADDED
@@ -0,0 +1,46 @@
1
+ # Copyright (c) Microsoft Corporation. All rights reserved.
2
+ # Licensed under the MIT License.
3
+
4
+ from botbuilder.core import ActivityHandler, TurnContext
5
+ from botbuilder.schema import ChannelAccount
6
+ from ai_core import AICore
7
+
8
+ class MyBot(ActivityHandler):
9
+ def __init__(self, ai_core: AICore):
10
+ super().__init__()
11
+ self.ai_core = ai_core
12
+
13
+ async def on_message_activity(self, turn_context: TurnContext):
14
+ try:
15
+ # Get the message text
16
+ user_message = turn_context.activity.text
17
+
18
+ # Generate response using AI Core
19
+ response = self.ai_core.generate_text(user_message)
20
+
21
+ # Analyze sentiment
22
+ sentiment = self.ai_core.analyze_sentiment(user_message)
23
+ sentiment_info = f"\n[Sentiment: {sentiment['label']} (confidence: {sentiment['score']:.2f})]"
24
+
25
+ # Send the combined response
26
+ await turn_context.send_activity(response + sentiment_info)
27
+
28
+ except Exception as e:
29
+ await turn_context.send_activity(f"I encountered an error: {str(e)}")
30
+
31
+ async def on_members_added_activity(
32
+ self,
33
+ members_added: ChannelAccount,
34
+ turn_context: TurnContext
35
+ ):
36
+ for member_added in members_added:
37
+ if member_added.id != turn_context.activity.recipient.id:
38
+ welcome_message = (
39
+ "👋 Welcome to Codette! I'm an AI assistant that can help you with:\n\n"
40
+ "🤖 Natural language understanding\n"
41
+ "📊 Sentiment analysis\n"
42
+ "🎨 Creative text generation\n"
43
+ "🧠 Semantic analysis\n\n"
44
+ "Feel free to ask me anything!"
45
+ )
46
+ await turn_context.send_activity(welcome_message)
src/codette_imports.py ADDED
@@ -0,0 +1,201 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Codette Framework Imports Module
4
+ Centralized import management for all Codette AI components
5
+ """
6
+
7
+ import sys
8
+ import logging
9
+ from pathlib import Path
10
+ from typing import Dict, Any, List, Optional
11
+
12
+ # Setup logging
13
+ logger = logging.getLogger(__name__)
14
+
15
+ # Core Framework Imports
16
+ try:
17
+ from ai_core import AICore
18
+ from ai_core_system import AICore as AISystemCore
19
+ from ai_core_identityscan import AICore as AIIdentityCore
20
+ except ImportError as e:
21
+ logger.warning(f"Core AI imports failed: {e}")
22
+ AICore = AISystemCore = AIIdentityCore = None
23
+
24
+ # Codette Variants
25
+ try:
26
+ from codette import Codette
27
+ from codette2 import CodetteCQURE
28
+ except ImportError as e:
29
+ logger.warning(f"Codette variant imports failed: {e}")
30
+ Codette = CodetteCQURE = None
31
+
32
+ # Cognitive Systems
33
+ try:
34
+ from cognitive_processor import CognitiveProcessor
35
+ from cognitive_auth import CognitiveAuthManager
36
+ from defense_system import DefenseSystem
37
+ from health_monitor import HealthMonitor
38
+ from config_manager import EnhancedAIConfig
39
+ except ImportError as e:
40
+ logger.warning(f"Cognitive system imports failed: {e}")
41
+ CognitiveProcessor = CognitiveAuthManager = DefenseSystem = None
42
+ HealthMonitor = EnhancedAIConfig = None
43
+
44
+ # Quantum and Scientific Computing
45
+ try:
46
+ from quantum import *
47
+ from quantum_harmonic_framework import quantum_harmonic_dynamics
48
+ from codette_quantum_multicore import codette_experiment_task, CognitionCocooner, PerspectiveAgent
49
+ from codette_quantum_multicore2 import analyse_cocoons, load_cocoon
50
+ from codette_meta_3d import *
51
+ from codette_timeline_animation import *
52
+ except ImportError as e:
53
+ logger.warning(f"Quantum module imports failed: {e}")
54
+
55
+ # Fractal and Advanced Analysis
56
+ try:
57
+ from fractal import analyze_identity
58
+ from agireasoning import AgileAGIFunctionality, UniversalReasoning
59
+ except ImportError as e:
60
+ logger.warning(f"Advanced analysis imports failed: {e}")
61
+ analyze_identity = AgileAGIFunctionality = UniversalReasoning = None
62
+
63
+ # Component Framework
64
+ try:
65
+ from Codette_final.components.adaptive_learning import AdaptiveLearningEnvironment
66
+ from Codette_final.components.ai_driven_creativity import AIDrivenCreativity
67
+ from Codette_final.components.collaborative_ai import CollaborativeAI
68
+ from Codette_final.components.cultural_sensitivity import CulturalSensitivityEngine
69
+ from Codette_final.components.data_processing import AdvancedDataProcessor
70
+ from Codette_final.components.ethical_governance import EthicalAIGovernance
71
+ from Codette_final.components.explainable_ai import ExplainableAI
72
+ from Codette_final.components.multimodal_analyzer import MultimodalAnalyzer
73
+ from Codette_final.components.neuro_symbolic import NeuroSymbolicEngine
74
+ from Codette_final.components.quantum_optimizer import QuantumInspiredOptimizer
75
+ from Codette_final.components.real_time_data import RealTimeDataIntegrator
76
+ from Codette_final.components.sentiment_analysis import EnhancedSentimentAnalyzer
77
+ from Codette_final.components.self_improving_ai import SelfImprovingAI
78
+ from Codette_final.components.user_personalization import UserPersonalizer
79
+ except ImportError as e:
80
+ logger.warning(f"Component framework imports failed: {e}")
81
+
82
+ # Bot Framework
83
+ try:
84
+ from app import APP as BotApp
85
+ from bot import MyBot
86
+ except ImportError as e:
87
+ logger.warning(f"Bot framework imports failed: {e}")
88
+ BotApp = MyBot = None
89
+
90
+ # API and CLI Tools
91
+ try:
92
+ from codette_api import app as api_app
93
+ from codette_cli import main as cli_main
94
+ from codette_test_runner import *
95
+ except ImportError as e:
96
+ logger.warning(f"API/CLI imports failed: {e}")
97
+
98
+ # GUI Components
99
+ try:
100
+ from gui import AIApplication
101
+ except ImportError as e:
102
+ logger.warning(f"GUI imports failed: {e}")
103
+ AIApplication = None
104
+
105
+ # Scientific Libraries
106
+ import numpy as np
107
+ import matplotlib.pyplot as plt
108
+ try:
109
+ from scipy.integrate import solve_ivp
110
+ from scipy.fft import fft, fftfreq
111
+ from sklearn.cluster import KMeans
112
+ from sklearn.ensemble import IsolationForest
113
+ except ImportError as e:
114
+ logger.warning(f"Scientific library imports failed: {e}")
115
+
116
+ # Utility Libraries
117
+ import json
118
+ import os
119
+ import asyncio
120
+ import aiohttp
121
+ from datetime import datetime
122
+ from typing import Dict, List, Any, Optional, Union
123
+ from collections import defaultdict, deque
124
+ import hashlib
125
+ import random
126
+ import time
127
+
128
+ class CodetteImportManager:
129
+ """Manages all Codette framework imports and provides utilities"""
130
+
131
+ def __init__(self):
132
+ self.available_modules = self._check_available_modules()
133
+ self._log_import_status()
134
+
135
+ def _check_available_modules(self) -> Dict[str, bool]:
136
+ """Check which modules are available"""
137
+ modules = {
138
+ "ai_core": AICore is not None,
139
+ "codette_classic": Codette is not None,
140
+ "codette_cqure": CodetteCQURE is not None,
141
+ "cognitive_processor": CognitiveProcessor is not None,
142
+ "defense_system": DefenseSystem is not None,
143
+ "health_monitor": HealthMonitor is not None,
144
+ "quantum_systems": 'quantum_harmonic_dynamics' in globals(),
145
+ "fractal_analysis": analyze_identity is not None,
146
+ "component_framework": 'AdaptiveLearningEnvironment' in globals(),
147
+ "bot_framework": BotApp is not None,
148
+ "gui_framework": AIApplication is not None
149
+ }
150
+ return modules
151
+
152
+ def _log_import_status(self):
153
+ """Log the status of all imports"""
154
+ logger.info("Codette Import Status:")
155
+ for module, available in self.available_modules.items():
156
+ status = "✅ Available" if available else "❌ Missing"
157
+ logger.info(f" {module}: {status}")
158
+
159
+ def get_available_systems(self) -> List[str]:
160
+ """Get list of available systems"""
161
+ return [module for module, available in self.available_modules.items() if available]
162
+
163
+ def create_integrated_system(self) -> Optional[Any]:
164
+ """Create an integrated system using available modules"""
165
+ try:
166
+ if self.available_modules["codette_cqure"]:
167
+ return CodetteCQURE(
168
+ perspectives=["Newton", "DaVinci", "Ethical", "Quantum", "Memory"],
169
+ ethical_considerations="Transparency, kindness, and recursive wisdom",
170
+ spiderweb_dim=5,
171
+ memory_path="web_quantum_cocoon.json",
172
+ recursion_depth=3,
173
+ quantum_fluctuation=0.05
174
+ )
175
+ elif self.available_modules["codette_classic"]:
176
+ return Codette("WebUser")
177
+ else:
178
+ logger.warning("No Codette systems available")
179
+ return None
180
+
181
+ except Exception as e:
182
+ logger.error(f"Failed to create integrated system: {e}")
183
+ return None
184
+
185
+ # Create global import manager
186
+ import_manager = CodetteImportManager()
187
+
188
+ # Export key functions and classes for easy access
189
+ __all__ = [
190
+ 'CodetteImportManager',
191
+ 'import_manager',
192
+ 'AICore',
193
+ 'Codette',
194
+ 'CodetteCQURE',
195
+ 'CognitiveProcessor',
196
+ 'DefenseSystem',
197
+ 'HealthMonitor',
198
+ 'analyze_identity',
199
+ 'quantum_harmonic_dynamics',
200
+ 'codette_experiment_task'
201
+ ]
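A usage sketch for the import manager (assumes this file is importable as codette_imports):

    from codette_imports import import_manager

    print(import_manager.get_available_systems())
    system = import_manager.create_integrated_system()
    if system is not None:
        print(f"Integrated system ready: {type(system).__name__}")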
src/config.py ADDED
@@ -0,0 +1,28 @@
1
+ #!/usr/bin/env python3
2
+ # Copyright (c) 2025 Codette
3
+ # Licensed under the MIT License.
4
+
5
+ class DefaultConfig:
6
+ """Bot Configuration"""
7
+
8
+ PORT = 7860
9
+ APP_ID = ""
10
+ APP_PASSWORD = ""
11
+
12
+ # HuggingFace settings
13
+ HF_TOKEN = ""
14
+
15
+ # Logging settings
16
+ LOG_LEVEL = "INFO"
17
+
18
+ # Model settings
19
+ DEFAULT_TEMPERATURE = 0.7
20
+ MAX_LENGTH = 1024
21
+ CONTEXT_WINDOW = 2048
22
+
23
+ # Memory settings
24
+ MAX_MEMORY = 50
25
+ MEMORY_CONTEXT_SIZE = 5
26
+
27
+ # Perspective settings
28
+ PERSPECTIVES = ["newton", "davinci", "human_intuition", "quantum_computing"]
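The configuration is consumed as plain class attributes; a short sketch of typical use:

    from config import DefaultConfig

    cfg = DefaultConfig()
    print(cfg.PORT)              # 7860
    print(cfg.PERSPECTIVES[:2])  # ['newton', 'davinci']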
src/main.py ADDED
@@ -0,0 +1,256 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Codette Web Application Entry Point
4
+ Imports and orchestrates all Codette AI framework modules
5
+ """
6
+
7
+ import asyncio
8
+ import logging
9
+ from pathlib import Path
10
+ import sys
11
+ import os
12
+
13
+ # Add project root to Python path
14
+ project_root = Path(__file__).parent.parent
15
+ sys.path.insert(0, str(project_root))
16
+
17
+ # Core Codette imports
18
+ from ai_core import AICore
19
+ from ai_core_system import AICore as AISystem
20
+ from codette import Codette
21
+ from codette2 import CodetteCQURE
22
+ from cognitive_processor import CognitiveProcessor
23
+ from defense_system import DefenseSystem
24
+ from health_monitor import HealthMonitor
25
+ from config_manager import EnhancedAIConfig
26
+ from fractal import analyze_identity
27
+
28
+ # Quantum and simulation imports
29
+ from quantum_harmonic_framework import quantum_harmonic_dynamics
30
+ from codette_quantum_multicore import codette_experiment_task
31
+ from codette_meta_3d import *
32
+ from codette_timeline_animation import *
33
+
34
+ # Component imports
35
+ from Codette_final.components.adaptive_learning import AdaptiveLearningEnvironment
36
+ from Codette_final.components.ai_driven_creativity import AIDrivenCreativity
37
+ from Codette_final.components.ethical_governance import EthicalAIGovernance
38
+ from Codette_final.components.sentiment_analysis import EnhancedSentimentAnalyzer
39
+ from Codette_final.components.real_time_data import RealTimeDataIntegrator
40
+
41
+ # Authentication and security
42
+ from cognitive_auth import CognitiveAuthManager
43
+
44
+ # Utilities
45
+ import numpy as np
46
+ import matplotlib.pyplot as plt
47
+ from typing import Dict, List, Any, Optional
48
+
49
+ # Configure logging
50
+ logging.basicConfig(
51
+ level=logging.INFO,
52
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
53
+ )
54
+ logger = logging.getLogger(__name__)
55
+
56
+ class CodetteWebApplication:
57
+ """Main Codette Web Application Class"""
58
+
59
+ def __init__(self):
60
+ logger.info("Initializing Codette Web Application...")
61
+
62
+ # Core AI systems
63
+ self.ai_core = None
64
+ self.ai_system = None
65
+ self.codette_classic = None
66
+ self.codette_cqure = None
67
+
68
+ # Processing engines
69
+ self.cognitive_processor = None
70
+ self.defense_system = None
71
+ self.health_monitor = None
72
+
73
+ # Component systems
74
+ self.learning_env = None
75
+ self.creativity_engine = None
76
+ self.ethical_gov = None
77
+ self.sentiment_analyzer = None
78
+ self.data_integrator = None
79
+
80
+ # Authentication
81
+ self.auth_manager = CognitiveAuthManager()
82
+
83
+ # Configuration
84
+ self.config = None
85
+
86
+ self._initialize_systems()
87
+
88
+ def _initialize_systems(self):
89
+ """Initialize all Codette subsystems"""
90
+ try:
91
+ # Load configuration
92
+ self.config = EnhancedAIConfig("config.json")
93
+ logger.info("Configuration loaded successfully")
94
+
95
+ # Initialize core AI systems
96
+ self.ai_core = AICore()
97
+ logger.info("AI Core initialized")
98
+
99
+ # Initialize classic Codette
100
+ self.codette_classic = Codette(user_name="WebUser")
101
+ logger.info("Classic Codette initialized")
102
+
103
+ # Initialize CQURE system
104
+ self.codette_cqure = CodetteCQURE(
105
+ perspectives=["Newton", "DaVinci", "Ethical", "Quantum", "Memory"],
106
+ ethical_considerations="Codette Manifesto: kindness, inclusion, safety, hope.",
107
+ spiderweb_dim=5,
108
+ memory_path="quantum_cocoon.json",
109
+ recursion_depth=4,
110
+ quantum_fluctuation=0.07
111
+ )
112
+ logger.info("Codette CQURE initialized")
113
+
114
+ # Initialize processing engines
115
+ self.cognitive_processor = CognitiveProcessor(["scientific", "creative", "emotional"])
116
+ self.defense_system = DefenseSystem(["evasion", "adaptability", "barrier"])
117
+ self.health_monitor = HealthMonitor()
118
+ logger.info("Processing engines initialized")
119
+
120
+ # Initialize components
121
+ self.learning_env = AdaptiveLearningEnvironment()
122
+ self.creativity_engine = AIDrivenCreativity()
123
+ self.ethical_gov = EthicalAIGovernance()
124
+ self.sentiment_analyzer = EnhancedSentimentAnalyzer()
125
+ self.data_integrator = RealTimeDataIntegrator()
126
+ logger.info("Component systems initialized")
127
+
128
+ except Exception as e:
129
+ logger.error(f"System initialization failed: {e}")
130
+ raise
131
+
132
+ async def process_query(self, query: str, user_id: str = "web_user") -> Dict[str, Any]:
133
+ """Process a query through all Codette systems"""
134
+ try:
135
+ logger.info(f"Processing query: {query}")
136
+
137
+ # Health check first
138
+ health_status = await self.health_monitor.check_status()
139
+
140
+ # Sentiment analysis
141
+ sentiment = self.sentiment_analyzer.analyze(query)
142
+
143
+ # Cognitive processing
144
+ insights = self.cognitive_processor.generate_insights(query)
145
+
146
+ # Classic Codette response
147
+ classic_response = self.codette_classic.respond(query)
148
+
149
+ # CQURE response
150
+ cqure_response = self.codette_cqure.answer(query)
151
+
152
+ # Apply defense filters
153
+ filtered_response = self.defense_system.apply_defenses(cqure_response)
154
+
155
+ # Ethical governance
156
+ ethical_decision = self.ethical_gov.enforce_policies(filtered_response)
157
+
158
+ # Compile comprehensive response
159
+ response = {
160
+ "query": query,
161
+ "insights": insights,
162
+ "classic_response": classic_response,
163
+ "cqure_response": filtered_response,
164
+ "ethical_decision": ethical_decision,
165
+ "sentiment": sentiment,
166
+ "health_status": health_status,
167
+ "timestamp": asyncio.get_event_loop().time()
168
+ }
169
+
170
+ logger.info("Query processed successfully")
171
+ return response
172
+
173
+ except Exception as e:
174
+ logger.error(f"Query processing failed: {e}")
175
+ return {
176
+ "error": f"Processing failed: {str(e)}",
177
+ "query": query,
178
+ "timestamp": asyncio.get_event_loop().time()
179
+ }
180
+
181
+ def run_quantum_simulation(self, cores: int = 4) -> List[str]:
182
+ """Run quantum simulation experiment"""
183
+ try:
184
+ logger.info(f"Running quantum simulation on {cores} cores")
185
+ from multiprocessing import Pool
186
+
187
+ with Pool(cores) as pool:
188
+ jobs = list(range(cores))
189
+ results = pool.map(codette_experiment_task, jobs)
190
+
191
+ logger.info("Quantum simulation completed")
192
+ return results
193
+
194
+ except Exception as e:
195
+ logger.error(f"Quantum simulation failed: {e}")
196
+ return []
197
+
198
+ def analyze_identity_fractal(self, micro_generations: List[Dict],
199
+ informational_states: List[Dict],
200
+ perspectives: List[str]) -> Dict[str, Any]:
201
+ """Perform fractal identity analysis"""
202
+ try:
203
+ quantum_analogies = {"entanglement": True, "limits": "Theoretical reasoning only"}
204
+ philosophical_context = {"continuity": True, "emergent": True}
205
+
206
+ results = analyze_identity(
207
+ micro_generations,
208
+ informational_states,
209
+ perspectives,
210
+ quantum_analogies,
211
+ philosophical_context
212
+ )
213
+
214
+ logger.info("Fractal identity analysis completed")
215
+ return results
216
+
217
+ except Exception as e:
218
+ logger.error(f"Identity analysis failed: {e}")
219
+ return {"error": str(e)}
220
+
221
+ # Global application instance
222
+ app = None
223
+
224
+ def get_app() -> CodetteWebApplication:
225
+ """Get the global application instance"""
226
+ global app
227
+ if app is None:
228
+ app = CodetteWebApplication()
229
+ return app
230
+
231
+ async def main():
232
+ """Main application entry point"""
233
+ try:
234
+ # Initialize application
235
+ application = get_app()
236
+
237
+ # Test query
238
+ test_query = "What is the meaning of consciousness in AI?"
239
+ response = await application.process_query(test_query)
240
+
241
+ print("\n" + "="*50)
242
+ print("CODETTE WEB APPLICATION INITIALIZED")
243
+ print("="*50)
244
+ print(f"Test Query: {test_query}")
245
+ print(f"Response: {response}")
246
+ print("="*50)
247
+
248
+ return application
249
+
250
+ except Exception as e:
251
+ logger.error(f"Application startup failed: {e}")
252
+ raise
253
+
254
+ if __name__ == "__main__":
255
+ # Run the application
256
+ asyncio.run(main())
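Beyond the built-in test query, the orchestrator can be driven directly; a sketch, assuming all subsystem imports resolve:

    import asyncio
    from main import get_app

    async def demo():
        application = get_app()
        result = await application.process_query("Summarize today's goals", user_id="demo")
        print(result.get("cqure_response", result))

    asyncio.run(demo())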
src/web_interface.py ADDED
@@ -0,0 +1,173 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Codette Web Interface
4
+ Flask-based web server for the Codette AI framework
5
+ """
6
+
7
+ from flask import Flask, render_template, request, jsonify, session
8
+ from flask_cors import CORS
9
+ import asyncio
10
+ import json
11
+ import logging
+ import os
12
+ from datetime import datetime
13
+ from typing import Any, Dict, Optional
14
+
15
+ # Import main application
16
+ from main import get_app, CodetteWebApplication
17
+
18
+ # Configure logging
19
+ logging.basicConfig(level=logging.INFO)
20
+ logger = logging.getLogger(__name__)
21
+
22
+ # Create Flask app
23
+ web_app = Flask(__name__)
24
+ # Read the secret from the environment when set (the variable name is
+ # illustrative); the literal remains only as a development fallback
+ web_app.secret_key = os.environ.get("CODETTE_SECRET_KEY", "codette_secret_key_2025")
25
+ CORS(web_app)
26
+
27
+ # Global Codette application instance
28
+ codette_app: Optional[CodetteWebApplication] = None
29
+
30
+ # Flask 2.3 deprecated (and Flask 3.0 removed) before_first_request,
+ # so initialization is invoked explicitly at import time below
31
+ def initialize_codette():
32
+ """Initialize Codette systems before first request"""
33
+ global codette_app
34
+ try:
35
+ codette_app = get_app()
36
+ logger.info("Codette systems initialized for web interface")
37
+ except Exception as e:
38
+ logger.error(f"Failed to initialize Codette: {e}")
39
+
40
+ @web_app.route('/')
41
+ def index():
42
+ """Main dashboard page"""
43
+ return render_template('index.html')
44
+
45
+ @web_app.route('/api/query', methods=['POST'])
46
+ def api_query():
47
+ """API endpoint for processing queries"""
48
+ try:
49
+ data = request.get_json()
50
+ query = data.get('query', '')
51
+ user_id = data.get('user_id', 'web_user')
52
+
53
+ if not query:
54
+ return jsonify({"error": "Query is required"}), 400
55
+
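+ # Guard against a failed startup; otherwise a None codette_app would
+ # surface below as an opaque AttributeError
+ if codette_app is None:
+ return jsonify({"error": "Codette is not initialized"}), 503
+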
56
+ # Process the query on a fresh event loop; asyncio.run replaces the
+ # manual new_event_loop/set_event_loop/run_until_complete/close dance
+ response = asyncio.run(codette_app.process_query(query, user_id))
+ return jsonify(response)
67
+
68
+ except Exception as e:
69
+ logger.error(f"Query processing error: {e}")
70
+ return jsonify({"error": str(e)}), 500
71
+
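+ # Example request (assuming a local run on port 5000):
+ #   curl -X POST http://localhost:5000/api/query \
+ #        -H 'Content-Type: application/json' \
+ #        -d '{"query": "What is consciousness?", "user_id": "web_user"}'
+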
72
+ @web_app.route('/api/quantum-simulation', methods=['POST'])
73
+ def api_quantum_simulation():
74
+ """API endpoint for running quantum simulations"""
75
+ try:
76
+ data = request.get_json()
77
+ cores = data.get('cores', 4)
+
+ # Clamp to a safe range: Pool(0) or a negative count raises ValueError;
+ # 16 remains the upper safety limit
+ cores = max(1, min(int(cores), 16))
81
+
82
+ results = codette_app.run_quantum_simulation(cores)
83
+
84
+ return jsonify({
85
+ "status": "success",
86
+ "cores_used": cores,
87
+ "results": results,
88
+ "timestamp": datetime.now().isoformat()
89
+ })
90
+
91
+ except Exception as e:
92
+ logger.error(f"Quantum simulation error: {e}")
93
+ return jsonify({"error": str(e)}), 500
94
+
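+ # Example: POST {"cores": 4} to /api/quantum-simulation; the response
+ # echoes cores_used alongside the per-core simulation results
+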
95
+ @web_app.route('/api/health', methods=['GET'])
96
+ def api_health():
97
+ """Health check endpoint"""
98
+ try:
99
+ # Same pattern as /api/query: asyncio.run manages the loop lifecycle
+ health = asyncio.run(codette_app.health_monitor.check_status())
+ return jsonify({
+ "status": "healthy",
+ "metrics": health,
+ "timestamp": datetime.now().isoformat()
+ })
113
+
114
+ except Exception as e:
115
+ logger.error(f"Health check error: {e}")
116
+ return jsonify({"error": str(e)}), 500
117
+
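+ # Example (local run): curl http://localhost:5000/api/health
+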
118
+ @web_app.route('/api/analyze-identity', methods=['POST'])
119
+ def api_analyze_identity():
120
+ """API endpoint for fractal identity analysis"""
121
+ try:
122
+ data = request.get_json()
123
+
124
+ # Default example data if not provided
125
+ micro_generations = data.get('micro_generations', [
126
+ {"update": "Initial state", "timestamp": "2025-01-01T00:00:00Z"},
127
+ {"update": "State change 1", "timestamp": "2025-01-02T00:00:00Z"}
128
+ ])
129
+
130
+ informational_states = data.get('informational_states', [
131
+ {"state_id": "state_1", "data": "Sample data 1"},
132
+ {"state_id": "state_2", "data": "Sample data 2"}
133
+ ])
134
+
135
+ perspectives = data.get('perspectives', ["Quantum", "Classical", "Ethical"])
136
+
137
+ results = codette_app.analyze_identity_fractal(
138
+ micro_generations,
139
+ informational_states,
140
+ perspectives
141
+ )
142
+
143
+ return jsonify(results)
144
+
145
+ except Exception as e:
146
+ logger.error(f"Identity analysis error: {e}")
147
+ return jsonify({"error": str(e)}), 500
148
+
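+ # Example request body (every key is optional; the defaults above fill
+ # in whatever is omitted):
+ #   {"perspectives": ["Quantum", "Classical"]}
+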
149
+ @web_app.route('/dashboard')
150
+ def dashboard():
151
+ """Codette dashboard page"""
152
+ return render_template('dashboard.html')
153
+
154
+ @web_app.route('/quantum')
155
+ def quantum():
156
+ """Quantum simulation interface"""
157
+ return render_template('quantum.html')
158
+
159
+ @web_app.route('/cognitive')
160
+ def cognitive():
161
+ """Cognitive processing interface"""
162
+ return render_template('cognitive.html')
163
+
164
+ if __name__ == "__main__":
165
+ # Initialize Codette systems
166
+ initialize_codette()
167
+
168
+ # Run the web application
169
+ web_app.run(
170
+ host='0.0.0.0',
171
+ port=5000,
172
+ debug=True  # development only; disable for any production deployment
173
+ )
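+
+ # For production, prefer a proper WSGI server over the built-in dev server,
+ # e.g. (illustrative command): gunicorn -w 4 -b 0.0.0.0:5000 web_interface:web_app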