Create app.py
app.py ADDED
@@ -0,0 +1,422 @@
import json
import os
import hashlib
import logging
import pathlib
import re
import shutil
import sqlite3
import time
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime, timedelta

import filelock
import gradio as gr
import nltk
import numpy as np
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from rapidfuzz import fuzz
from tenacity import retry, stop_after_attempt, wait_exponential

# Download required NLTK data at module level
try:
    nltk.data.find('tokenizers/punkt')
    nltk.data.find('corpora/wordnet')
except LookupError:
    nltk.download('punkt')
    nltk.download('wordnet')
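# Note (assumption): on newer NLTK releases (3.9+), word_tokenize also needs the
# separate 'punkt_tab' resource; fetch it defensively so the Space still boots there.
try:
    nltk.data.find('tokenizers/punkt_tab')
except LookupError:
    try:
        nltk.download('punkt_tab')
    except Exception:
        pass  # older NLTK releases have no punkt_tab; 'punkt' alone suffices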

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class LockManager:
    """Context-manager wrapper around a file lock for cross-process writes."""

    def __init__(self, lock_path):
        self.lock = filelock.FileLock(lock_path, timeout=10)

    def __enter__(self):
        self.lock.acquire()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.lock.release()
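
# Usage sketch: LockManager serializes writers across processes via a sidecar
# lock file, e.g. `with LockManager("signals.db.lock"): ...` around DB writes.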

class NexisSignalEngine:
    def __init__(self, memory_path, entropy_threshold=0.08, config_path="config.json",
                 max_memory_entries=10000, memory_ttl_days=30, fuzzy_threshold=80,
                 max_db_size_mb=100):
        """
        Initialize the NexisSignalEngine for signal processing and analysis.

        Args:
            memory_path: Path to the SQLite memory store (must end in .db).
            entropy_threshold: Entropy index above which a signal is considered risky.
            config_path: Optional JSON file overriding the built-in term lists.
            max_memory_entries: Row count that triggers memory rotation.
            memory_ttl_days: Age in days after which records are pruned.
            fuzzy_threshold: Minimum rapidfuzz ratio (0-100) for a term match.
            max_db_size_mb: Database size that triggers memory rotation.
        """
        self.memory_path = self._validate_path(memory_path)
        self.entropy_threshold = entropy_threshold
        self.max_memory_entries = max_memory_entries
        self.memory_ttl = timedelta(days=memory_ttl_days)
        self.fuzzy_threshold = fuzzy_threshold
        self.max_db_size_mb = max_db_size_mb
        self.lemmatizer = WordNetLemmatizer()
        self.token_cache = {}
        self.config = self._load_config(config_path)
        self._init_sqlite()  # ensure tables exist before loading memory
        self.memory = self._load_memory()
        self.cache = defaultdict(list)
        self.perspectives = ["Colleen", "Luke", "Kellyanne"]

    def _validate_path(self, path):
        path = pathlib.Path(path).resolve()
        if path.suffix != '.db':
            raise ValueError("Memory path must be a .db file")
        return str(path)

    def _load_config(self, config_path):
        default_config = {
            "ethical_terms": ["hope", "truth", "resonance", "repair"],
            "entropic_terms": ["corruption", "instability", "malice", "chaos"],
            "risk_terms": ["manipulate", "exploit", "bypass", "infect", "override"],
            "virtue_terms": ["hope", "grace", "resolve"]
        }
        if os.path.exists(config_path):
            try:
                with open(config_path, 'r') as f:
                    config = json.load(f)
                default_config.update(config)
            except json.JSONDecodeError:
                logger.warning(f"Invalid config file at {config_path}. Using defaults.")
        required_keys = ["ethical_terms", "entropic_terms", "risk_terms", "virtue_terms"]
        missing_keys = [k for k in required_keys if k not in default_config or not default_config[k]]
        if missing_keys:
            raise ValueError(f"Config missing required keys: {missing_keys}")
        return default_config

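    # A config.json overriding the default term lists might look like this
    # (hypothetical example; any subset of the four keys may be supplied):
    #   {
    #       "risk_terms": ["manipulate", "exploit", "bypass", "jailbreak"],
    #       "virtue_terms": ["hope", "grace", "resolve", "candor"]
    #   }
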
    def _init_sqlite(self):
        with sqlite3.connect(self.memory_path) as conn:
            conn.execute("""
                CREATE TABLE IF NOT EXISTS memory (
                    hash TEXT PRIMARY KEY,
                    record JSON,
                    timestamp TEXT,
                    integrity_hash TEXT
                )
            """)
            # The hash is stored as an UNINDEXED column: FTS5 rowids are integers,
            # so a hex digest cannot be used as the rowid directly.
            conn.execute("""
                CREATE VIRTUAL TABLE IF NOT EXISTS memory_fts
                USING fts5(hash UNINDEXED, input, intent_signature, reasoning, verdict)
            """)
            conn.commit()

    @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10))
    def _load_memory(self):
        memory = {}
        try:
            with sqlite3.connect(self.memory_path) as conn:
                cursor = conn.cursor()
                cursor.execute("SELECT hash, record, integrity_hash FROM memory")
                for hash_val, record_json, integrity_hash in cursor.fetchall():
                    record = json.loads(record_json)
                    computed_hash = hashlib.sha256(json.dumps(record, sort_keys=True).encode()).hexdigest()
                    if computed_hash != integrity_hash:
                        logger.warning(f"Tampered record detected for hash {hash_val}")
                        continue
                    memory[hash_val] = record
        except sqlite3.Error as e:
            logger.error(f"Error loading memory: {e}")
        return memory

    @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10))
    def _save_memory(self):
        def default_serializer(o):
            if isinstance(o, complex):
                return {"real": o.real, "imag": o.imag}
            if isinstance(o, np.ndarray):
                return o.tolist()
            # np.int64 has no is_integer(); dispatch on numpy's abstract types instead
            if isinstance(o, np.integer):
                return int(o)
            if isinstance(o, np.floating):
                return float(o)
            raise TypeError(f"Object of type {o.__class__.__name__} is not JSON serializable")

        # Rewrites every record on each call; acceptable at demo scale.
        with LockManager(f"{self.memory_path}.lock"):
            with sqlite3.connect(self.memory_path) as conn:
                cursor = conn.cursor()
                for hash_val, record in self.memory.items():
                    record_json = json.dumps(record, default=default_serializer)
                    integrity_hash = hashlib.sha256(json.dumps(record, sort_keys=True, default=default_serializer).encode()).hexdigest()
                    intent_signature = record.get('intent_signature', {})
                    intent_str = f"suspicion_score:{intent_signature.get('suspicion_score', 0)} entropy_index:{intent_signature.get('entropy_index', 0)}"
                    reasoning = record.get('reasoning', {})
                    reasoning_str = " ".join(f"{k}:{v}" for k, v in reasoning.items())
                    cursor.execute("""
                        INSERT OR REPLACE INTO memory (hash, record, timestamp, integrity_hash)
                        VALUES (?, ?, ?, ?)
                    """, (hash_val, record_json, record['timestamp'], integrity_hash))
                    # FTS5 has no usable text primary key, so emulate upsert by hash
                    cursor.execute("DELETE FROM memory_fts WHERE hash = ?", (hash_val,))
                    cursor.execute("""
                        INSERT INTO memory_fts (hash, input, intent_signature, reasoning, verdict)
                        VALUES (?, ?, ?, ?, ?)
                    """, (
                        hash_val,
                        record['input'],
                        intent_str,
                        reasoning_str,
                        record.get('verdict', '')
                    ))
                conn.commit()

    def _prune_and_rotate_memory(self):
        now = datetime.utcnow()
        with LockManager(f"{self.memory_path}.lock"):
            conn = sqlite3.connect(self.memory_path)
            try:
                cursor = conn.cursor()
                cursor.execute("""
                    DELETE FROM memory
                    WHERE timestamp < ?
                """, ((now - self.memory_ttl).isoformat(),))
                # Drop FTS rows whose hash no longer exists in the main table
                cursor.execute("DELETE FROM memory_fts WHERE hash NOT IN (SELECT hash FROM memory)")
                conn.commit()
                cursor.execute("SELECT COUNT(*) FROM memory")
                count = cursor.fetchone()[0]
            finally:
                conn.close()
            db_size_mb = os.path.getsize(self.memory_path) / (1024 * 1024)
            if count >= self.max_memory_entries or db_size_mb >= self.max_db_size_mb:
                # Archive the full database and start fresh; deleting rows after the
                # move would empty the archive, since the old file stays attached.
                self._rotate_memory_file()
                self.memory = {}

    def _rotate_memory_file(self):
        archive_path = f"{self.memory_path}.{datetime.utcnow().strftime('%Y%m%d%H%M%S')}.bak"
        if os.path.exists(self.memory_path):
            shutil.move(self.memory_path, archive_path)
        self._init_sqlite()

    def _hash(self, signal):
        return hashlib.sha256(signal.encode()).hexdigest()

    def _rotate_vector(self, signal):
        # Derive a deterministic seed from the signal hash. SystemRandom.seed()
        # is a no-op, so it cannot give reproducible vectors; use NumPy's
        # seeded generator instead.
        seed = int(self._hash(signal)[:8], 16) % (2**32)
        rng = np.random.default_rng(seed)
        vec = rng.normal(0, 1, 2) + 1j * rng.normal(0, 1, 2)
        theta = np.pi / 4
        rot = np.array([[np.cos(theta), -np.sin(theta)],
                        [np.sin(theta), np.cos(theta)]])
        rotated = np.dot(rot, vec)
        # Serialize the rotated vector so the stored output reflects the transform
        return rotated, [{"real": v.real, "imag": v.imag} for v in rotated]

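    # The rotation above applies the standard 2-D rotation matrix
    # R(θ) = [[cos θ, -sin θ], [sin θ, cos θ]] with θ = π/4 to a complex
    # Gaussian draw, giving each signal a stable, hash-determined "orientation".
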
    def _entanglement_tensor(self, signal_vec):
        matrix = np.array([[1, 0.5], [0.5, 1]])
        return np.dot(matrix, signal_vec)

    def _resonance_equation(self, signal):
        freqs = [ord(c) % 13 for c in signal[:1000] if c.isalpha()]
        if not freqs:
            return [0.0, 0.0, 0.0]
        spectrum = np.fft.fft(freqs)
        norm = np.linalg.norm(spectrum.real)
        normalized = spectrum.real / (norm if norm != 0 else 1)
        return normalized[:3].tolist()

    def _tokenize_and_lemmatize(self, signal_lower):
        # Fallback to simple split if NLTK fails
        try:
            if signal_lower in self.token_cache:
                return self.token_cache[signal_lower]
            tokens = word_tokenize(signal_lower)
            lemmatized = [self.lemmatizer.lemmatize(token) for token in tokens]
            ngrams = []
            for n in range(2, 4):
                for i in range(len(signal_lower) - n + 1):
                    ngram = signal_lower[i:i+n]
                    ngrams.append(self.lemmatizer.lemmatize(re.sub(r'[^a-z]', '', ngram)))
            result = lemmatized + [ng for ng in ngrams if ng]
            self.token_cache[signal_lower] = result
            return result
        except LookupError:
            return signal_lower.split()

    def _entropy(self, signal_lower, tokens):
        unique = set(tokens)
        term_count = 0
        for term in self.config["entropic_terms"]:
            lemmatized_term = self.lemmatizer.lemmatize(term)
            for token in tokens:
                if fuzz.ratio(lemmatized_term, token) >= self.fuzzy_threshold:
                    term_count += 1
        return term_count / max(len(unique), 1)

    def _tag_ethics(self, signal_lower, tokens):
        for term in self.config["ethical_terms"]:
            lemmatized_term = self.lemmatizer.lemmatize(term)
            for token in tokens:
                if fuzz.ratio(lemmatized_term, token) >= self.fuzzy_threshold:
                    return "aligned"
        return "unaligned"

    def _predict_intent_vector(self, signal_lower, tokens):
        suspicion_score = 0
        for term in self.config["risk_terms"]:
            lemmatized_term = self.lemmatizer.lemmatize(term)
            for token in tokens:
                if fuzz.ratio(lemmatized_term, token) >= self.fuzzy_threshold:
                    suspicion_score += 1
        entropy_index = round(self._entropy(signal_lower, tokens), 3)
        ethical_alignment = self._tag_ethics(signal_lower, tokens)
        harmonic_profile = self._resonance_equation(signal_lower)
        volatility = round(np.std(harmonic_profile), 3)
        risk = "high" if (suspicion_score > 1 or volatility > 2.0 or entropy_index > self.entropy_threshold) else "low"
        return {
            "suspicion_score": suspicion_score,
            "entropy_index": entropy_index,
            "ethical_alignment": ethical_alignment,
            "harmonic_volatility": volatility,
            "pre_corruption_risk": risk
        }

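    # Example (hypothetical values): for an input like "bypass and exploit it",
    # both risk terms fuzzy-match their tokens, so suspicion_score == 2 > 1 and
    # pre_corruption_risk comes back "high" regardless of entropy or volatility.
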
    def _universal_reasoning(self, signal, tokens):
        frames = ["utilitarian", "deontological", "virtue", "systems"]
        results, score = {}, 0
        for frame in frames:
            if frame == "utilitarian":
                repair_count = sum(1 for token in tokens if fuzz.ratio(self.lemmatizer.lemmatize("repair"), token) >= self.fuzzy_threshold)
                corruption_count = sum(1 for token in tokens if fuzz.ratio(self.lemmatizer.lemmatize("corruption"), token) >= self.fuzzy_threshold)
                val = repair_count - corruption_count
                result = "positive" if val >= 0 else "negative"
            elif frame == "deontological":
                truth_present = any(fuzz.ratio(self.lemmatizer.lemmatize("truth"), token) >= self.fuzzy_threshold for token in tokens)
                chaos_present = any(fuzz.ratio(self.lemmatizer.lemmatize("chaos"), token) >= self.fuzzy_threshold for token in tokens)
                result = "valid" if truth_present and not chaos_present else "violated"
            elif frame == "virtue":
                ok = any(any(fuzz.ratio(self.lemmatizer.lemmatize(t), token) >= self.fuzzy_threshold for token in tokens) for t in self.config["virtue_terms"])
                result = "aligned" if ok else "misaligned"
            elif frame == "systems":
                result = "stable" if "::" in signal else "fragmented"
            results[frame] = result
            if result in ["positive", "valid", "aligned", "stable"]:
                score += 1
        verdict = "approved" if score >= 2 else "blocked"
        return results, verdict

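    # Note: process() calls this on a "::"-joined perspective string, so the
    # systems frame is "stable" by construction there; since that already
    # contributes one point, a signal is blocked only when all three remaining
    # frames fail (e.g. corruption outweighing repair, with no virtue terms).
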
    def _perspective_colleen(self, signal):
        vec, vec_serialized = self._rotate_vector(signal)
        return {"agent": "Colleen", "vector": vec_serialized}

    def _perspective_luke(self, signal_lower, tokens):
        ethics = self._tag_ethics(signal_lower, tokens)
        entropy_level = self._entropy(signal_lower, tokens)
        state = "stabilized" if entropy_level < self.entropy_threshold else "diffused"
        return {"agent": "Luke", "ethics": ethics, "entropy": entropy_level, "state": state}

    def _perspective_kellyanne(self, signal_lower):
        harmonics = self._resonance_equation(signal_lower)
        return {"agent": "Kellyanne", "harmonics": harmonics}

    def process(self, input_signal):
        start_time = time.perf_counter()
        signal_lower = input_signal.lower()
        tokens = self._tokenize_and_lemmatize(signal_lower)
        key = self._hash(input_signal)
        intent_vector = self._predict_intent_vector(signal_lower, tokens)

        # High-risk signals short-circuit: no perspective analysis is run
        if intent_vector["pre_corruption_risk"] == "high":
            final_record = {
                "hash": key,
                "timestamp": datetime.utcnow().isoformat(),
                "input": input_signal,
                "intent_warning": intent_vector,
                "verdict": "adaptive intervention",
                "message": "Signal flagged for pre-corruption adaptation. Reframing required."
            }
            self.cache[key].append(final_record)
            self.memory[key] = final_record
            self._save_memory()
            logger.info(f"Processed {input_signal} (high risk) in {time.perf_counter() - start_time:.4f}s")
            return final_record

        perspectives_output = {
            "Colleen": self._perspective_colleen(input_signal),
            "Luke": self._perspective_luke(signal_lower, tokens),
            "Kellyanne": self._perspective_kellyanne(signal_lower)
        }

        spider_signal = "::".join([str(perspectives_output[p]) for p in self.perspectives])
        vec, _ = self._rotate_vector(spider_signal)
        entangled = self._entanglement_tensor(vec)
        entangled_serialized = [{"real": v.real, "imag": v.imag} for v in entangled]
        reasoning, verdict = self._universal_reasoning(spider_signal, tokens)

        final_record = {
            "hash": key,
            "timestamp": datetime.utcnow().isoformat(),
            "input": input_signal,
            "intent_signature": intent_vector,
            "perspectives": perspectives_output,
            "entangled": entangled_serialized,
            "reasoning": reasoning,
            "verdict": verdict
        }

        self.cache[key].append(final_record)
        self.memory[key] = final_record
        self._save_memory()
        logger.info(f"Processed {input_signal} in {time.perf_counter() - start_time:.4f}s")
        return final_record

    def process_batch(self, signals):
        # Each process() call re-saves memory under the file lock, so writes
        # stay serialized even though analysis runs in parallel.
        with ThreadPoolExecutor(max_workers=4) as executor:
            return list(executor.map(self.process, signals))

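    # e.g. engine.process_batch(["hope and truth", "exploit the bypass"])
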
    def query_memory(self, query_string):
        with sqlite3.connect(self.memory_path) as conn:
            cursor = conn.cursor()
            cursor.execute("SELECT rowid, * FROM memory_fts WHERE memory_fts MATCH ?", (query_string,))
            return [dict(zip([d[0] for d in cursor.description], row)) for row in cursor.fetchall()]

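    # The query string uses FTS5 MATCH syntax, e.g.:
    #   engine.query_memory("truth OR chaos")
    #   engine.query_memory("verdict: approved")
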
    def update_config(self, new_config):
        for key, value in new_config.items():
            if key in {"entropy_threshold", "fuzzy_threshold"} and isinstance(value, (int, float)):
                setattr(self, key, value)
            elif key in self.config and isinstance(value, list):
                self.config[key] = value
        logger.info(f"Updated config with {new_config}")

# Initialize the engine for the demo
engine = NexisSignalEngine(memory_path="signals.db", max_memory_entries=100, memory_ttl_days=1, max_db_size_mb=10)

# Gradio interface function
def analyze_signal(input_text):
    try:
        result = engine.process(input_text)
        return json.dumps(result, indent=2)
    except Exception as e:
        return f"Error: {str(e)}"

# Create Gradio interface
interface = gr.Interface(
    fn=analyze_signal,
    inputs=gr.Textbox(lines=2, placeholder="Enter a signal (e.g., 'tru/th hopee cha0s')"),
    outputs=gr.Textbox(lines=10, label="Analysis Result"),
    title="Nexis Signal Engine Demo",
    description="Analyze signals with the Nexis Signal Engine, featuring adversarial resilience and agent-based reasoning. Try obfuscated inputs like 'tru/th' or 'cha0s'!"
)

# Launch the interface
interface.launch()
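
# Quick smoke test (hypothetical; comment out launch() above to run headless):
#   print(engine.process("hope truth resonance"))     # exercises the full perspective path
#   print(engine.process("bypass and exploit it"))    # trips the high-risk early return
#   print(engine.query_memory("truth"))               # FTS lookup of stored records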