Raiff1982 committed
Commit f1e9fc1 · verified · 1 Parent(s): a8835d8

Upload 7 files

config/environment.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cuda_available": false,
+  "gpu_count": 0,
+  "gpu_memory": 0,
+  "python_version": "3.13.7",
+  "torch_version": "2.8.0+cpu"
+}
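A snapshot like this can be regenerated from the local runtime. The following is a minimal sketch, assuming only the standard `torch` and `platform` APIs; the output path is the one added in this commit:

```python
# Minimal sketch: regenerate config/environment.json from the local runtime.
import json
import platform

import torch

env = {
    "cuda_available": torch.cuda.is_available(),
    "gpu_count": torch.cuda.device_count(),
    # Total memory of GPU 0 in bytes; 0 on CPU-only hosts like this one.
    "gpu_memory": (
        torch.cuda.get_device_properties(0).total_memory
        if torch.cuda.is_available()
        else 0
    ),
    "python_version": platform.python_version(),
    "torch_version": torch.__version__,
}

with open("config/environment.json", "w") as f:
    json.dump(env, f, indent=2)
```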
config/models.json ADDED
@@ -0,0 +1,44 @@
+{
+  "models": {
+    "phi": {
+      "path": "microsoft/phi-3-mini-4k-instruct",
+      "description": "Efficient model good for general tasks",
+      "requirements": {
+        "min_ram": "8GB",
+        "recommended_gpu": "4GB VRAM"
+      }
+    },
+    "mistral": {
+      "path": "mistralai/Mistral-7B-v0.1",
+      "description": "Excellent for reasoning and technical content",
+      "requirements": {
+        "min_ram": "16GB",
+        "recommended_gpu": "8GB VRAM"
+      }
+    },
+    "codellama": {
+      "path": "codellama/CodeLlama-7b-instruct-hf",
+      "description": "Specialized for code understanding and generation",
+      "requirements": {
+        "min_ram": "16GB",
+        "recommended_gpu": "8GB VRAM"
+      }
+    },
+    "llama": {
+      "path": "meta-llama/Llama-3.1-8B-instruct",
+      "description": "Powerful general-purpose model",
+      "requirements": {
+        "min_ram": "16GB",
+        "recommended_gpu": "12GB VRAM"
+      }
+    }
+  },
+  "default_model": "phi",
+  "load_in_8bit": true,
+  "device_map": "auto",
+  "max_memory": null,
+  "torch_dtype": "float16",
+  "model_auth_required": {
+    "meta-llama": true
+  }
+}
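A consumer of this file needs to resolve the default model and check whether its organization requires authentication. A minimal sketch, assuming only the layout above (no Codette internals):

```python
# Sketch: resolve the configured default model from config/models.json.
import json

with open("config/models.json") as f:
    cfg = json.load(f)

name = cfg["default_model"]          # "phi" in this config
entry = cfg["models"][name]
org = entry["path"].split("/")[0]    # e.g. "microsoft" or "meta-llama"
needs_auth = cfg.get("model_auth_required", {}).get(org, False)

print(f"{entry['path']} (8-bit: {cfg['load_in_8bit']}, auth: {needs_auth})")
```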
docs/configuration.md ADDED
@@ -0,0 +1,54 @@
+# Codette Configuration Guide
+
+## Environment Variables
+
+- `HUGGINGFACEHUB_API_TOKEN`: HuggingFace API token for sentiment analysis and model access
+- `OPENAI_API_KEY`: Optional OpenAI API key for additional model support
+- `LOG_LEVEL`: Logging level (DEBUG, INFO, WARNING, ERROR)
+- `PORT`: Port number for the web server (default: 7860)
+
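These variables would typically be read once at startup. A small sketch using only the standard library, with defaults mirroring the list above:

```python
# Sketch: read the documented environment variables with their defaults.
import os

hf_token = os.environ.get("HUGGINGFACEHUB_API_TOKEN")   # needed for gated models
openai_key = os.environ.get("OPENAI_API_KEY")           # optional
log_level = os.environ.get("LOG_LEVEL", "INFO")
port = int(os.environ.get("PORT", "7860"))              # documented default
```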
+## Model Configuration
+
+Codette supports multiple language models in a fallback chain:
+
+1. Mistral-7B-Instruct (Primary)
+   - 7B parameter instruction-tuned model
+   - Requires 16GB+ VRAM
+   - Configuration: 8-bit quantization, fp16
+
+2. Phi-2 (Secondary)
+   - Lightweight yet powerful alternative
+   - Requires 8GB+ VRAM
+   - Configuration: fp16
+
+3. GPT-2 (Fallback)
+   - Minimal requirements
+   - Always available option
+   - Configuration: Standard loading
+
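A loading loop for the chain above might look like the following sketch. The Hugging Face repo ids are assumptions (the doc names the models but not their exact ids), and error handling is deliberately minimal:

```python
# Sketch: try each model in the documented fallback chain until one loads.
from transformers import pipeline

# Assumed repo ids for the models named above.
FALLBACK_CHAIN = [
    "mistralai/Mistral-7B-Instruct-v0.1",  # primary
    "microsoft/phi-2",                     # secondary
    "gpt2",                                # always-available fallback
]

def load_first_available():
    for model_id in FALLBACK_CHAIN:
        try:
            return pipeline("text-generation", model=model_id)
        except Exception:
            continue  # fall through to the next model in the chain
    raise RuntimeError("No model in the fallback chain could be loaded")
```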
+## Consciousness Parameters
+
+### Memory System
+
+- `response_memory`: Maintains last 50 responses
+- `memory_context`: Uses last 5 responses for learning
+- `memory_synthesis`: Uses last 2 responses for consciousness
+
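The three windows above map naturally onto a bounded deque. An illustrative sketch, not Codette's actual implementation:

```python
# Sketch: the documented memory windows as a bounded deque.
from collections import deque

response_memory = deque(maxlen=50)          # keeps the last 50 responses

def memory_context():
    """Last 5 responses, used for learning."""
    return list(response_memory)[-5:]

def memory_synthesis():
    """Last 2 responses, used for consciousness synthesis."""
    return list(response_memory)[-2:]
```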
+### Quantum States
+
+- Stored in .cocoon files
+- Format: JSON with quantum_state and chaos_state arrays
+- Used for creative and probabilistic reasoning
+
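Given that format, reading a cocoon reduces to plain JSON parsing. A sketch assuming only the two documented arrays (real cocoons may carry more fields):

```python
# Sketch: read the quantum_state and chaos_state arrays from a .cocoon file.
import json

def load_cocoon(path):
    with open(path) as f:
        data = json.load(f)
    return data["quantum_state"], data["chaos_state"]
```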
+### Perspective System
+
+- Newton: temperature = 0.3 (analytical)
+- Da Vinci: temperature = 0.9 (creative)
+- Human Intuition: temperature = 0.7 (empathetic)
+- Quantum Computing: temperature = 0.8 (probabilistic)
+
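The mapping above is a simple lookup table; as a constant it might read:

```python
# The documented perspective temperatures, verbatim from the list above.
PERSPECTIVE_TEMPERATURE = {
    "Newton": 0.3,             # analytical
    "Da Vinci": 0.9,           # creative
    "Human Intuition": 0.7,    # empathetic
    "Quantum Computing": 0.8,  # probabilistic
}
```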
+## Response Generation
+
+### Text Generation Parameters
+
+- Max length: 512 tokens (default)
+- Temperature range: 0.3 - 0.9
+- Top-p: 0.9
+- Context window: 2048 tokens
+- Special token handling for different models
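Put together, these parameters map onto a standard `transformers` generation call. A runnable sketch, with `gpt2` standing in for whichever model is active and the temperature chosen from the documented range:

```python
# Sketch: the documented generation parameters in a transformers pipeline call.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
output = generator(
    "Explain quantum reasoning in one sentence.",
    max_length=512,    # documented default
    temperature=0.7,   # within the documented 0.3-0.9 range
    top_p=0.9,
    do_sample=True,    # sampling is required for temperature/top_p to apply
)
print(output[0]["generated_text"])
```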
docs/source/configuration.rst ADDED
@@ -0,0 +1,46 @@
+.. _configuration:
+
+Configuration
+=============
+
+Environment Variables
+---------------------
+
+The following environment variables can be used to configure Codette:
+
+- ``HUGGINGFACEHUB_API_TOKEN``: HuggingFace API token
+- ``OPENAI_API_KEY``: Optional OpenAI API key
+- ``LOG_LEVEL``: Logging level
+- ``PORT``: Web server port
+
+Model Configuration
+-------------------
+
+Codette uses a fallback chain of models:
+
+1. Mistral-7B-Instruct (Primary)
+2. Phi-2 (Secondary)
+3. GPT-2 (Fallback)
+
+See :class:`src.ai_core.AICore` for implementation details.
+
+Consciousness System
+--------------------
+
+Memory Management
+~~~~~~~~~~~~~~~~~
+
+- Response memory: Last 50 responses
+- Memory context: Last 5 responses for learning
+- Memory synthesis: Last 2 responses for consciousness
+
+Quantum States
+~~~~~~~~~~~~~~
+
+Stored in .cocoon files with:
+
+- quantum_state arrays
+- chaos_state arrays
+- perspective information
+
+See :meth:`src.ai_core.AICore.load_cocoon_data` for details.
docs/source/index.rst ADDED
@@ -0,0 +1,44 @@
+Codette Universal Reasoning Framework
+=====================================
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Contents:
+
+   getting_started
+   configuration
+   api/modules
+   development/contributing
+
+Core Components
+---------------
+
+AICore
+~~~~~~
+
+.. automodule:: src.ai_core
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Bot Integration
+~~~~~~~~~~~~~~~
+
+.. automodule:: src.bot
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Web Application
+~~~~~~~~~~~~~~~
+
+.. automodule:: src.app
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Development
+-----------
+
+* :ref:`contributing`
+* :ref:`testing`
models/fallback/__init__.py ADDED
@@ -0,0 +1,110 @@
+"""
+Fallback model handler for Codette
+Uses open source models as fallbacks when proprietary models are unavailable
+"""
+import logging
+from pathlib import Path
+from typing import Any, Dict, Optional
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+class FallbackModelManager:
+    def __init__(self):
+        # This module lives in models/fallback/, so the models directory is
+        # one level up and the fallback cache is this package's own directory.
+        self.models_dir = Path(__file__).parent.parent
+        self.fallback_dir = self.models_dir / 'fallback'
+        self.fallback_dir.mkdir(exist_ok=True)
+        self.model_cache: Dict[str, Any] = {}
+
+    def get_model(self, model_name: str) -> Optional[Any]:
+        """Get a model, falling back to open source alternatives if needed."""
+        try:
+            # Serve from the in-memory cache when possible
+            if model_name in self.model_cache:
+                return self.model_cache[model_name]
+
+            # Try loading the original model first
+            original_path = self.models_dir / f"{model_name}.pt"
+            if original_path.exists():
+                import torch
+                # weights_only=False is needed to unpickle full model objects
+                # on torch >= 2.6, where the default changed to True.
+                model = torch.load(original_path, weights_only=False)
+                self.model_cache[model_name] = model
+                return model
+
+            # If the original is unavailable, try a fallback
+            return self._load_fallback(model_name)
+
+        except Exception as e:
+            logger.error(f"Error loading model {model_name}: {str(e)}")
+            return self._load_fallback(model_name)
+
+    def _load_fallback(self, model_name: str) -> Optional[Any]:
+        """Load an open source fallback model."""
+        try:
+            if model_name.startswith('nlp_'):
+                from transformers import AutoModel, AutoTokenizer
+                model_id = "bert-base-uncased"  # Default NLP fallback
+                model = AutoModel.from_pretrained(model_id)
+                tokenizer = AutoTokenizer.from_pretrained(model_id)
+                return {'model': model, 'tokenizer': tokenizer}
+
+            elif model_name.startswith('vision_'):
+                import torchvision.models as models
+                # The `pretrained=True` argument was removed from torchvision;
+                # the `weights` enum is the current API.
+                return models.resnet18(weights=models.ResNet18_Weights.DEFAULT)
+
+            elif model_name.startswith('quantum_'):
+                # For quantum models, use a classical approximation
+                return self._create_quantum_approximation()
+
+            else:
+                logger.warning(f"No fallback available for {model_name}")
+                return None
+
+        except Exception as e:
+            logger.error(f"Error loading fallback for {model_name}: {str(e)}")
+            return None
+
+    def _create_quantum_approximation(self) -> Any:
+        """Create a classical approximation of quantum operations."""
+        try:
+            import torch.nn as nn
+
+            class QuantumApproximator(nn.Module):
+                """Small MLP standing in for a quantum state transformation."""
+                def __init__(self):
+                    super().__init__()
+                    self.layers = nn.Sequential(
+                        nn.Linear(64, 128),
+                        nn.ReLU(),
+                        nn.Linear(128, 64)
+                    )
+
+                def forward(self, x):
+                    return self.layers(x)
+
+            return QuantumApproximator()
+
+        except Exception as e:
+            logger.error(f"Error creating quantum approximation: {str(e)}")
+            return None
+
+    def download_if_needed(self, model_name: str):
+        """Pre-download fallback weights if they are not cached locally yet."""
+        try:
+            if not (self.fallback_dir / f"{model_name}.pt").exists():
+                if model_name.startswith('nlp_'):
+                    from transformers import AutoModel, AutoTokenizer
+                    model_id = "bert-base-uncased"
+                    AutoModel.from_pretrained(model_id)
+                    AutoTokenizer.from_pretrained(model_id)
+                elif model_name.startswith('vision_'):
+                    import torchvision.models as models
+                    models.resnet18(weights=models.ResNet18_Weights.DEFAULT)
+        except Exception as e:
+            logger.error(f"Error downloading model {model_name}: {str(e)}")
+
+
+# Global instance
+fallback_manager = FallbackModelManager()
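For illustration, the global manager might be used like this; the model name is hypothetical (only its `nlp_` prefix matters to the routing above):

```python
# Hypothetical usage of the global fallback manager.
from models.fallback import fallback_manager

nlp = fallback_manager.get_model("nlp_sentiment")  # name is illustrative
if nlp is not None:
    inputs = nlp["tokenizer"]("hello world", return_tensors="pt")
    hidden = nlp["model"](**inputs).last_hidden_state
```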
models/fallback/model_config.json ADDED
@@ -0,0 +1,36 @@
+{
+  "model_mappings": {
+    "nlp_transformer": {
+      "fallback": "bert-base-uncased",
+      "type": "nlp",
+      "task": "general"
+    },
+    "vision_analyzer": {
+      "fallback": "resnet18",
+      "type": "vision",
+      "task": "classification"
+    },
+    "quantum_simulator": {
+      "fallback": "classical_approximation",
+      "type": "quantum",
+      "task": "simulation"
+    }
+  },
+  "fallback_priorities": {
+    "nlp": [
+      "bert-base-uncased",
+      "distilbert-base-uncased",
+      "roberta-base"
+    ],
+    "vision": [
+      "resnet18",
+      "mobilenet_v2",
+      "efficientnet_b0"
+    ],
+    "quantum": [
+      "classical_approximation",
+      "tensor_network",
+      "statistical_model"
+    ]
+  }
+}
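The `fallback_priorities` lists are ordered by preference, so a consumer walks each list until a candidate loads. A sketch assuming the layout above; the loader callable is supplied by the caller:

```python
# Sketch: walk a fallback_priorities list until one candidate loads.
import json

with open("models/fallback/model_config.json") as f:
    cfg = json.load(f)

def first_working(model_type, try_load):
    """try_load is a caller-supplied callable, e.g. a transformers loader."""
    for candidate in cfg["fallback_priorities"][model_type]:
        try:
            return try_load(candidate)
        except Exception:
            continue  # move on to the next priority
    return None
```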