"""
Configuration settings for the Edge LLM API
"""
import os
from dotenv import load_dotenv
# Pull settings from a local .env file (if present) into the process environment.
load_dotenv()

# Credentials and endpoint for the hosted LLM API (AiHubMix).
# Both fall back gracefully when the variables are absent.
API_KEY = os.environ.get("api_key", "")
BASE_URL = os.environ.get("base_url", "https://aihubmix.com/v1")
# Registry of selectable models. Each entry records a display name, whether
# the model emits an explicit "thinking" trace, a short description, an
# approximate footprint (or "API" for hosted models), and its serving type.
# Insertion order matters: API-hosted models are listed first on purpose.
AVAILABLE_MODELS = {
    # Hosted via AiHubMix — listed first so they take priority in UIs.
    "Qwen/Qwen3-30B-A3B": dict(
        name="Qwen3-30B-A3B",
        supports_thinking=True,
        description="API: Qwen3 with dynamic thinking modes",
        size_gb="API",
        type="api",
    ),
    # Models intended to run on the local machine during development.
    "Qwen/Qwen3-4B-Thinking-2507": dict(
        name="Qwen3-4B-Thinking-2507",
        supports_thinking=True,
        description="Local: Shows thinking process",
        size_gb="~8GB",
        type="local",
    ),
    "Qwen/Qwen3-4B-Instruct-2507": dict(
        name="Qwen3-4B-Instruct-2507",
        supports_thinking=False,
        description="Local: Direct instruction following",
        size_gb="~8GB",
        type="local",
    ),
    "qwen2.5-vl-72b-instruct": dict(
        name="Qwen2.5-VL-72B-Instruct",
        supports_thinking=False,
        description="API: Multimodal model with vision",
        size_gb="API",
        type="api",
    ),
    "Qwen/QVQ-72B-Preview": dict(
        name="QVQ-72B-Preview",
        supports_thinking=True,
        description="API: Visual reasoning with thinking",
        size_gb="API",
        type="api",
    ),
}
# Cross-origin policy: the Hugging Face Space frontend may be served from any
# origin, so allow everything.
CORS_ORIGINS = ["*"]

# Where the built frontend lives in production. Assets sit in a subdirectory
# of the same static root.
FRONTEND_DIST_DIR = "static"
ASSETS_DIR = FRONTEND_DIST_DIR + "/assets"

# Bind on all interfaces; HF Spaces expects the app on port 7860 unless the
# platform overrides it via the PORT environment variable.
HOST = "0.0.0.0"
DEFAULT_PORT = int(os.environ.get("PORT", "7860"))
|