# edgellm/backend/config.py
# (HF Spaces startup fix: use port 7860 and simplify startup logic)
"""
Configuration settings for the Edge LLM API
"""
import os
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# API Configuration
API_KEY = os.getenv("api_key", "")
BASE_URL = os.getenv("base_url", "https://aihubmix.com/v1")
# Model registry.
# Each entry maps a model ID (as sent by the frontend) to its metadata.
def _entry(display_name, thinking, description, size_gb, kind):
    """Build one model-registry record.

    kind is "api" (served via AiHubMix) or "local" (loaded on this box).
    """
    return {
        "name": display_name,
        "supports_thinking": thinking,
        "description": description,
        "size_gb": size_gb,
        "type": kind,
    }


AVAILABLE_MODELS = {
    # API-backed models (AiHubMix) come first so they are prioritized.
    "Qwen/Qwen3-30B-A3B": _entry(
        "Qwen3-30B-A3B", True,
        "API: Qwen3 with dynamic thinking modes", "API", "api"),
    # Models intended for local development machines.
    "Qwen/Qwen3-4B-Thinking-2507": _entry(
        "Qwen3-4B-Thinking-2507", True,
        "Local: Shows thinking process", "~8GB", "local"),
    "Qwen/Qwen3-4B-Instruct-2507": _entry(
        "Qwen3-4B-Instruct-2507", False,
        "Local: Direct instruction following", "~8GB", "local"),
    "qwen2.5-vl-72b-instruct": _entry(
        "Qwen2.5-VL-72B-Instruct", False,
        "API: Multimodal model with vision", "API", "api"),
    "Qwen/QVQ-72B-Preview": _entry(
        "QVQ-72B-Preview", True,
        "API: Visual reasoning with thinking", "API", "api"),
}
# --- CORS ---
# The HF Space serves its own frontend, so every origin is allowed.
CORS_ORIGINS = ["*"]

# --- Static assets ---
# Production build output of the frontend lives under "static/".
FRONTEND_DIST_DIR = "static"
ASSETS_DIR = "static/assets"

# --- Server binding ---
HOST = "0.0.0.0"  # listen on all interfaces inside the container
# HF Spaces expects apps on port 7860; $PORT overrides when provided.
DEFAULT_PORT = int(os.environ.get("PORT", "7860"))