""" |
|
|
Configuration settings for the Edge LLM API |
|
|
""" |
|
|
import os |
|
|
from dotenv import load_dotenv |
|
|
|
|
|
|
|
|
load_dotenv() |
|
|
|
|
|
|
|
|
API_KEY = os.getenv("api_key", "") |
|
|
BASE_URL = os.getenv("base_url", "https://aihubmix.com/v1") |
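
# Usage sketch (an assumption about how other modules consume these settings,
# not something this config module requires): the key and base URL would
# typically be passed to an OpenAI-compatible client, e.g.
#
#     from openai import OpenAI
#     client = OpenAI(api_key=API_KEY, base_url=BASE_URL)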

# Models exposed by the API. "type" distinguishes hosted API models from
# locally loaded ones; "supports_thinking" marks models that emit a
# reasoning/thinking trace before the final answer.
AVAILABLE_MODELS = {
    "Qwen/Qwen3-30B-A3B": {
        "name": "Qwen3-30B-A3B",
        "supports_thinking": True,
        "description": "API: Qwen3 with dynamic thinking modes",
        "size_gb": "API",
        "type": "api",
    },
    "Qwen/Qwen3-4B-Thinking-2507": {
        "name": "Qwen3-4B-Thinking-2507",
        "supports_thinking": True,
        "description": "Local: Shows thinking process",
        "size_gb": "~8GB",
        "type": "local",
    },
    "Qwen/Qwen3-4B-Instruct-2507": {
        "name": "Qwen3-4B-Instruct-2507",
        "supports_thinking": False,
        "description": "Local: Direct instruction following",
        "size_gb": "~8GB",
        "type": "local",
    },
    "qwen2.5-vl-72b-instruct": {
        "name": "Qwen2.5-VL-72B-Instruct",
        "supports_thinking": False,
        "description": "API: Multimodal model with vision",
        "size_gb": "API",
        "type": "api",
    },
    "Qwen/QVQ-72B-Preview": {
        "name": "QVQ-72B-Preview",
        "supports_thinking": True,
        "description": "API: Visual reasoning with thinking",
        "size_gb": "API",
        "type": "api",
    },
}
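

# Helper sketches (assumptions, not part of the original config): small lookups
# that callers might use to fetch a model's settings or to check whether it
# exposes a thinking trace. The function names are illustrative.
def get_model_config(model_id: str) -> dict:
    """Return the config entry for `model_id`; raises KeyError if unknown."""
    return AVAILABLE_MODELS[model_id]


def supports_thinking(model_id: str) -> bool:
    """Return True if `model_id` is known and emits a thinking trace."""
    return AVAILABLE_MODELS.get(model_id, {}).get("supports_thinking", False)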

# CORS: allow all origins (convenient for development; restrict in production).
CORS_ORIGINS = ["*"]

# Built frontend bundle served as static files.
FRONTEND_DIST_DIR = "static"
ASSETS_DIR = "static/assets"

# Server binding; the port can be overridden via the PORT environment variable
# (defaults to 7860).
HOST = "0.0.0.0"
DEFAULT_PORT = int(os.getenv("PORT", "7860"))
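
# Usage sketch (assumes the app is served with uvicorn; the "main:app" module
# and app name below are hypothetical, not defined in this file):
#
#     import uvicorn
#     uvicorn.run("main:app", host=HOST, port=DEFAULT_PORT)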