Spaces:
Running
Running
refactor: move provider env keys to GlobalConfig and optimize imports
Browse files
Move PROVIDER_ENV_KEYS from app.py to GlobalConfig for better organization and maintainability. Also, optimize imports in llm_helper.py by removing unused imports and relocating the ChatOpenAI import to where it is needed.
- app.py +4 -16
- global_config.py +21 -12
- helpers/llm_helper.py +5 -4
app.py
CHANGED
|
@@ -26,24 +26,11 @@ import helpers.file_manager as filem
|
|
| 26 |
from global_config import GlobalConfig
|
| 27 |
from helpers import llm_helper, pptx_helper, text_helper
|
| 28 |
|
| 29 |
-
|
| 30 |
load_dotenv()
|
| 31 |
|
| 32 |
-
|
| 33 |
RUN_IN_OFFLINE_MODE = os.getenv('RUN_IN_OFFLINE_MODE', 'False').lower() == 'true'
|
| 34 |
|
| 35 |
-
|
| 36 |
-
# --- API Key Environment Variable Mapping ---
|
| 37 |
-
PROVIDER_ENV_KEYS = {
|
| 38 |
-
GlobalConfig.PROVIDER_OPENROUTER: "OPENROUTER_API_KEY",
|
| 39 |
-
GlobalConfig.PROVIDER_COHERE: "COHERE_API_KEY",
|
| 40 |
-
GlobalConfig.PROVIDER_HUGGING_FACE: "HUGGINGFACEHUB_API_TOKEN",
|
| 41 |
-
GlobalConfig.PROVIDER_GOOGLE_GEMINI: "GOOGLE_API_KEY",
|
| 42 |
-
GlobalConfig.PROVIDER_TOGETHER_AI: "TOGETHER_API_KEY",
|
| 43 |
-
GlobalConfig.PROVIDER_AZURE_OPENAI: "AZURE_OPENAI_API_KEY",
|
| 44 |
-
# Add more as needed
|
| 45 |
-
}
|
| 46 |
-
|
| 47 |
|
| 48 |
@st.cache_data
|
| 49 |
def _load_strings() -> dict:
|
|
@@ -196,9 +183,9 @@ with st.sidebar:
|
|
| 196 |
).split(' ')[0]
|
| 197 |
|
| 198 |
# --- Automatically fetch API key from .env if available ---
|
| 199 |
-
provider_match =
|
| 200 |
selected_provider = provider_match.group(1) if provider_match else llm_provider_to_use
|
| 201 |
-
env_key_name = PROVIDER_ENV_KEYS.get(selected_provider)
|
| 202 |
default_api_key = os.getenv(env_key_name, "") if env_key_name else ""
|
| 203 |
|
| 204 |
# Always sync session state to env value if needed (auto-fill on provider change)
|
|
@@ -608,3 +595,4 @@ def main():
|
|
| 608 |
|
| 609 |
if __name__ == '__main__':
|
| 610 |
main()
|
|
|
|
|
|
| 26 |
from global_config import GlobalConfig
|
| 27 |
from helpers import llm_helper, pptx_helper, text_helper
|
| 28 |
|
|
|
|
| 29 |
load_dotenv()
|
| 30 |
|
|
|
|
| 31 |
RUN_IN_OFFLINE_MODE = os.getenv('RUN_IN_OFFLINE_MODE', 'False').lower() == 'true'
|
| 32 |
|
| 33 |
+
PROVIDER_REGEX = re.compile(r'\[(.*?)\]')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
|
| 35 |
@st.cache_data
|
| 36 |
def _load_strings() -> dict:
|
|
|
|
| 183 |
).split(' ')[0]
|
| 184 |
|
| 185 |
# --- Automatically fetch API key from .env if available ---
|
| 186 |
+
provider_match = PROVIDER_REGEX.match(llm_provider_to_use)
|
| 187 |
selected_provider = provider_match.group(1) if provider_match else llm_provider_to_use
|
| 188 |
+
env_key_name = GlobalConfig.PROVIDER_ENV_KEYS.get(selected_provider)
|
| 189 |
default_api_key = os.getenv(env_key_name, "") if env_key_name else ""
|
| 190 |
|
| 191 |
# Always sync session state to env value if needed (auto-fill on provider change)
|
|
|
|
| 595 |
|
| 596 |
if __name__ == '__main__':
|
| 597 |
main()
|
| 598 |
+
|
global_config.py
CHANGED
|
@@ -20,10 +20,10 @@ class GlobalConfig:
|
|
| 20 |
PROVIDER_COHERE = 'co'
|
| 21 |
PROVIDER_GOOGLE_GEMINI = 'gg'
|
| 22 |
PROVIDER_HUGGING_FACE = 'hf'
|
| 23 |
-
PROVIDER_OLLAMA = 'ol'
|
| 24 |
-
PROVIDER_TOGETHER_AI = 'to'
|
| 25 |
PROVIDER_AZURE_OPENAI = 'az'
|
|
|
|
| 26 |
PROVIDER_OPENROUTER = 'or'
|
|
|
|
| 27 |
VALID_PROVIDERS = {
|
| 28 |
PROVIDER_COHERE,
|
| 29 |
PROVIDER_GOOGLE_GEMINI,
|
|
@@ -33,6 +33,14 @@ class GlobalConfig:
|
|
| 33 |
PROVIDER_AZURE_OPENAI,
|
| 34 |
PROVIDER_OPENROUTER,
|
| 35 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
VALID_MODELS = {
|
| 37 |
'[az]azure/open-ai': {
|
| 38 |
'description': 'faster, detailed',
|
|
@@ -64,6 +72,16 @@ class GlobalConfig:
|
|
| 64 |
'max_new_tokens': 8192,
|
| 65 |
'paid': False,
|
| 66 |
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 67 |
'[to]meta-llama/Llama-3.3-70B-Instruct-Turbo': {
|
| 68 |
'description': 'detailed, slower',
|
| 69 |
'max_new_tokens': 4096,
|
|
@@ -73,15 +91,6 @@ class GlobalConfig:
|
|
| 73 |
'description': 'shorter, faster',
|
| 74 |
'max_new_tokens': 4096,
|
| 75 |
'paid': True,
|
| 76 |
-
},
|
| 77 |
-
'[or]openai/gpt-3.5-turbo': {
|
| 78 |
-
'description': 'OpenAI GPT-3.5 Turbo (via OpenRouter)',
|
| 79 |
-
'max_new_tokens': 2048,
|
| 80 |
-
},
|
| 81 |
-
'[or]openrouter/gpt-4-omni': {
|
| 82 |
-
'description': 'OpenRouter GPT-4 Omni',
|
| 83 |
-
'max_new_tokens': 8192,
|
| 84 |
-
'paid': True,
|
| 85 |
}
|
| 86 |
}
|
| 87 |
LLM_PROVIDER_HELP = (
|
|
@@ -90,8 +99,8 @@ class GlobalConfig:
|
|
| 90 |
'- **[co]**: Cohere\n'
|
| 91 |
'- **[gg]**: Google Gemini API\n'
|
| 92 |
'- **[hf]**: Hugging Face Inference API\n'
|
| 93 |
-
'- **[to]**: Together AI\n'
|
| 94 |
'- **[or]**: OpenRouter\n\n'
|
|
|
|
| 95 |
'[Find out more](https://github.com/barun-saha/slide-deck-ai?tab=readme-ov-file#summary-of-the-llms)'
|
| 96 |
)
|
| 97 |
DEFAULT_MODEL_INDEX = int(os.environ.get('DEFAULT_MODEL_INDEX', '4'))
|
|
|
|
| 20 |
PROVIDER_COHERE = 'co'
|
| 21 |
PROVIDER_GOOGLE_GEMINI = 'gg'
|
| 22 |
PROVIDER_HUGGING_FACE = 'hf'
|
|
|
|
|
|
|
| 23 |
PROVIDER_AZURE_OPENAI = 'az'
|
| 24 |
+
PROVIDER_OLLAMA = 'ol'
|
| 25 |
PROVIDER_OPENROUTER = 'or'
|
| 26 |
+
PROVIDER_TOGETHER_AI = 'to'
|
| 27 |
VALID_PROVIDERS = {
|
| 28 |
PROVIDER_COHERE,
|
| 29 |
PROVIDER_GOOGLE_GEMINI,
|
|
|
|
| 33 |
PROVIDER_AZURE_OPENAI,
|
| 34 |
PROVIDER_OPENROUTER,
|
| 35 |
}
|
| 36 |
+
PROVIDER_ENV_KEYS = {
|
| 37 |
+
PROVIDER_COHERE: "COHERE_API_KEY",
|
| 38 |
+
PROVIDER_GOOGLE_GEMINI: "GOOGLE_API_KEY",
|
| 39 |
+
PROVIDER_HUGGING_FACE: "HUGGINGFACEHUB_API_TOKEN",
|
| 40 |
+
PROVIDER_AZURE_OPENAI: "AZURE_OPENAI_API_KEY",
|
| 41 |
+
PROVIDER_OPENROUTER: "OPENROUTER_API_KEY",
|
| 42 |
+
PROVIDER_TOGETHER_AI: "TOGETHER_API_KEY",
|
| 43 |
+
}
|
| 44 |
VALID_MODELS = {
|
| 45 |
'[az]azure/open-ai': {
|
| 46 |
'description': 'faster, detailed',
|
|
|
|
| 72 |
'max_new_tokens': 8192,
|
| 73 |
'paid': False,
|
| 74 |
},
|
| 75 |
+
'[or]google/gemini-2.0-flash-001': {
|
| 76 |
+
'description': 'Google Gemini-2.0-flash-001 (via OpenRouter)',
|
| 77 |
+
'max_new_tokens': 8192,
|
| 78 |
+
'paid': True,
|
| 79 |
+
},
|
| 80 |
+
'[or]openai/gpt-3.5-turbo': {
|
| 81 |
+
'description': 'OpenAI GPT-3.5 Turbo (via OpenRouter)',
|
| 82 |
+
'max_new_tokens': 4096,
|
| 83 |
+
'paid': True,
|
| 84 |
+
},
|
| 85 |
'[to]meta-llama/Llama-3.3-70B-Instruct-Turbo': {
|
| 86 |
'description': 'detailed, slower',
|
| 87 |
'max_new_tokens': 4096,
|
|
|
|
| 91 |
'description': 'shorter, faster',
|
| 92 |
'max_new_tokens': 4096,
|
| 93 |
'paid': True,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 94 |
}
|
| 95 |
}
|
| 96 |
LLM_PROVIDER_HELP = (
|
|
|
|
| 99 |
'- **[co]**: Cohere\n'
|
| 100 |
'- **[gg]**: Google Gemini API\n'
|
| 101 |
'- **[hf]**: Hugging Face Inference API\n'
|
|
|
|
| 102 |
'- **[or]**: OpenRouter\n\n'
|
| 103 |
+
'- **[to]**: Together AI\n'
|
| 104 |
'[Find out more](https://github.com/barun-saha/slide-deck-ai?tab=readme-ov-file#summary-of-the-llms)'
|
| 105 |
)
|
| 106 |
DEFAULT_MODEL_INDEX = int(os.environ.get('DEFAULT_MODEL_INDEX', '4'))
|
helpers/llm_helper.py
CHANGED
|
@@ -11,7 +11,6 @@ import requests
|
|
| 11 |
from requests.adapters import HTTPAdapter
|
| 12 |
from urllib3.util import Retry
|
| 13 |
from langchain_core.language_models import BaseLLM, BaseChatModel
|
| 14 |
-
from langchain_openai import ChatOpenAI
|
| 15 |
import os
|
| 16 |
|
| 17 |
sys.path.append('..')
|
|
@@ -191,10 +190,12 @@ def get_langchain_llm(
|
|
| 191 |
|
| 192 |
if provider == GlobalConfig.PROVIDER_OPENROUTER:
|
| 193 |
# Use langchain-openai's ChatOpenAI for OpenRouter
|
|
|
|
|
|
|
| 194 |
logger.debug('Getting LLM via OpenRouter: %s', model)
|
| 195 |
-
openrouter_api_key = api_key
|
| 196 |
-
base_url =
|
| 197 |
-
|
| 198 |
return ChatOpenAI(
|
| 199 |
base_url=base_url,
|
| 200 |
openai_api_key=openrouter_api_key,
|
|
|
|
| 11 |
from requests.adapters import HTTPAdapter
|
| 12 |
from urllib3.util import Retry
|
| 13 |
from langchain_core.language_models import BaseLLM, BaseChatModel
|
|
|
|
| 14 |
import os
|
| 15 |
|
| 16 |
sys.path.append('..')
|
|
|
|
| 190 |
|
| 191 |
if provider == GlobalConfig.PROVIDER_OPENROUTER:
|
| 192 |
# Use langchain-openai's ChatOpenAI for OpenRouter
|
| 193 |
+
from langchain_openai import ChatOpenAI
|
| 194 |
+
|
| 195 |
logger.debug('Getting LLM via OpenRouter: %s', model)
|
| 196 |
+
openrouter_api_key = api_key
|
| 197 |
+
base_url = 'https://openrouter.ai/api/v1'
|
| 198 |
+
|
| 199 |
return ChatOpenAI(
|
| 200 |
base_url=base_url,
|
| 201 |
openai_api_key=openrouter_api_key,
|