Pull request #5: "Update src.py" — opened by Vedant-acharya.
File changed: src.py
|
@@ -27,10 +27,13 @@ print(f"Debug - Groq Token Value: {Groq_Token[:10] + '...' if Groq_Token else 'N
|
|
| 27 |
print(f"Debug - Gemini Token: {'Present' if gemini_token else 'Missing'}")
|
| 28 |
|
| 29 |
models = {
|
| 30 |
-
"
|
|
|
|
|
|
|
| 31 |
"llama3.3": "llama-3.3-70b-versatile",
|
| 32 |
-
"
|
| 33 |
-
"
|
|
|
|
| 34 |
"gemini-pro": "gemini-1.5-pro"
|
| 35 |
}
|
| 36 |
|
|
|
|
| 27 |
print(f"Debug - Gemini Token: {'Present' if gemini_token else 'Missing'}")
|
| 28 |
|
| 29 |
# Mapping from short, user-facing model aliases to the full provider model IDs
# passed to the Groq / Gemini API clients.
models = {
    "gpt-oss-20b": "openai/gpt-oss-20b",
    "gpt-oss-120b": "openai/gpt-oss-120b",
    "llama3.1": "llama-3.1-8b-instant",
    "llama3.3": "llama-3.3-70b-versatile",
    "deepseek-R1": "deepseek-r1-distill-llama-70b",
    # "llama4 maverik" is the original (misspelled) alias, kept so existing
    # callers keep working; "llama4 maverick" is the corrected alias for the
    # same model ID and should be preferred going forward.
    "llama4 maverik": "meta-llama/llama-4-maverick-17b-128e-instruct",
    "llama4 maverick": "meta-llama/llama-4-maverick-17b-128e-instruct",
    "llama4 scout": "meta-llama/llama-4-scout-17b-16e-instruct",
    "gemini-pro": "gemini-1.5-pro",
}
|
| 39 |
|