Update app.py
app.py
CHANGED
@@ -27,21 +27,22 @@ def load_model():
     """Loads the Mistral model and tokenizer and updates the load status."""
     global tokenizer, model, model_loaded
     try:
-        if tokenizer is None:
-            ...
+        if model is not None and tokenizer is not None:  # Prevent unnecessary reloads
+            model_loaded = True
+            return "✅ Model Already Loaded!"
+
+        tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
+        model = AutoModelForCausalLM.from_pretrained(
+            model_name,
+            token=hf_token,
+            torch_dtype=torch.float16
+        )
+        model.to('cuda')
         model_loaded = True
         return "✅ Model Loaded!"
     except Exception as e:
         model_loaded = False
         return f"❌ Model Load Failed: {str(e)}"
-
 # ---------------------------------------------------------------------------
 # 2. Toy Evaluation
 # ---------------------------------------------------------------------------
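For context, below is a minimal, self-contained sketch of how the updated load_model() would sit in app.py. Only the function body mirrors the diff above; the imports, the model_name value, and the hf_token lookup are assumptions, since they are not visible in this change.

# Sketch only: module-level setup (imports, model_name, hf_token, globals) is
# assumed, not taken from the diff above.
import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "mistralai/Mistral-7B-Instruct-v0.2"  # assumed model id
hf_token = os.environ.get("HF_TOKEN")              # assumed token source

tokenizer = None
model = None
model_loaded = False


def load_model():
    """Loads the Mistral model and tokenizer and updates the load status."""
    global tokenizer, model, model_loaded
    try:
        if model is not None and tokenizer is not None:  # Prevent unnecessary reloads
            model_loaded = True
            return "✅ Model Already Loaded!"

        tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            token=hf_token,
            torch_dtype=torch.float16,
        )
        model.to('cuda')
        model_loaded = True
        return "✅ Model Loaded!"
    except Exception as e:
        model_loaded = False
        return f"❌ Model Load Failed: {str(e)}"

In a Gradio Space this status string is typically shown in a textbox wired to a load button, e.g. gr.Button("Load model").click(fn=load_model, outputs=status_box); that wiring is an assumption here, since the rest of app.py is not shown. Note that model.to('cuda') presumes GPU hardware: on CPU-only hardware it would raise, and the except branch would surface the error in the status message.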