Update app.py
app.py CHANGED
@@ -40,8 +40,8 @@ os.environ["TRANSFORMERS_CACHE"] = "/data/models"
 MAX_FILE_SIZE_MB = 20
 MAX_FILE_SIZE_BYTES = MAX_FILE_SIZE_MB * 1024 * 1024
 
-MODEL_ID = "unsloth/Llama-3.2-3B"  # alternatives: meta-llama/Meta-Llama-3-8B, unsloth/Llama-4-Scout-17B-16E-Instruct-GGUF, unsloth/Qwen2.5-1.5B, unsloth/Llama-3.2-1B
-
+MODEL_ID = "meta-llama/Meta-Llama-3-8B"  # alternatives: unsloth/Llama-3.2-3B, unsloth/Llama-4-Scout-17B-16E-Instruct-GGUF, unsloth/Qwen2.5-1.5B, unsloth/Llama-3.2-1B
+glotoken = Tokentest
 # Global logging system - CRITICAL FIX #1
 logs = []
 
@@ -82,6 +82,7 @@ def initialize_model():
     tokenizer = AutoTokenizer.from_pretrained(
         MODEL_ID,
         cache_dir="/data/models",
+        token=glotoken,
         trust_remote_code=True,
         use_fast=False
     )
@@ -96,6 +97,7 @@ def initialize_model():
         torch_dtype=torch.float16,
         cache_dir="/data/models",
         trust_remote_code=True,
+        token=glotoken,
         device_map={"": 0},  # <- force GPU:0
         low_cpu_mem_usage=True
     )
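The substance of this commit: MODEL_ID now points at meta-llama/Meta-Llama-3-8B, a gated repository on the Hugging Face Hub, so both from_pretrained calls gain a token= argument to authenticate the download. Note that Tokentest is not defined in any hunk shown here, and hard-coding a token value in app.py would expose it in the Space's file history. A minimal sketch of a safer variant, assuming the token is stored as a Space secret (the HF_TOKEN name below is an assumption for this sketch, not something taken from the diff):

import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "meta-llama/Meta-Llama-3-8B"

# Read the Hub token from a Space secret instead of hard-coding it.
# "HF_TOKEN" is a hypothetical secret name chosen for this sketch.
glotoken = os.environ.get("HF_TOKEN")

tokenizer = AutoTokenizer.from_pretrained(
    MODEL_ID,
    cache_dir="/data/models",
    token=glotoken,           # authenticates the gated-repo download
    trust_remote_code=True,
    use_fast=False,
)

model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16,
    cache_dir="/data/models",
    trust_remote_code=True,
    token=glotoken,
    device_map={"": 0},       # <- force GPU:0, as in the diff
    low_cpu_mem_usage=True,
)

Passing token= to from_pretrained forwards the credential to the Hub when fetching the model files; in older transformers releases the same parameter was named use_auth_token.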