Update app.py
app.py CHANGED
@@ -23,24 +23,31 @@ logger = logging.getLogger(__name__)
 def load_qa_model():
     """Load question-answering model with long context support."""
     try:
-        from transformers import
+        from transformers import AutoModelForCausalLM, AwqConfig
 
         model_id = "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4"
 
         # Load tokenizer
         tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=os.getenv("HF_TOKEN"))
-        tokenizer.model_max_length = 8192  # Configure tokenizer for long inputs
 
+        quantization_config = AwqConfig(
+            bits=4,
+            fuse_max_seq_len=8192,  # Max sequence length for the fused AWQ modules
+            do_fuse=True,
+        )
+
         # Load the model with simplified rope_scaling configuration
         model = AutoModelForCausalLM.from_pretrained(
             model_id,
             torch_dtype=torch.bfloat16,
+            low_cpu_mem_usage=True,
             device_map="auto",
             rope_scaling={
                 "type": "dynamic",  # Simplified type as expected by the model
                 "factor": 8.0  # Scaling factor to support longer contexts
             },
-            use_auth_token=os.getenv("HF_TOKEN")
+            use_auth_token=os.getenv("HF_TOKEN"),
+            quantization_config=quantization_config
         )
 
         # Initialize the pipeline
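The hunk cuts off at the pipeline initialization. As a rough sketch of what that next step might look like, the snippet below wires the loaded model and tokenizer into a transformers pipeline; the task name, generation parameters, and helper name are assumptions for illustration, not part of the committed file.

from transformers import pipeline

def build_qa_pipeline(model, tokenizer):
    """Hypothetical helper: wrap the AWQ-quantized model in a generation pipeline."""
    return pipeline(
        "text-generation",   # assumed task for an instruct-tuned Llama model
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=512,  # illustrative cap on generated tokens
    )

Passing the already-instantiated model and tokenizer (rather than the model id) ensures the pipeline reuses the fused AWQ modules and rope_scaling settings configured above instead of reloading the weights.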