Update app.py
app.py CHANGED
@@ -12,16 +12,15 @@ import gradio as gr
 from fastapi.responses import RedirectResponse
 import numpy as np
 import easyocr
-
+
 # Initialize FastAPI
 app = FastAPI()
 
-
-model_name = "TinyLlama/TinyLlama-1.1B
-
-
-
-model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)
+# Load AI model for question answering on documents (TinyLlama-1.1B)
+model_name = "TinyLlama/TinyLlama-1.1B"
+print(f"Loading model: {model_name}...")
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
 
 doc_qa_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)
 
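For context, a minimal sketch of how the pipeline built in this commit could serve document Q&A behind the FastAPI app. The `/ask` route, the `AskRequest` schema, and the prompt template are illustrative assumptions; only the model-loading lines and the `doc_qa_pipeline` call appear in the diff itself.

```python
# Sketch only: wires the commit's text-generation pipeline into a FastAPI route.
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

app = FastAPI()

model_name = "TinyLlama/TinyLlama-1.1B"  # model id as written in the commit
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# device=-1 keeps inference on the CPU, matching the diff
doc_qa_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)

class AskRequest(BaseModel):  # hypothetical request schema, not shown in the diff
    document_text: str
    question: str

@app.post("/ask")  # hypothetical route; the real app.py may differ
def ask(req: AskRequest):
    # A causal LM answers by continuation: prepend the (e.g. OCR'd) text as context.
    prompt = f"Context:\n{req.document_text}\n\nQuestion: {req.question}\nAnswer:"
    out = doc_qa_pipeline(prompt, max_new_tokens=128, do_sample=False)
    # The pipeline returns the prompt plus the continuation; strip the prompt.
    return {"answer": out[0]["generated_text"][len(prompt):].strip()}
```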