Update app.py

app.py CHANGED
@@ -128,18 +128,6 @@ def load_image_internvl(image, input_size=448, max_num=12):
     return pixel_values
 
 # --- Model Loading ---
-MODEL_ID_M = "LiquidAI/LFM2-VL-450M"
-processor_m = AutoProcessor.from_pretrained(MODEL_ID_M, trust_remote_code=True)
-model_m = AutoModelForImageTextToText.from_pretrained(
-    MODEL_ID_M, trust_remote_code=True, torch_dtype=torch.float16
-).to(device).eval()
-
-MODEL_ID_T = "LiquidAI/LFM2-VL-1.6B"
-processor_t = AutoProcessor.from_pretrained(MODEL_ID_T, trust_remote_code=True)
-model_t = AutoModelForImageTextToText.from_pretrained(
-    MODEL_ID_T, trust_remote_code=True, torch_dtype=torch.float16
-).to(device).eval()
-
 MODEL_ID_C = "HuggingFaceTB/SmolVLM-Instruct-250M"
 processor_c = AutoProcessor.from_pretrained(MODEL_ID_C, trust_remote_code=True)
 model_c = AutoModelForVision2Seq.from_pretrained(
@@ -161,12 +149,6 @@ model_i = Qwen2VLForConditionalGeneration.from_pretrained(
     MODEL_ID_I, trust_remote_code=True, torch_dtype=torch.float16
 ).to(device).eval()
 
-MODEL_ID_A = "nanonets/Nanonets-OCR-s"
-processor_a = AutoProcessor.from_pretrained(MODEL_ID_A, trust_remote_code=True)
-model_a = Qwen2_5_VLForConditionalGeneration.from_pretrained(
-    MODEL_ID_A, trust_remote_code=True, torch_dtype=torch.float16
-).to(device).eval()
-
 MODEL_ID_X = "prithivMLmods/Megalodon-OCR-Sync-0713"
 processor_x = AutoProcessor.from_pretrained(MODEL_ID_X, trust_remote_code=True)
 model_x = Qwen2_5_VLForConditionalGeneration.from_pretrained(
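Both removed blocks, like every block that remains, repeat one loading recipe: an AutoProcessor plus a model class loaded with trust_remote_code=True, cast to float16, moved to the device, and put in eval mode. A small helper would shrink each checkpoint to a single line. This is a sketch, not code from the app; the load_pair name is invented here:

import torch
from transformers import AutoProcessor

def load_pair(model_id, model_cls, device):
    # The same recipe app.py applies to every checkpoint: remote code
    # trusted, fp16 weights, moved onto the target device, eval mode.
    processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
    model = model_cls.from_pretrained(
        model_id, trust_remote_code=True, torch_dtype=torch.float16
    ).to(device).eval()
    return processor, model

With that in place, the surviving SmolVLM block, for example, would collapse to processor_c, model_c = load_pair(MODEL_ID_C, AutoModelForVision2Seq, device).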
@@ -365,12 +347,9 @@ def process_document_stream(
         inputs = processor(text=prompt, images=image, return_tensors="pt").to(device, torch.float16)
     # --- Generic Handling for all other models ---
     else:
-        if model_name == "LFM2-VL-450M(fast)": processor, model = processor_m, model_m
-        elif model_name == "LFM2-VL-1.6B(fast)": processor, model = processor_t, model_t
-        elif model_name == "SmolVLM-Instruct-250M(smol)": processor, model = processor_c, model_c
+        if model_name == "SmolVLM-Instruct-250M(smol)": processor, model = processor_c, model_c
         elif model_name == "MonkeyOCR-pro-1.2B(ocr)": processor, model = processor_g, model_g
         elif model_name == "VLAA-Thinker-Qwen2VL-2B(reason)": processor, model = processor_i, model_i
-        elif model_name == "Nanonets-OCR-s(ocr)": processor, model = processor_a, model_a
         elif model_name == "Megalodon-OCR-Sync-0713(ocr)": processor, model = processor_x, model_x
         elif model_name == "Qwen2.5-VL-3B-Abliterated-Caption-it(caption)": processor, model = processor_n, model_n
         elif model_name == "LMM-R1-MGT-PerceReason(reason)": processor, model = processor_f, model_f
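After the edit, the else branch is still a hand-maintained if/elif ladder keyed on the dropdown's display names, and dropping a model means editing it manually, as this commit does twice. A dictionary would reduce each removal to deleting one entry. A sketch, assuming the processor_*/model_* globals defined above; MODEL_REGISTRY is a name invented here:

# Hypothetical registry mapping a display name to its (processor, model) pair.
MODEL_REGISTRY = {
    "SmolVLM-Instruct-250M(smol)": (processor_c, model_c),
    "MonkeyOCR-pro-1.2B(ocr)": (processor_g, model_g),
    "VLAA-Thinker-Qwen2VL-2B(reason)": (processor_i, model_i),
    "Megalodon-OCR-Sync-0713(ocr)": (processor_x, model_x),
    "Qwen2.5-VL-3B-Abliterated-Caption-it(caption)": (processor_n, model_n),
    "LMM-R1-MGT-PerceReason(reason)": (processor_f, model_f),
}

# Inside process_document_stream, replacing the ladder:
try:
    processor, model = MODEL_REGISTRY[model_name]
except KeyError:
    raise ValueError(f"Unsupported model selection: {model_name}")

Removing Nanonets-OCR-s would then have been one deleted dictionary entry instead of one deleted elif line.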
@@ -434,13 +413,13 @@ def create_gradio_interface():
         # Left Column (Inputs)
         with gr.Column(scale=1):
             model_choice = gr.Dropdown(
-                choices=["LFM2-VL-450M(fast)", "LFM2-VL-1.6B(fast)", "SmolVLM-Instruct-250M(smol)", "Moondream2(vision)",
+                choices=["SmolVLM-Instruct-250M(smol)", "Moondream2(vision)",
                          "OpenGVLab/InternVL3_5-2B-MPO", "Megalodon-OCR-Sync-0713(ocr)",
                          "VLAA-Thinker-Qwen2VL-2B(reason)", "MonkeyOCR-pro-1.2B(ocr)",
-                         "Qwen2.5-VL-3B-Abliterated-Caption-it(caption)", "Nanonets-OCR-s(ocr)",
+                         "Qwen2.5-VL-3B-Abliterated-Caption-it(caption)",
                          "LMM-R1-MGT-PerceReason(reason)", "OCRFlux-3B(ocr)", "TBAC-VLR1-3B(open-r1)",
                          "SmolVLM-500M-Instruct(smol)", "llava-onevision-qwen2-0.5b-ov-hf(mini)"],
-                label="Select Model", value= "LFM2-VL-450M(fast)"
+                label="Select Model", value= "Qwen2.5-VL-3B-Abliterated-Caption-it(caption)"
             )
 
             prompt_input = gr.Textbox(label="Query Input", placeholder="✦︎ Enter the prompt")
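The dropdown's choices list is maintained separately from the dispatch ladder, which is why this commit deletes the same model names in three places: the loading block, the if/elif chain, and the Dropdown. If the hypothetical MODEL_REGISTRY above also covered the remaining entries (OCRFlux-3B, TBAC-VLR1-3B, and so on), the Dropdown could be driven from it directly:

model_choice = gr.Dropdown(
    choices=list(MODEL_REGISTRY),  # one source of truth for display names
    label="Select Model",
    value="Qwen2.5-VL-3B-Abliterated-Caption-it(caption)",  # the new default in this commit
)

A name dropped from the registry would then disappear from both the UI and the dispatch path in a single edit.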