Update app.py

app.py CHANGED
@@ -45,11 +45,11 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-# Load …
-…
-…
-…
-…
+# Load Nanonets-OCR-s
+MODEL_ID_V = "nanonets/Nanonets-OCR-s"
+processor_v = AutoProcessor.from_pretrained(MODEL_ID_V, trust_remote_code=True)
+model_v = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+    MODEL_ID_V,
     trust_remote_code=True,
     torch_dtype=torch.float16
 ).to(device).eval()
@@ -63,15 +63,6 @@ model_x = Qwen2VLForConditionalGeneration.from_pretrained(
     torch_dtype=torch.float16
 ).to(device).eval()
 
-# Load Nanonets-OCR-s
-MODEL_ID_V = "nanonets/Nanonets-OCR-s"
-processor_v = AutoProcessor.from_pretrained(MODEL_ID_V, trust_remote_code=True)
-model_v = Qwen2_5_VLForConditionalGeneration.from_pretrained(
-    MODEL_ID_V,
-    trust_remote_code=True,
-    torch_dtype=torch.float16
-).to(device).eval()
-
 # Load Aya-Vision-8b
 MODEL_ID_A = "CohereForAI/aya-vision-8b"
 processor_a = AutoProcessor.from_pretrained(MODEL_ID_A, trust_remote_code=True)
@@ -81,17 +72,23 @@ model_a = AutoModelForImageTextToText.from_pretrained(
     torch_dtype=torch.float16
 ).to(device).eval()
 
-…
-…
-MODEL_ID_W = …
-SUBFOLDER = "Recognition"
-processor_w = AutoProcessor.from_pretrained(MODEL_ID_W, trust_remote_code=True, subfolder=SUBFOLDER)
+# Load AIGVE-MACS
+MODEL_ID_W = "xiaoliux/AIGVE-MACS"
+processor_w = AutoProcessor.from_pretrained(MODEL_ID_W, trust_remote_code=True)
 model_w = Qwen2_5_VLForConditionalGeneration.from_pretrained(
-    MODEL_ID_W,
-    …
-    torch_dtype=torch.float16
-…
+    MODEL_ID_W,
+    trust_remote_code=True,
+    torch_dtype=torch.float16
+).to(device).eval()
 
+# Load RolmOCR
+MODEL_ID_M = "reducto/RolmOCR"
+processor_m = AutoProcessor.from_pretrained(MODEL_ID_M, trust_remote_code=True)
+model_m = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+    MODEL_ID_M,
+    trust_remote_code=True,
+    torch_dtype=torch.float16
+).to(device).eval()
 
 def downsample_video(video_path):
     """
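All of the Qwen2/Qwen2.5-VL-based checkpoints registered above (Nanonets-OCR-s, Qwen2-VL-OCR, AIGVE-MACS, RolmOCR) are driven through the same transformers chat-template API once loaded. Below is a minimal single-image sketch of that usage, assuming the processor_v / model_v pair from the hunk above and a hypothetical input file; it illustrates the pattern only and is not the Space's actual generate_image implementation.

# Illustrative single-image OCR call (not the Space's generate_image code).
# Assumes processor_v / model_v are loaded as in the diff above; "sample.png" is a placeholder path.
from PIL import Image

image = Image.open("sample.png").convert("RGB")
messages = [{
    "role": "user",
    "content": [
        {"type": "image"},
        {"type": "text", "text": "Extract the text of this document as markdown."},
    ],
}]

# Build the chat prompt, encode text + image together, and generate.
prompt = processor_v.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor_v(text=[prompt], images=[image], return_tensors="pt", padding=True).to(device)
output_ids = model_v.generate(**inputs, max_new_tokens=1024, do_sample=False)

# Decode only the newly generated tokens, dropping the prompt portion.
trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, output_ids)]
print(processor_v.batch_decode(trimmed, skip_special_tokens=True)[0])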
@@ -128,7 +125,7 @@ def generate_image(model_name: str, text: str, image: Image.Image,
     if model_name == "RolmOCR-7B":
         processor = processor_m
         model = model_m
-    elif model_name == "Qwen2-VL-OCR":
+    elif model_name == "Qwen2-VL-OCR-2B":
         processor = processor_x
         model = model_x
     elif model_name == "Nanonets-OCR-s":
@@ -137,7 +134,7 @@ def generate_image(model_name: str, text: str, image: Image.Image,
     elif model_name == "Aya-Vision-8B":
         processor = processor_a
         model = model_a
-    elif model_name == "…
+    elif model_name == "AIGVE-MACS-7B":
        processor = processor_w
        model = model_w
    else:
@@ -189,7 +186,7 @@ def generate_video(model_name: str, text: str, video_path: str,
     if model_name == "RolmOCR-7B":
         processor = processor_m
         model = model_m
-    elif model_name == "Qwen2-VL-OCR":
+    elif model_name == "Qwen2-VL-OCR-2B":
         processor = processor_x
         model = model_x
     elif model_name == "Nanonets-OCR-s":
@@ -198,7 +195,7 @@ def generate_video(model_name: str, text: str, video_path: str,
     elif model_name == "Aya-Vision-8B":
         processor = processor_a
         model = model_a
-    elif model_name == "…
+    elif model_name == "AIGVE-MACS-7B":
         processor = processor_w
         model = model_w
     else:
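The four hunks above change only the label strings in the elif dispatch of generate_image and generate_video, which map the selected radio value to a (processor, model) pair. A dict-based lookup would express the same mapping more compactly; the sketch below is purely illustrative of that alternative, and the Space itself keeps the elif chains.

# Illustrative alternative to the elif dispatch (the app uses elif chains, not this).
# Assumes the processor_*/model_* pairs from the loading section are in scope.
MODEL_REGISTRY = {
    "RolmOCR-7B": (processor_m, model_m),
    "Qwen2-VL-OCR-2B": (processor_x, model_x),
    "Nanonets-OCR-s": (processor_v, model_v),
    "Aya-Vision-8B": (processor_a, model_a),
    "AIGVE-MACS-7B": (processor_w, model_w),
}

def resolve_model(model_name: str):
    # Unknown names fall through to an error, analogous to the final else branch.
    try:
        return MODEL_REGISTRY[model_name]
    except KeyError:
        raise ValueError(f"Invalid model selected: {model_name}")

processor, model = resolve_model("Nanonets-OCR-s")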
@@ -312,17 +309,16 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
         with gr.Accordion("(Result.md)", open=False):
             markdown_output = gr.Markdown(label="Formatted Result (Result.Md)")
         model_choice = gr.Radio(
-            choices=["Nanonets-OCR-s", "…
+            choices=["Nanonets-OCR-s", "Qwen2-VL-OCR-2B", "RolmOCR-7B", "AIGVE-MACS-7B", "Aya-Vision-8B"],
             label="Select Model",
             value="Nanonets-OCR-s"
         )
         gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-OCR/discussions)")
         gr.Markdown("> [Nanonets-OCR-s](https://huggingface.co/nanonets/Nanonets-OCR-s): nanonets-ocr-s is a powerful, state-of-the-art image-to-markdown ocr model that goes far beyond traditional text extraction. it transforms documents into structured markdown with intelligent content recognition and semantic tagging.")
-        gr.Markdown("> […
-        gr.Markdown("> [Qwen2-VL-OCR-2B…
+        gr.Markdown("> [AIGVE-MACS-7B](https://huggingface.co/xiaoliux/AIGVE-MACS): AIGVE-MACS is a unified Vision-Language Model (VLM) designed to evaluate AI-generated videos. It provides numerical scores (0–5) and natural language justifications across 9 human-aligned video quality aspects.")
+        gr.Markdown("> [Qwen2-VL-OCR-2B](https://huggingface.co/prithivMLmods/Qwen2-VL-OCR-2B-Instruct): qwen2-vl-ocr-2b-instruct model is a fine-tuned version of qwen2-vl-2b-instruct, tailored for tasks that involve [messy] optical character recognition (ocr), image-to-text conversion, and math problem solving with latex formatting.")
         gr.Markdown("> [RolmOCR](https://huggingface.co/reducto/RolmOCR): rolmocr, high-quality, openly available approach to parsing pdfs and other complex documents optical character recognition. it is designed to handle a wide range of document types, including scanned documents, handwritten text, and complex layouts.")
         gr.Markdown("> [Aya-Vision](https://huggingface.co/CohereLabs/aya-vision-8b): cohere labs aya vision 8b is an open weights research release of an 8-billion parameter model with advanced capabilities optimized for a variety of vision-language use cases, including ocr, captioning, visual reasoning, summarization, question answering, code, and more.")
-
         gr.Markdown(">⚠️note: all the models in space are not guaranteed to perform well in video inference use cases.")
 
     image_submit.click(
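The updated gr.Radio is the component whose value reaches generate_image / generate_video as model_name through the submit handlers (the image_submit.click(...) call shown as trailing context). A stripped-down, hypothetical wiring of that relationship is sketched below; the real Space passes additional inputs such as the image or video upload and advanced generation parameters.

# Hypothetical minimal wiring of the model selector to a submit handler.
# Shows only the radio -> handler path, not the Space's full event setup.
import gradio as gr

def run_ocr(model_name: str, text: str) -> str:
    # Placeholder handler; the real app dispatches to the selected (processor, model) pair here.
    return f"Selected {model_name} for query: {text}"

with gr.Blocks() as demo:
    model_choice = gr.Radio(
        choices=["Nanonets-OCR-s", "Qwen2-VL-OCR-2B", "RolmOCR-7B", "AIGVE-MACS-7B", "Aya-Vision-8B"],
        label="Select Model",
        value="Nanonets-OCR-s",
    )
    query_input = gr.Textbox(label="Query Input")
    markdown_output = gr.Markdown(label="Formatted Result (Result.Md)")
    image_submit = gr.Button("Submit")
    image_submit.click(fn=run_ocr, inputs=[model_choice, query_input], outputs=markdown_output)

# demo.launch()  # uncomment to serve locally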