prithivMLmods committed
Commit 7cad394 · verified · 1 Parent(s): d0e4a52

update app

Files changed (1)
  1. app.py +286 -274
app.py CHANGED
@@ -1,275 +1,287 @@
-import os
-import sys
-from threading import Thread
-from typing import Iterable
-from huggingface_hub import snapshot_download
-
-import gradio as gr
-import spaces
-import torch
-from PIL import Image
-from transformers import (
-    Qwen2_5_VLForConditionalGeneration,
-    AutoModelForImageTextToText,
-    AutoModelForCausalLM,
-    AutoProcessor,
-    TextIteratorStreamer,
-)
-
-from gradio.themes import Soft
-from gradio.themes.utils import colors, fonts, sizes
-
-colors.steel_blue = colors.Color(
-    name="steel_blue",
-    c50="#EBF3F8",
-    c100="#D3E5F0",
-    c200="#A8CCE1",
-    c300="#7DB3D2",
-    c400="#529AC3",
-    c500="#4682B4",
-    c600="#3E72A0",
-    c700="#36638C",
-    c800="#2E5378",
-    c900="#264364",
-    c950="#1E3450",
-)
-
-class SteelBlueTheme(Soft):
-    def __init__(
-        self,
-        *,
-        primary_hue: colors.Color | str = colors.gray,
-        secondary_hue: colors.Color | str = colors.steel_blue,
-        neutral_hue: colors.Color | str = colors.slate,
-        text_size: sizes.Size | str = sizes.text_lg,
-        font: fonts.Font | str | Iterable[fonts.Font | str] = (
-            fonts.GoogleFont("Outfit"), "Arial", "sans-serif",
-        ),
-        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
-            fonts.GoogleFont("IBM Plex Mono"), "ui-monospace", "monospace",
-        ),
-    ):
-        super().__init__(
-            primary_hue=primary_hue,
-            secondary_hue=secondary_hue,
-            neutral_hue=neutral_hue,
-            text_size=text_size,
-            font=font,
-            font_mono=font_mono,
-        )
-        super().set(
-            background_fill_primary="*primary_50",
-            background_fill_primary_dark="*primary_900",
-            body_background_fill="linear-gradient(135deg, *primary_200, *primary_100)",
-            body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
-            button_primary_text_color="white",
-            button_primary_text_color_hover="white",
-            button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
-            button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
-            button_primary_background_fill_dark="linear-gradient(90deg, *secondary_600, *secondary_700)",
-            button_primary_background_fill_hover_dark="linear-gradient(90deg, *secondary_500, *secondary_600)",
-            slider_color="*secondary_500",
-            slider_color_dark="*secondary_600",
-            block_title_text_weight="600",
-            block_border_width="3px",
-            block_shadow="*shadow_drop_lg",
-            button_primary_shadow="*shadow_drop_lg",
-            button_large_padding="11px",
-            color_accent_soft="*primary_100",
-            block_label_background_fill="*primary_200",
-        )
-
-steel_blue_theme = SteelBlueTheme()
-
-css = """
-#main-title h1 {
-    font-size: 2.3em !important;
-}
-#output-title h2 {
-    font-size: 2.1em !important;
-}
-"""
-
-
-CACHE_PATH = "./model_cache"
-if not os.path.exists(CACHE_PATH):
-    os.makedirs(CACHE_PATH)
-
-
-model_path_d_local = snapshot_download(
-    repo_id='rednote-hilab/dots.ocr',
-    local_dir=os.path.join(CACHE_PATH, 'dots.ocr'),
-    max_workers=20,
-    local_dir_use_symlinks=False
-)
-
-config_file_path = os.path.join(model_path_d_local, "configuration_dots.py")
-
-if os.path.exists(config_file_path):
-    with open(config_file_path, 'r') as f:
-        input_code = f.read()
-
-    lines = input_code.splitlines()
-    if "class DotsVLProcessor" in input_code and not any("attributes = " in line for line in lines):
-        output_lines = []
-        for line in lines:
-            output_lines.append(line)
-            if line.strip().startswith("class DotsVLProcessor"):
-                output_lines.append("    attributes = [\"image_processor\", \"tokenizer\"]")
-
-        with open(config_file_path, 'w') as f:
-            f.write('\n'.join(output_lines))
-        print("Patched configuration_dots.py successfully.")
-
-
-sys.path.append(model_path_d_local)
-
-MAX_MAX_NEW_TOKENS = 4096
-DEFAULT_MAX_NEW_TOKENS = 1440
-MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
-
-device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
-# Load Nanonets-OCR2-3B
-MODEL_ID_M = "nanonets/Nanonets-OCR2-3B"
-processor_m = AutoProcessor.from_pretrained(MODEL_ID_M, trust_remote_code=True)
-model_m = Qwen2_5_VLForConditionalGeneration.from_pretrained(
-    MODEL_ID_M,
-    trust_remote_code=True,
-    torch_dtype=torch.float16
-).to(device).eval()
-
-# Load Nanonets-OCR2-1.5B-exp
-MODEL_ID_N = "strangervisionhf/excess_layer_pruned-nanonets-1.5b"  # -> https://huggingface.co/nanonets/Nanonets-OCR2-1.5B-exp
-processor_n = AutoProcessor.from_pretrained(MODEL_ID_N, trust_remote_code=True)
-model_n = AutoModelForImageTextToText.from_pretrained(
-    MODEL_ID_N,
-    trust_remote_code=True,
-    torch_dtype=torch.float16,
-    attn_implementation="flash_attention_2"
-).to(device).eval()
-
-
-# Load Dots.OCR from the local, patched directory
-MODEL_PATH_D = model_path_d_local
-processor_d = AutoProcessor.from_pretrained(MODEL_PATH_D, trust_remote_code=True)
-model_d = AutoModelForCausalLM.from_pretrained(
-    MODEL_PATH_D,
-    attn_implementation="flash_attention_2",
-    torch_dtype=torch.bfloat16,
-    device_map="auto",
-    trust_remote_code=True
-).eval()
-
-# Load PaddleOCR
-MODEL_ID_P = "strangervisionhf/paddle"  # -> https://huggingface.co/PaddlePaddle/PaddleOCR-VL
-processor_p = AutoProcessor.from_pretrained(MODEL_ID_P, trust_remote_code=True)
-model_p = AutoModelForCausalLM.from_pretrained(
-    MODEL_ID_P,
-    trust_remote_code=True,
-    torch_dtype=torch.bfloat16
-).to(device).eval()
-
-@spaces.GPU
-def generate_image(model_name: str, text: str, image: Image.Image,
-                   max_new_tokens: int = 1024,
-                   temperature: float = 0.6,
-                   top_p: float = 0.9,
-                   top_k: int = 50,
-                   repetition_penalty: float = 1.2):
-    """Generate responses for image input using the selected model."""
-    if model_name == "Nanonets-OCR2-3B":
-        processor, model = processor_m, model_m
-    elif model_name == "Nanonets-OCR2-1.5B(exp)":
-        processor, model = processor_n, model_n
-    elif model_name == "Dots.OCR":
-        processor, model = processor_d, model_d
-    elif model_name == "PaddleOCR":
-        processor, model = processor_p, model_p
-    else:
-        yield "Invalid model selected.", "Invalid model selected."
-        return
-
-    if image is None:
-        yield "Please upload an image.", "Please upload an image."
-        return
-
-    images = [image.convert("RGB")]
-
-    if model_name == "PaddleOCR":
-        messages = [
-            {"role": "user", "content": text}
-        ]
-    else:
-        messages = [
-            {
-                "role": "user",
-                "content": [{"type": "image"}] + [{"type": "text", "text": text}]
-            }
-        ]
-
-    prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
-    inputs = processor(text=prompt, images=images, return_tensors="pt").to(device)
-
-    streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
-    generation_kwargs = {
-        **inputs,
-        "streamer": streamer,
-        "max_new_tokens": max_new_tokens,
-        "temperature": temperature,
-        "top_p": top_p,
-        "top_k": top_k,
-        "repetition_penalty": repetition_penalty,
-        "do_sample": True
-    }
-    thread = Thread(target=model.generate, kwargs=generation_kwargs)
-    thread.start()
-
-    buffer = ""
-    for new_text in streamer:
-        buffer += new_text.replace("<|im_end|>", "").replace("<end_of_utterance>", "")
-        yield buffer, buffer
-
-image_examples = [
-    ["Perform OCR on the image.", "examples/1.jpg"],
-    ["Phrase the document [page].", "examples/2.jpg"],
-    ["OCR the content perfectly.", "examples/3.jpg"],
-]
-
-with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
-    gr.Markdown("# **Multimodal OCR3**", elem_id="main-title")
-    with gr.Row():
-        with gr.Column(scale=2):
-            image_query = gr.Textbox(label="Query Input", placeholder="Enter your query here...")
-            image_upload = gr.Image(type="pil", label="Upload Image", height=320)
-            image_submit = gr.Button("Submit", variant="primary")
-            gr.Examples(examples=image_examples, inputs=[image_query, image_upload])
-
-            with gr.Accordion("Advanced options", open=False):
-                max_new_tokens = gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
-                temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6)
-                top_p = gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9)
-                top_k = gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50)
-                repetition_penalty = gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2)
-
-        with gr.Column(scale=3):
-            gr.Markdown("## Output", elem_id="output-title")
-            raw_output = gr.Textbox(label="Raw Output Stream", interactive=False, lines=11, show_copy_button=True)
-            with gr.Accordion("[Result.md]", open=False):
-                formatted_output = gr.Markdown(label="Formatted Result")
-
-            model_choice = gr.Radio(
-                choices=["Nanonets-OCR2-3B", "Dots.OCR", "Nanonets-OCR2-1.5B(exp)", "PaddleOCR"],
-                label="Select Model",
-                value="Nanonets-OCR2-3B"
-            )
-            gr.Markdown("Note: Currently, PaddleOCR VL only supports OCR inference. Structured OCR document parsing transformer inference is coming soon. [Report – Bug/Issue](https://huggingface.co/spaces/prithivMLmods/Multimodal-OCR3/discussions/1)")
-
-    image_submit.click(
-        fn=generate_image,
-        inputs=[model_choice, image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
-        outputs=[raw_output, formatted_output]
-    )
-
-if __name__ == "__main__":
+import os
+import sys
+from threading import Thread
+from typing import Iterable
+from huggingface_hub import snapshot_download
+
+import gradio as gr
+import spaces
+import torch
+from PIL import Image
+from transformers import (
+    Qwen2_5_VLForConditionalGeneration,
+    Qwen3VLForConditionalGeneration,
+    AutoModelForImageTextToText,
+    AutoModelForCausalLM,
+    AutoProcessor,
+    TextIteratorStreamer,
+)
+
+from gradio.themes import Soft
+from gradio.themes.utils import colors, fonts, sizes
+
+colors.steel_blue = colors.Color(
+    name="steel_blue",
+    c50="#EBF3F8",
+    c100="#D3E5F0",
+    c200="#A8CCE1",
+    c300="#7DB3D2",
+    c400="#529AC3",
+    c500="#4682B4",
+    c600="#3E72A0",
+    c700="#36638C",
+    c800="#2E5378",
+    c900="#264364",
+    c950="#1E3450",
+)
+
+class SteelBlueTheme(Soft):
+    def __init__(
+        self,
+        *,
+        primary_hue: colors.Color | str = colors.gray,
+        secondary_hue: colors.Color | str = colors.steel_blue,
+        neutral_hue: colors.Color | str = colors.slate,
+        text_size: sizes.Size | str = sizes.text_lg,
+        font: fonts.Font | str | Iterable[fonts.Font | str] = (
+            fonts.GoogleFont("Outfit"), "Arial", "sans-serif",
+        ),
+        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
+            fonts.GoogleFont("IBM Plex Mono"), "ui-monospace", "monospace",
+        ),
+    ):
+        super().__init__(
+            primary_hue=primary_hue,
+            secondary_hue=secondary_hue,
+            neutral_hue=neutral_hue,
+            text_size=text_size,
+            font=font,
+            font_mono=font_mono,
+        )
+        super().set(
+            background_fill_primary="*primary_50",
+            background_fill_primary_dark="*primary_900",
+            body_background_fill="linear-gradient(135deg, *primary_200, *primary_100)",
+            body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
+            button_primary_text_color="white",
+            button_primary_text_color_hover="white",
+            button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
+            button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
+            button_primary_background_fill_dark="linear-gradient(90deg, *secondary_600, *secondary_700)",
+            button_primary_background_fill_hover_dark="linear-gradient(90deg, *secondary_500, *secondary_600)",
+            slider_color="*secondary_500",
+            slider_color_dark="*secondary_600",
+            block_title_text_weight="600",
+            block_border_width="3px",
+            block_shadow="*shadow_drop_lg",
+            button_primary_shadow="*shadow_drop_lg",
+            button_large_padding="11px",
+            color_accent_soft="*primary_100",
+            block_label_background_fill="*primary_200",
+        )
+
+steel_blue_theme = SteelBlueTheme()
+
+css = """
+#main-title h1 {
+    font-size: 2.3em !important;
+}
+#output-title h2 {
+    font-size: 2.1em !important;
+}
+"""
+
+
+CACHE_PATH = "./model_cache"
+if not os.path.exists(CACHE_PATH):
+    os.makedirs(CACHE_PATH)
+
+
+model_path_d_local = snapshot_download(
+    repo_id='rednote-hilab/dots.ocr',
+    local_dir=os.path.join(CACHE_PATH, 'dots.ocr'),
+    max_workers=20,
+    local_dir_use_symlinks=False
+)
+
+config_file_path = os.path.join(model_path_d_local, "configuration_dots.py")
+
+if os.path.exists(config_file_path):
+    with open(config_file_path, 'r') as f:
+        input_code = f.read()
+
+    lines = input_code.splitlines()
+    if "class DotsVLProcessor" in input_code and not any("attributes = " in line for line in lines):
+        output_lines = []
+        for line in lines:
+            output_lines.append(line)
+            if line.strip().startswith("class DotsVLProcessor"):
+                output_lines.append("    attributes = [\"image_processor\", \"tokenizer\"]")
+
+        with open(config_file_path, 'w') as f:
+            f.write('\n'.join(output_lines))
+        print("Patched configuration_dots.py successfully.")
+
+
+sys.path.append(model_path_d_local)
+
+MAX_MAX_NEW_TOKENS = 4096
+DEFAULT_MAX_NEW_TOKENS = 2048
+MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
+
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+# Load chandra
+MODEL_ID_C = "datalab-to/chandra"
+processor_c = AutoProcessor.from_pretrained(MODEL_ID_C, trust_remote_code=True)
+model_c = Qwen3VLForConditionalGeneration.from_pretrained(
+    MODEL_ID_C,
+    trust_remote_code=True,
+    torch_dtype=torch.float16
+).to(device).eval()
+
+# Load Nanonets-OCR2-3B
+MODEL_ID_M = "nanonets/Nanonets-OCR2-3B"
+processor_m = AutoProcessor.from_pretrained(MODEL_ID_M, trust_remote_code=True)
+model_m = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+    MODEL_ID_M,
+    trust_remote_code=True,
+    torch_dtype=torch.float16
+).to(device).eval()
+
+# Load Nanonets-OCR2-1.5B-exp
+MODEL_ID_N = "strangervisionhf/excess_layer_pruned-nanonets-1.5b"  # -> https://huggingface.co/nanonets/Nanonets-OCR2-1.5B-exp
+processor_n = AutoProcessor.from_pretrained(MODEL_ID_N, trust_remote_code=True)
+model_n = AutoModelForImageTextToText.from_pretrained(
+    MODEL_ID_N,
+    trust_remote_code=True,
+    torch_dtype=torch.float16,
+    attn_implementation="flash_attention_2"
+).to(device).eval()
+
+
+# Load Dots.OCR from the local, patched directory
+MODEL_PATH_D = model_path_d_local
+processor_d = AutoProcessor.from_pretrained(MODEL_PATH_D, trust_remote_code=True)
+model_d = AutoModelForCausalLM.from_pretrained(
+    MODEL_PATH_D,
+    attn_implementation="flash_attention_2",
+    torch_dtype=torch.bfloat16,
+    device_map="auto",
+    trust_remote_code=True
+).eval()
+
+# Load PaddleOCR
+MODEL_ID_P = "strangervisionhf/paddle"  # -> https://huggingface.co/PaddlePaddle/PaddleOCR-VL
+processor_p = AutoProcessor.from_pretrained(MODEL_ID_P, trust_remote_code=True)
+model_p = AutoModelForCausalLM.from_pretrained(
+    MODEL_ID_P,
+    trust_remote_code=True,
+    torch_dtype=torch.bfloat16
+).to(device).eval()
+
+@spaces.GPU
+def generate_image(model_name: str, text: str, image: Image.Image,
+                   max_new_tokens: int = 1024,
+                   temperature: float = 0.6,
+                   top_p: float = 0.9,
+                   top_k: int = 50,
+                   repetition_penalty: float = 1.2):
+    """Generate responses for image input using the selected model."""
+    if model_name == "Nanonets-OCR2-3B":
+        processor, model = processor_m, model_m
+    elif model_name == "Nanonets-OCR2-1.5B(exp)":
+        processor, model = processor_n, model_n
+    elif model_name == "Dots.OCR":
+        processor, model = processor_d, model_d
+    elif model_name == "PaddleOCR":
+        processor, model = processor_p, model_p
+    elif model_name == "Chandra-OCR":
+        processor, model = processor_c, model_c
+    else:
+        yield "Invalid model selected.", "Invalid model selected."
+        return
+
+    if image is None:
+        yield "Please upload an image.", "Please upload an image."
+        return
+
+    images = [image.convert("RGB")]
+
+    if model_name == "PaddleOCR":
+        messages = [
+            {"role": "user", "content": text}
+        ]
+    else:
+        messages = [
+            {
+                "role": "user",
+                "content": [{"type": "image"}] + [{"type": "text", "text": text}]
+            }
+        ]
+
+    prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
+    inputs = processor(text=prompt, images=images, return_tensors="pt").to(device)
+
+    streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
+    generation_kwargs = {
+        **inputs,
+        "streamer": streamer,
+        "max_new_tokens": max_new_tokens,
+        "temperature": temperature,
+        "top_p": top_p,
+        "top_k": top_k,
+        "repetition_penalty": repetition_penalty,
+        "do_sample": True
+    }
+    thread = Thread(target=model.generate, kwargs=generation_kwargs)
+    thread.start()
+
+    buffer = ""
+    for new_text in streamer:
+        buffer += new_text.replace("<|im_end|>", "").replace("<end_of_utterance>", "")
+        yield buffer, buffer
+
+image_examples = [
+    ["Perform OCR on the image.", "examples/1.jpg"],
+    ["Phrase the document [page].", "examples/2.jpg"],
+    ["OCR the content perfectly.", "examples/3.jpg"],
+]
+
+with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
+    gr.Markdown("# **Multimodal OCR3**", elem_id="main-title")
+    with gr.Row():
+        with gr.Column(scale=2):
+            image_query = gr.Textbox(label="Query Input", placeholder="Enter your query here...")
+            image_upload = gr.Image(type="pil", label="Upload Image", height=320)
+            image_submit = gr.Button("Submit", variant="primary")
+            gr.Examples(examples=image_examples, inputs=[image_query, image_upload])
+
+            with gr.Accordion("Advanced options", open=False):
+                max_new_tokens = gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
+                temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6)
+                top_p = gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9)
+                top_k = gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50)
+                repetition_penalty = gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2)
+
+        with gr.Column(scale=3):
+            gr.Markdown("## Output", elem_id="output-title")
+            raw_output = gr.Textbox(label="Raw Output Stream", interactive=False, lines=11, show_copy_button=True)
+            with gr.Accordion("[Result.md]", open=False):
+                formatted_output = gr.Markdown(label="Formatted Result")
+
+            model_choice = gr.Radio(
+                choices=["Nanonets-OCR2-3B", "Dots.OCR", "Nanonets-OCR2-1.5B(exp)", "PaddleOCR", "Chandra-OCR"],
+                label="Select Model",
+                value="Nanonets-OCR2-3B"
+            )
+            gr.Markdown("Note: Currently, PaddleOCR VL only supports OCR inference. Structured OCR document parsing transformer inference is coming soon. [Report – Bug/Issue](https://huggingface.co/spaces/prithivMLmods/Multimodal-OCR3/discussions/1)")
+
+    image_submit.click(
+        fn=generate_image,
+        inputs=[model_choice, image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
+        outputs=[raw_output, formatted_output]
+    )
+
+if __name__ == "__main__":
     demo.queue(max_size=50).launch(mcp_server=True, ssr_mode=False, show_error=True)