davanstrien (HF Staff) committed
Commit f987b04 · 1 parent: e69f406

Add DeepSeek-OCR vLLM scripts (PR #27247 testing)

Files changed (2)
  1. deepseek-ocr-vllm-test.py +223 -0
  2. deepseek-ocr-vllm.py +621 -0
deepseek-ocr-vllm-test.py ADDED
@@ -0,0 +1,223 @@
# /// script
# requires-python = ">=3.11"
# dependencies = [
#     "vllm @ git+https://github.com/Isotr0py/vllm.git@deepseek-ocr",
#     "pillow",
#     "datasets",
#     "torch",
#     "huggingface-hub",
# ]
#
# [tool.uv]
# prerelease = "allow"
# ///

"""
Minimal test script for DeepSeek-OCR with vLLM (PR #27247 testing).

This is an MVP script to validate that DeepSeek-OCR works with vLLM.
It installs vLLM from Isotr0py's fork (the PR #27247 branch) because DeepSeek-OCR
support is not yet merged into main vLLM.

Use this to test in Colab before building the full production script.

Usage:
    # Test with a HF dataset image
    uv run deepseek-ocr-vllm-test.py --dataset pixparse/idl-wds --index 0

    # Test with a local image
    uv run deepseek-ocr-vllm-test.py --image path/to/image.png

    # Test with a different resolution mode
    uv run deepseek-ocr-vllm-test.py --dataset pixparse/idl-wds --resolution-mode tiny
"""

import argparse
import base64
import io
import logging
import sys
from typing import List, Dict

import torch
from PIL import Image
from datasets import load_dataset
from vllm import LLM, SamplingParams

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Resolution mode presets from DeepSeek-OCR
RESOLUTION_MODES = {
    "tiny": {"base_size": 512, "image_size": 512, "crop_mode": False},
    "small": {"base_size": 640, "image_size": 640, "crop_mode": False},
    "base": {"base_size": 1024, "image_size": 1024, "crop_mode": False},
    "large": {"base_size": 1280, "image_size": 1280, "crop_mode": False},
    "gundam": {"base_size": 1024, "image_size": 640, "crop_mode": True},
}


def check_cuda():
    """Check CUDA availability."""
    if not torch.cuda.is_available():
        logger.error("❌ CUDA not available. DeepSeek-OCR requires a GPU.")
        logger.error("Please run in Colab with a GPU runtime or on a GPU machine.")
        sys.exit(1)
    logger.info(f"✅ CUDA available: {torch.cuda.get_device_name(0)}")


def make_message(image: Image.Image, prompt: str) -> List[Dict]:
    """Create a vLLM chat message with a base64-encoded image."""
    # Convert to RGB
    image = image.convert("RGB")

    # Encode as base64 data URI
    buf = io.BytesIO()
    image.save(buf, format="PNG")
    data_uri = f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}"

    return [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": data_uri}},
                {"type": "text", "text": prompt},
            ],
        }
    ]


def main():
    parser = argparse.ArgumentParser(
        description="Test DeepSeek-OCR with vLLM (PR #27247)",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
    # Test with HF dataset
    uv run deepseek-ocr-vllm-test.py --dataset pixparse/idl-wds --index 0

    # Test with local image
    uv run deepseek-ocr-vllm-test.py --image document.png

    # Test different resolution
    uv run deepseek-ocr-vllm-test.py --dataset pixparse/idl-wds --resolution-mode large
        """,
    )

    # Image source (one required)
    source_group = parser.add_mutually_exclusive_group(required=True)
    source_group.add_argument("--image", help="Path to local image file")
    source_group.add_argument(
        "--dataset",
        help="HF dataset ID (uses --index and --split to pick an image)",
    )

    parser.add_argument(
        "--index",
        type=int,
        default=0,
        help="Dataset index to use (default: 0)",
    )
    parser.add_argument(
        "--image-column",
        default="image",
        help="Column name for images in dataset (default: image)",
    )
    parser.add_argument(
        "--split",
        default="train",
        help="Dataset split to use (default: train)",
    )
    parser.add_argument(
        "--resolution-mode",
        choices=list(RESOLUTION_MODES.keys()),
        default="gundam",
        help="Resolution mode preset (default: gundam)",
    )
    parser.add_argument(
        "--prompt",
        default="<image>\n<|grounding|>Convert the document to markdown.",
        help="OCR prompt",
    )
    parser.add_argument(
        "--model",
        default="deepseek-ai/DeepSeek-OCR",
        help="Model ID (default: deepseek-ai/DeepSeek-OCR)",
    )

    args = parser.parse_args()

    # Check CUDA
    check_cuda()

    # Load image
    logger.info("📷 Loading image...")
    if args.image:
        image = Image.open(args.image)
        logger.info(f"Loaded from file: {args.image}")
    else:
        dataset = load_dataset(args.dataset, split=args.split, streaming=False)
        if args.image_column not in dataset.column_names:
            logger.error(f"Column '{args.image_column}' not found in dataset")
            logger.error(f"Available columns: {dataset.column_names}")
            sys.exit(1)
        image = dataset[args.index][args.image_column]
        if isinstance(image, dict) and "bytes" in image:
            image = Image.open(io.BytesIO(image["bytes"]))
        logger.info(f"Loaded from dataset: {args.dataset}[{args.index}]")

    logger.info(f"Image size: {image.size}")

    # Get resolution settings
    resolution = RESOLUTION_MODES[args.resolution_mode]
    logger.info(f"Resolution mode: {args.resolution_mode}")
    logger.info(f"  base_size={resolution['base_size']}, image_size={resolution['image_size']}, crop_mode={resolution['crop_mode']}")

    # Initialize vLLM
    logger.info(f"🚀 Loading model: {args.model}")
    logger.info("This may take a few minutes on first run...")

    try:
        llm = LLM(
            model=args.model,
            trust_remote_code=True,
            max_model_len=8192,
            gpu_memory_utilization=0.8,
            limit_mm_per_prompt={"image": 1},
            enforce_eager=False,
        )
        logger.info("✅ Model loaded successfully!")
    except Exception as e:
        logger.error(f"❌ Failed to load model: {e}")
        logger.error("\nThis might mean:")
        logger.error("  1. PR #27247 is not merged/available in this vLLM version")
        logger.error("  2. The model architecture is not recognized")
        logger.error("  3. Missing dependencies")
        sys.exit(1)

    # Run inference
    logger.info("🔍 Running OCR...")
    sampling_params = SamplingParams(
        temperature=0.0,
        max_tokens=8192,
    )

    message = make_message(image, args.prompt)

    try:
        outputs = llm.chat([message], sampling_params)
        result = outputs[0].outputs[0].text.strip()

        logger.info("✅ OCR Complete!")
        print("\n" + "=" * 80)
        print("MARKDOWN OUTPUT:")
        print("=" * 80)
        print(result)
        print("=" * 80)

    except Exception as e:
        logger.error(f"❌ Inference failed: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
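
Aside (not part of either committed file): the test script above drives the offline LLM class directly. If the PR #27247 branch's OpenAI-compatible server also handles this model (an untested assumption), the same image_url + text request could be sent to a server started with `vllm serve deepseek-ai/DeepSeek-OCR --trust-remote-code`. A rough sketch using the standard openai client, with document.png and the localhost URL as placeholders:

# Hypothetical alternative path, assuming the PR branch's OpenAI-compatible
# server works for DeepSeek-OCR; requires the `openai` client package.
import base64

from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

# Encode a local page image as a data URI, mirroring make_message() above.
with open("document.png", "rb") as f:
    data_uri = f"data:image/png;base64,{base64.b64encode(f.read()).decode()}"

response = client.chat.completions.create(
    model="deepseek-ai/DeepSeek-OCR",
    temperature=0.0,
    max_tokens=8192,
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": data_uri}},
                {"type": "text", "text": "<image>\n<|grounding|>Convert the document to markdown."},
            ],
        }
    ],
)
print(response.choices[0].message.content)
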
deepseek-ocr-vllm.py ADDED
@@ -0,0 +1,621 @@
# /// script
# requires-python = ">=3.11"
# dependencies = [
#     "datasets",
#     "huggingface-hub[hf_transfer]",
#     "pillow",
#     "vllm @ git+https://github.com/Isotr0py/vllm.git@deepseek-ocr",
#     "tqdm",
#     "toolz",
#     "torch",
# ]
#
# [tool.uv]
# prerelease = "allow"
# ///

"""
Convert document images to markdown using DeepSeek-OCR with vLLM.

This script processes images through the DeepSeek-OCR model to extract
text and structure as markdown, using vLLM for efficient batch processing.

NOTE: Uses vLLM from PR #27247 (the Isotr0py/vllm@deepseek-ocr branch) because
DeepSeek-OCR support is not yet merged into main vLLM. The first run will take
10-15 minutes to compile vLLM from source.

Features:
- Multiple resolution modes (Tiny/Small/Base/Large/Gundam)
- LaTeX equation recognition
- Table extraction and formatting
- Document structure preservation
- Image grounding and descriptions
- Multilingual support
- Batch processing with vLLM for better performance
"""

import argparse
import base64
import io
import json
import logging
import os
import sys
from typing import Any, Dict, List, Union
from datetime import datetime

import torch
from datasets import load_dataset
from huggingface_hub import DatasetCard, login
from PIL import Image
from toolz import partition_all
from tqdm.auto import tqdm
from vllm import LLM, SamplingParams

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Resolution mode presets
RESOLUTION_MODES = {
    "tiny": {"base_size": 512, "image_size": 512, "crop_mode": False},
    "small": {"base_size": 640, "image_size": 640, "crop_mode": False},
    "base": {"base_size": 1024, "image_size": 1024, "crop_mode": False},
    "large": {"base_size": 1280, "image_size": 1280, "crop_mode": False},
    "gundam": {"base_size": 1024, "image_size": 640, "crop_mode": True},  # Dynamic resolution
}


def check_cuda_availability():
    """Check if CUDA is available and exit if not."""
    if not torch.cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.error("Please run on a machine with a CUDA-capable GPU.")
        sys.exit(1)
    else:
        logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")


def make_ocr_message(
    image: Union[Image.Image, Dict[str, Any], str],
    prompt: str = "<image>\n<|grounding|>Convert the document to markdown.",
) -> List[Dict]:
    """Create chat message for OCR processing."""
    # Convert to PIL Image if needed
    if isinstance(image, Image.Image):
        pil_img = image
    elif isinstance(image, dict) and "bytes" in image:
        pil_img = Image.open(io.BytesIO(image["bytes"]))
    elif isinstance(image, str):
        pil_img = Image.open(image)
    else:
        raise ValueError(f"Unsupported image type: {type(image)}")

    # Convert to RGB
    pil_img = pil_img.convert("RGB")

    # Convert to base64 data URI
    buf = io.BytesIO()
    pil_img.save(buf, format="PNG")
    data_uri = f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}"

    # Return message in vLLM format
    return [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": data_uri}},
                {"type": "text", "text": prompt},
            ],
        }
    ]


def create_dataset_card(
    source_dataset: str,
    model: str,
    num_samples: int,
    processing_time: str,
    batch_size: int,
    max_model_len: int,
    max_tokens: int,
    gpu_memory_utilization: float,
    resolution_mode: str,
    base_size: int,
    image_size: int,
    crop_mode: bool,
    image_column: str = "image",
    split: str = "train",
) -> str:
    """Create a dataset card documenting the OCR process."""
    model_name = model.split("/")[-1]

    return f"""---
tags:
- ocr
- document-processing
- deepseek
- deepseek-ocr
- markdown
- uv-script
- generated
---

# Document OCR using {model_name}

This dataset contains markdown-formatted OCR results from images in [{source_dataset}](https://huggingface.co/datasets/{source_dataset}) using DeepSeek-OCR.

## Processing Details

- **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
- **Model**: [{model}](https://huggingface.co/{model})
- **Number of Samples**: {num_samples:,}
- **Processing Time**: {processing_time}
- **Processing Date**: {datetime.now().strftime("%Y-%m-%d %H:%M UTC")}

### Configuration

- **Image Column**: `{image_column}`
- **Output Column**: `markdown`
- **Dataset Split**: `{split}`
- **Batch Size**: {batch_size}
- **Resolution Mode**: {resolution_mode}
- **Base Size**: {base_size}
- **Image Size**: {image_size}
- **Crop Mode**: {crop_mode}
- **Max Model Length**: {max_model_len:,} tokens
- **Max Output Tokens**: {max_tokens:,}
- **GPU Memory Utilization**: {gpu_memory_utilization:.1%}

## Model Information

DeepSeek-OCR is a state-of-the-art document OCR model that excels at:
- 📐 **LaTeX equations** - Mathematical formulas preserved in LaTeX format
- 📊 **Tables** - Extracted and formatted as HTML/markdown
- 📝 **Document structure** - Headers, lists, and formatting maintained
- 🖼️ **Image grounding** - Spatial layout and bounding box information
- 🔍 **Complex layouts** - Multi-column and hierarchical structures
- 🌍 **Multilingual** - Supports multiple languages

### Resolution Modes

- **Tiny** (512×512): Fast processing, 64 vision tokens
- **Small** (640×640): Balanced speed/quality, 100 vision tokens
- **Base** (1024×1024): High quality, 256 vision tokens
- **Large** (1280×1280): Maximum quality, 400 vision tokens
- **Gundam** (dynamic): Adaptive multi-tile processing for large documents

## Dataset Structure

The dataset contains all original columns plus:
- `markdown`: The extracted text in markdown format with preserved structure
- `inference_info`: JSON list tracking all OCR models applied to this dataset

## Usage

```python
from datasets import load_dataset
import json

# Load the dataset
dataset = load_dataset("{{output_dataset_id}}", split="{split}")

# Access the markdown text
for example in dataset:
    print(example["markdown"])
    break

# View all OCR models applied to this dataset
inference_info = json.loads(dataset[0]["inference_info"])
for info in inference_info:
    print(f"Column: {{info['column_name']}} - Model: {{info['model_id']}}")
```

## Reproduction

This dataset was generated using the [uv-scripts/ocr](https://huggingface.co/datasets/uv-scripts/ocr) DeepSeek OCR vLLM script:

```bash
uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr-vllm.py \\
    {source_dataset} \\
    <output-dataset> \\
    --resolution-mode {resolution_mode} \\
    --image-column {image_column}
```

## Performance

- **Processing Speed**: ~{num_samples / (float(processing_time.split()[0]) * 60):.1f} images/second
- **Processing Method**: Batch processing with vLLM (2-3x speedup over sequential)

Generated with 🤖 [UV Scripts](https://huggingface.co/uv-scripts)
"""


def main(
    input_dataset: str,
    output_dataset: str,
    image_column: str = "image",
    batch_size: int = 8,  # Smaller batch size to avoid potential memory issues with DeepSeek-OCR
    model: str = "deepseek-ai/DeepSeek-OCR",
    resolution_mode: str = "gundam",
    base_size: int = None,
    image_size: int = None,
    crop_mode: bool = None,
    max_model_len: int = 8192,
    max_tokens: int = 8192,
    gpu_memory_utilization: float = 0.8,
    prompt: str = "<image>\n<|grounding|>Convert the document to markdown.",
    hf_token: str = None,
    split: str = "train",
    max_samples: int = None,
    private: bool = False,
    shuffle: bool = False,
    seed: int = 42,
):
    """Process images from a HF dataset through the DeepSeek-OCR model with vLLM."""

    # Check CUDA availability first
    check_cuda_availability()

    # Track processing start time
    start_time = datetime.now()

    # Enable HF_TRANSFER for faster downloads
    os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

    # Login to HF if token provided
    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
    if HF_TOKEN:
        login(token=HF_TOKEN)

    # Determine resolution settings
    if resolution_mode in RESOLUTION_MODES:
        mode_config = RESOLUTION_MODES[resolution_mode]
        final_base_size = base_size if base_size is not None else mode_config["base_size"]
        final_image_size = image_size if image_size is not None else mode_config["image_size"]
        final_crop_mode = crop_mode if crop_mode is not None else mode_config["crop_mode"]
        logger.info(f"Using resolution mode: {resolution_mode}")
    else:
        # Custom mode - require all parameters
        if base_size is None or image_size is None or crop_mode is None:
            raise ValueError(
                f"Invalid resolution mode '{resolution_mode}'. "
                f"Use one of {list(RESOLUTION_MODES.keys())} or specify "
                f"--base-size, --image-size, and --crop-mode manually."
            )
        final_base_size = base_size
        final_image_size = image_size
        final_crop_mode = crop_mode
        resolution_mode = "custom"

    logger.info(
        f"Resolution: base_size={final_base_size}, "
        f"image_size={final_image_size}, crop_mode={final_crop_mode}"
    )

    # Load dataset
    logger.info(f"Loading dataset: {input_dataset}")
    dataset = load_dataset(input_dataset, split=split)

    # Validate image column
    if image_column not in dataset.column_names:
        raise ValueError(
            f"Column '{image_column}' not found. Available: {dataset.column_names}"
        )

    # Shuffle if requested
    if shuffle:
        logger.info(f"Shuffling dataset with seed {seed}")
        dataset = dataset.shuffle(seed=seed)

    # Limit samples if requested
    if max_samples:
        dataset = dataset.select(range(min(max_samples, len(dataset))))
        logger.info(f"Limited to {len(dataset)} samples")

    # Initialize vLLM
    logger.info(f"Initializing vLLM with model: {model}")
    logger.info("This may take a few minutes on first run...")

    # Add specific parameters for DeepSeek-OCR compatibility
    llm = LLM(
        model=model,
        trust_remote_code=True,
        max_model_len=max_model_len,
        gpu_memory_utilization=gpu_memory_utilization,
        limit_mm_per_prompt={"image": 1},
        enforce_eager=False,  # Allow CUDA graphs/compilation instead of forcing eager execution
    )

    sampling_params = SamplingParams(
        temperature=0.0,  # Deterministic for OCR
        max_tokens=max_tokens,
    )

    logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
    logger.info("Using vLLM for batch processing - should be faster than sequential processing")

    # Process images in batches
    all_markdown = []

    for batch_indices in tqdm(
        partition_all(batch_size, range(len(dataset))),
        total=(len(dataset) + batch_size - 1) // batch_size,
        desc="DeepSeek-OCR vLLM processing",
    ):
        batch_indices = list(batch_indices)
        batch_images = [dataset[i][image_column] for i in batch_indices]

        try:
            # Create messages for batch
            batch_messages = [make_ocr_message(img, prompt) for img in batch_images]

            # Process with vLLM
            outputs = llm.chat(batch_messages, sampling_params)

            # Extract outputs
            for output in outputs:
                text = output.outputs[0].text.strip()
                all_markdown.append(text)

        except Exception as e:
            logger.error(f"Error processing batch: {e}")
            # Add error placeholders for failed batch
            all_markdown.extend(["[OCR FAILED]"] * len(batch_images))

    # Calculate processing time
    processing_duration = datetime.now() - start_time
    processing_time_str = f"{processing_duration.total_seconds() / 60:.1f} min"

    # Add markdown column to dataset
    logger.info("Adding markdown column to dataset")
    dataset = dataset.add_column("markdown", all_markdown)

    # Handle inference_info tracking
    logger.info("Updating inference_info...")

    # Check for existing inference_info
    if "inference_info" in dataset.column_names:
        # Parse existing info from first row (all rows have same info)
        try:
            existing_info = json.loads(dataset[0]["inference_info"])
            if not isinstance(existing_info, list):
                existing_info = [existing_info]  # Convert old format to list
        except (json.JSONDecodeError, TypeError):
            existing_info = []
        # Remove old column to update it
        dataset = dataset.remove_columns(["inference_info"])
    else:
        existing_info = []

    # Add new inference info
    new_info = {
        "column_name": "markdown",
        "model_id": model,
        "processing_date": datetime.now().isoformat(),
        "resolution_mode": resolution_mode,
        "base_size": final_base_size,
        "image_size": final_image_size,
        "crop_mode": final_crop_mode,
        "prompt": prompt,
        "batch_size": batch_size,
        "max_tokens": max_tokens,
        "gpu_memory_utilization": gpu_memory_utilization,
        "max_model_len": max_model_len,
        "script": "deepseek-ocr-vllm.py",
        "script_version": "1.0.0",
        "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr-vllm.py",
        "implementation": "vllm (batch processing)",
    }
    existing_info.append(new_info)

    # Add updated inference_info column
    info_json = json.dumps(existing_info, ensure_ascii=False)
    dataset = dataset.add_column("inference_info", [info_json] * len(dataset))

    # Push to hub
    logger.info(f"Pushing to {output_dataset}")
    dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)

    # Create and push dataset card
    logger.info("Creating dataset card...")
    card_content = create_dataset_card(
        source_dataset=input_dataset,
        model=model,
        num_samples=len(dataset),
        processing_time=processing_time_str,
        batch_size=batch_size,
        max_model_len=max_model_len,
        max_tokens=max_tokens,
        gpu_memory_utilization=gpu_memory_utilization,
        resolution_mode=resolution_mode,
        base_size=final_base_size,
        image_size=final_image_size,
        crop_mode=final_crop_mode,
        image_column=image_column,
        split=split,
    )

    card = DatasetCard(card_content)
    card.push_to_hub(output_dataset, token=HF_TOKEN)
    logger.info("✅ Dataset card created and pushed!")

    logger.info("✅ OCR conversion complete!")
    logger.info(
        f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
    )
    logger.info(f"Processing time: {processing_time_str}")


if __name__ == "__main__":
    # Show example usage if no arguments
    if len(sys.argv) == 1:
        print("=" * 80)
        print("DeepSeek-OCR to Markdown Converter (vLLM)")
        print("=" * 80)
        print("\nThis script converts document images to markdown using")
        print("DeepSeek-OCR with vLLM for efficient batch processing.")
        print("\nFeatures:")
        print("- Multiple resolution modes (Tiny/Small/Base/Large/Gundam)")
        print("- LaTeX equation recognition")
        print("- Table extraction and formatting")
        print("- Document structure preservation")
        print("- Image grounding and spatial layout")
        print("- Multilingual support")
        print("- ⚡ Fast batch processing with vLLM (2-3x speedup)")
        print("\nExample usage:")
        print("\n1. Basic OCR conversion (Gundam mode - dynamic resolution):")
        print("   uv run deepseek-ocr-vllm.py document-images markdown-docs")
        print("\n2. High quality mode (Large - 1280×1280):")
        print("   uv run deepseek-ocr-vllm.py scanned-pdfs extracted-text --resolution-mode large")
        print("\n3. Fast processing (Tiny - 512×512):")
        print("   uv run deepseek-ocr-vllm.py quick-test output --resolution-mode tiny")
        print("\n4. Process a subset for testing:")
        print("   uv run deepseek-ocr-vllm.py large-dataset test-output --max-samples 10")
        print("\n5. Custom resolution:")
        print("   uv run deepseek-ocr-vllm.py dataset output \\")
        print("     --base-size 1024 --image-size 640 --crop-mode")
        print("\n6. Running on HF Jobs:")
        print("   hf jobs uv run --flavor l4x1 \\")
        print("     -e HF_TOKEN=$(python3 -c \"from huggingface_hub import get_token; print(get_token())\") \\")
        print("     https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr-vllm.py \\")
        print("     your-document-dataset \\")
        print("     your-markdown-output")
        print("\n" + "=" * 80)
        print("\nFor full help, run: uv run deepseek-ocr-vllm.py --help")
        sys.exit(0)

    parser = argparse.ArgumentParser(
        description="OCR images to markdown using DeepSeek-OCR (vLLM)",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Resolution Modes:
  tiny    512×512 pixels, fast processing (64 vision tokens)
  small   640×640 pixels, balanced (100 vision tokens)
  base    1024×1024 pixels, high quality (256 vision tokens)
  large   1280×1280 pixels, maximum quality (400 vision tokens)
  gundam  Dynamic multi-tile processing (adaptive)

Examples:
  # Basic usage with default Gundam mode
  uv run deepseek-ocr-vllm.py my-images-dataset ocr-results

  # High quality processing
  uv run deepseek-ocr-vllm.py documents extracted-text --resolution-mode large

  # Fast processing for testing
  uv run deepseek-ocr-vllm.py dataset output --resolution-mode tiny --max-samples 100

  # Custom resolution settings
  uv run deepseek-ocr-vllm.py dataset output --base-size 1024 --image-size 640 --crop-mode

  # With custom batch size for performance tuning
  uv run deepseek-ocr-vllm.py dataset output --batch-size 16 --max-model-len 16384
        """,
    )

    parser.add_argument("input_dataset", help="Input dataset ID from Hugging Face Hub")
    parser.add_argument("output_dataset", help="Output dataset ID for Hugging Face Hub")
    parser.add_argument(
        "--image-column",
        default="image",
        help="Column containing images (default: image)",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=8,
        help="Batch size for processing (default: 8, adjust based on GPU memory)",
    )
    parser.add_argument(
        "--model",
        default="deepseek-ai/DeepSeek-OCR",
        help="Model to use (default: deepseek-ai/DeepSeek-OCR)",
    )
    parser.add_argument(
        "--resolution-mode",
        default="gundam",
        choices=list(RESOLUTION_MODES.keys()) + ["custom"],
        help="Resolution mode preset (default: gundam)",
    )
    parser.add_argument(
        "--base-size",
        type=int,
        help="Base resolution size (overrides resolution-mode)",
    )
    parser.add_argument(
        "--image-size",
        type=int,
        help="Image tile size (overrides resolution-mode)",
    )
    parser.add_argument(
        "--crop-mode",
        action="store_true",
        help="Enable dynamic multi-tile cropping (overrides resolution-mode)",
    )
    parser.add_argument(
        "--max-model-len",
        type=int,
        default=8192,
        help="Maximum model context length (default: 8192)",
    )
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=8192,
        help="Maximum tokens to generate (default: 8192)",
    )
    parser.add_argument(
        "--gpu-memory-utilization",
        type=float,
        default=0.8,
        help="GPU memory utilization (default: 0.8)",
    )
    parser.add_argument(
        "--prompt",
        default="<image>\n<|grounding|>Convert the document to markdown.",
        help="Prompt for OCR (default: grounding markdown conversion)",
    )
    parser.add_argument("--hf-token", help="Hugging Face API token")
    parser.add_argument(
        "--split", default="train", help="Dataset split to use (default: train)"
    )
    parser.add_argument(
        "--max-samples",
        type=int,
        help="Maximum number of samples to process (for testing)",
    )
    parser.add_argument(
        "--private", action="store_true", help="Make output dataset private"
    )
    parser.add_argument(
        "--shuffle",
        action="store_true",
        help="Shuffle the dataset before processing (useful for random sampling)",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=42,
        help="Random seed for shuffling (default: 42)",
    )

    args = parser.parse_args()

    main(
        input_dataset=args.input_dataset,
        output_dataset=args.output_dataset,
        image_column=args.image_column,
        batch_size=args.batch_size,
        model=args.model,
        resolution_mode=args.resolution_mode,
        base_size=args.base_size,
        image_size=args.image_size,
        crop_mode=args.crop_mode if args.crop_mode else None,
        max_model_len=args.max_model_len,
        max_tokens=args.max_tokens,
        gpu_memory_utilization=args.gpu_memory_utilization,
        prompt=args.prompt,
        hf_token=args.hf_token,
        split=args.split,
        max_samples=args.max_samples,
        private=args.private,
        shuffle=args.shuffle,
        seed=args.seed,
    )
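
Aside (not part of the committed scripts): once a run of deepseek-ocr-vllm.py finishes, a quick sanity check on the pushed dataset is to count the "[OCR FAILED]" placeholders the script writes for failed batches and to read back the settings it records in inference_info. A minimal sketch, with your-username/markdown-docs standing in for the real output dataset ID:

# Spot-check sketch for a dataset produced by deepseek-ocr-vllm.py.
# "your-username/markdown-docs" is a placeholder output dataset ID.
import json

from datasets import load_dataset

ds = load_dataset("your-username/markdown-docs", split="train")

# Count rows where a whole batch failed and the script wrote a placeholder.
failed = sum(1 for row in ds if row["markdown"] == "[OCR FAILED]")
print(f"{failed}/{len(ds)} pages carry the [OCR FAILED] placeholder")

# Preview one OCR result and the most recently recorded inference settings.
print(ds[0]["markdown"][:500])
latest = json.loads(ds[0]["inference_info"])[-1]
print(latest["model_id"], latest["resolution_mode"], latest["batch_size"])
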