davanstrien (HF Staff) committed
Commit 88db448 · 1 Parent(s): cf61cef
Files changed (2)
  1. deepseek-ocr-vllm-test.py +0 -226
  2. deepseek-ocr-vllm.py +33 -11
deepseek-ocr-vllm-test.py DELETED
@@ -1,226 +0,0 @@
-# /// script
-# requires-python = ">=3.11"
-# dependencies = [
-#     "vllm",
-#     "pillow",
-#     "datasets",
-#     "torch",
-#     "huggingface-hub",
-# ]
-#
-# [[tool.uv.index]]
-# url = "https://wheels.vllm.ai/nightly"
-#
-# [tool.uv]
-# prerelease = "allow"
-# ///
-
-"""
-Minimal test script for DeepSeek-OCR with vLLM.
-
-This is an MVP script to validate that DeepSeek-OCR works with vLLM.
-Installs vLLM from nightly wheels (PR #27247 now merged to main).
-
-Use this to test in Colab before building the full production script.
-
-Usage:
-    # Test with a HF dataset image
-    uv run deepseek-ocr-vllm-test.py --dataset pixparse/idl-wds --index 0
-
-    # Test with local image
-    uv run deepseek-ocr-vllm-test.py --image path/to/image.png
-
-    # Test with different resolution mode
-    uv run deepseek-ocr-vllm-test.py --dataset pixparse/idl-wds --resolution-mode tiny
-"""
-
-import argparse
-import base64
-import io
-import logging
-import sys
-from typing import List, Dict
-
-import torch
-from PIL import Image
-from datasets import load_dataset
-from vllm import LLM, SamplingParams
-
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-# Resolution mode presets from DeepSeek-OCR
-RESOLUTION_MODES = {
-    "tiny": {"base_size": 512, "image_size": 512, "crop_mode": False},
-    "small": {"base_size": 640, "image_size": 640, "crop_mode": False},
-    "base": {"base_size": 1024, "image_size": 1024, "crop_mode": False},
-    "large": {"base_size": 1280, "image_size": 1280, "crop_mode": False},
-    "gundam": {"base_size": 1024, "image_size": 640, "crop_mode": True},
-}
-
-
-def check_cuda():
-    """Check CUDA availability."""
-    if not torch.cuda.is_available():
-        logger.error("❌ CUDA not available. DeepSeek-OCR requires a GPU.")
-        logger.error("Please run in Colab with GPU runtime or on a GPU machine.")
-        sys.exit(1)
-    logger.info(f"✅ CUDA available: {torch.cuda.get_device_name(0)}")
-
-
-def make_message(image: Image.Image, prompt: str) -> List[Dict]:
-    """Create vLLM chat message with base64 encoded image."""
-    # Convert to RGB
-    image = image.convert("RGB")
-
-    # Encode as base64 data URI
-    buf = io.BytesIO()
-    image.save(buf, format="PNG")
-    data_uri = f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}"
-
-    return [
-        {
-            "role": "user",
-            "content": [
-                {"type": "image_url", "image_url": {"url": data_uri}},
-                {"type": "text", "text": prompt},
-            ],
-        }
-    ]
-
-
-def main():
-    parser = argparse.ArgumentParser(
-        description="Test DeepSeek-OCR with vLLM",
-        formatter_class=argparse.RawDescriptionHelpFormatter,
-        epilog="""
-Examples:
-    # Test with HF dataset
-    uv run deepseek-ocr-vllm-test.py --dataset pixparse/idl-wds --index 0
-
-    # Test with local image
-    uv run deepseek-ocr-vllm-test.py --image document.png
-
-    # Test different resolution
-    uv run deepseek-ocr-vllm-test.py --dataset pixparse/idl-wds --resolution-mode large
-""",
-    )
-
-    # Image source (one required)
-    source_group = parser.add_mutually_exclusive_group(required=True)
-    source_group.add_argument("--image", help="Path to local image file")
-    source_group.add_argument(
-        "--dataset",
-        help="HF dataset ID (will use first image from train split)",
-    )
-
-    parser.add_argument(
-        "--index",
-        type=int,
-        default=0,
-        help="Dataset index to use (default: 0)",
-    )
-    parser.add_argument(
-        "--image-column",
-        default="image",
-        help="Column name for images in dataset (default: image)",
-    )
-    parser.add_argument(
-        "--split",
-        default="train",
-        help="Dataset split to use (default: train)",
-    )
-    parser.add_argument(
-        "--resolution-mode",
-        choices=list(RESOLUTION_MODES.keys()),
-        default="gundam",
-        help="Resolution mode preset (default: gundam)",
-    )
-    parser.add_argument(
-        "--prompt",
-        default="<image>\n<|grounding|>Convert the document to markdown.",
-        help="OCR prompt",
-    )
-    parser.add_argument(
-        "--model",
-        default="deepseek-ai/DeepSeek-OCR",
-        help="Model ID (default: deepseek-ai/DeepSeek-OCR)",
-    )
-
-    args = parser.parse_args()
-
-    # Check CUDA
-    check_cuda()
-
-    # Load image
-    logger.info("📷 Loading image...")
-    if args.image:
-        image = Image.open(args.image)
-        logger.info(f"Loaded from file: {args.image}")
-    else:
-        dataset = load_dataset(args.dataset, split=args.split, streaming=False)
-        if args.image_column not in dataset.column_names:
-            logger.error(f"Column '{args.image_column}' not found in dataset")
-            logger.error(f"Available columns: {dataset.column_names}")
-            sys.exit(1)
-        image = dataset[args.index][args.image_column]
-        if isinstance(image, dict) and "bytes" in image:
-            image = Image.open(io.BytesIO(image["bytes"]))
-        logger.info(f"Loaded from dataset: {args.dataset}[{args.index}]")
-
-    logger.info(f"Image size: {image.size}")
-
-    # Get resolution settings
-    resolution = RESOLUTION_MODES[args.resolution_mode]
-    logger.info(f"Resolution mode: {args.resolution_mode}")
-    logger.info(f"  base_size={resolution['base_size']}, image_size={resolution['image_size']}, crop_mode={resolution['crop_mode']}")
-
-    # Initialize vLLM
-    logger.info(f"🚀 Loading model: {args.model}")
-    logger.info("This may take a few minutes on first run...")
-
-    try:
-        llm = LLM(
-            model=args.model,
-            trust_remote_code=True,
-            max_model_len=8192,
-            gpu_memory_utilization=0.8,
-            limit_mm_per_prompt={"image": 1},
-            enforce_eager=False,
-        )
-        logger.info("✅ Model loaded successfully!")
-    except Exception as e:
-        logger.error(f"❌ Failed to load model: {e}")
-        logger.error("\nThis might mean:")
-        logger.error("  1. The model architecture is not recognized")
-        logger.error("  2. Missing dependencies")
-        logger.error("  3. Insufficient GPU memory")
-        sys.exit(1)
-
-    # Run inference
-    logger.info("🔍 Running OCR...")
-    sampling_params = SamplingParams(
-        temperature=0.0,
-        max_tokens=8192,
-    )
-
-    message = make_message(image, args.prompt)
-
-    try:
-        outputs = llm.chat([message], sampling_params)
-        result = outputs[0].outputs[0].text.strip()
-
-        logger.info("✅ OCR Complete!")
-        print("\n" + "=" * 80)
-        print("MARKDOWN OUTPUT:")
-        print("=" * 80)
-        print(result)
-        print("=" * 80)
-
-    except Exception as e:
-        logger.error(f"❌ Inference failed: {e}")
-        sys.exit(1)
-
-
-if __name__ == "__main__":
-    main()
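Note: the one-image sanity check this file provided can still be run inline. Below is a minimal sketch condensed from the deleted code above; the input path `page.png` is a hypothetical placeholder, and it requires a CUDA GPU plus a vLLM build with DeepSeek-OCR support (e.g. the nightly wheels pinned in the deleted script header).

```python
# Condensed smoke test, reconstructed from the deleted script (not part of the repo).
import base64
import io

from PIL import Image
from vllm import LLM, SamplingParams


def make_message(image: Image.Image, prompt: str) -> list[dict]:
    # Encode the image as a base64 PNG data URI, as the deleted script did.
    buf = io.BytesIO()
    image.convert("RGB").save(buf, format="PNG")
    data_uri = f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}"
    return [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": data_uri}},
                {"type": "text", "text": prompt},
            ],
        }
    ]


llm = LLM(
    model="deepseek-ai/DeepSeek-OCR",
    trust_remote_code=True,
    max_model_len=8192,
    gpu_memory_utilization=0.8,
    limit_mm_per_prompt={"image": 1},
)
message = make_message(
    Image.open("page.png"),  # hypothetical input image
    "<image>\n<|grounding|>Convert the document to markdown.",
)
outputs = llm.chat([message], SamplingParams(temperature=0.0, max_tokens=8192))
print(outputs[0].outputs[0].text.strip())
```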
deepseek-ocr-vllm.py CHANGED
@@ -63,9 +63,14 @@ RESOLUTION_MODES = {
     "small": {"base_size": 640, "image_size": 640, "crop_mode": False},
     "base": {"base_size": 1024, "image_size": 1024, "crop_mode": False},
     "large": {"base_size": 1280, "image_size": 1280, "crop_mode": False},
-    "gundam": {"base_size": 1024, "image_size": 640, "crop_mode": True},  # Dynamic resolution
+    "gundam": {
+        "base_size": 1024,
+        "image_size": 640,
+        "crop_mode": True,
+    },  # Dynamic resolution
 }
 
+
 def check_cuda_availability():
     """Check if CUDA is available and exit if not."""
     if not torch.cuda.is_available():
@@ -75,9 +80,10 @@ def check_cuda_availability():
     else:
         logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
 
+
 def make_ocr_message(
     image: Union[Image.Image, Dict[str, Any], str],
-    prompt: str = "<image>\n<|grounding|>Convert the document to markdown.",
+    prompt: str = "<image>\n<|grounding|>Convert the document to markdown. ",
 ) -> List[Dict]:
     """Create chat message for OCR processing."""
     # Convert to PIL Image if needed
@@ -109,6 +115,7 @@ make_ocr_message
         }
     ]
 
+
 def create_dataset_card(
     source_dataset: str,
     model: str,
@@ -229,6 +236,7 @@ uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr-vllm
 Generated with 🤖 [UV Scripts](https://huggingface.co/uv-scripts)
 """
 
+
 def main(
     input_dataset: str,
     output_dataset: str,
@@ -269,9 +277,15 @@ def main(
     # Determine resolution settings
     if resolution_mode in RESOLUTION_MODES:
         mode_config = RESOLUTION_MODES[resolution_mode]
-        final_base_size = base_size if base_size is not None else mode_config["base_size"]
-        final_image_size = image_size if image_size is not None else mode_config["image_size"]
-        final_crop_mode = crop_mode if crop_mode is not None else mode_config["crop_mode"]
+        final_base_size = (
+            base_size if base_size is not None else mode_config["base_size"]
+        )
+        final_image_size = (
+            image_size if image_size is not None else mode_config["image_size"]
+        )
+        final_crop_mode = (
+            crop_mode if crop_mode is not None else mode_config["crop_mode"]
+        )
         logger.info(f"Using resolution mode: {resolution_mode}")
     else:
         # Custom mode - require all parameters
@@ -314,7 +328,7 @@
     # Initialize vLLM
     logger.info(f"Initializing vLLM with model: {model}")
     logger.info("This may take a few minutes on first run...")
-
+
     # Add specific parameters for DeepSeek-OCR compatibility
     llm = LLM(
         model=model,
@@ -331,7 +345,9 @@
     )
 
     logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
-    logger.info("Using vLLM for batch processing - should be faster than sequential processing")
+    logger.info(
+        "Using vLLM for batch processing - should be faster than sequential processing"
+    )
 
     # Process images in batches
     all_markdown = []
@@ -465,11 +481,15 @@ if __name__ == "__main__":
     print("\n1. Basic OCR conversion (Gundam mode - dynamic resolution):")
     print("   uv run deepseek-ocr-vllm.py document-images markdown-docs")
     print("\n2. High quality mode (Large - 1280×1280):")
-    print("   uv run deepseek-ocr-vllm.py scanned-pdfs extracted-text --resolution-mode large")
+    print(
+        "   uv run deepseek-ocr-vllm.py scanned-pdfs extracted-text --resolution-mode large"
+    )
     print("\n3. Fast processing (Tiny - 512×512):")
     print("   uv run deepseek-ocr-vllm.py quick-test output --resolution-mode tiny")
     print("\n4. Process a subset for testing:")
-    print("   uv run deepseek-ocr-vllm.py large-dataset test-output --max-samples 10")
+    print(
+        "   uv run deepseek-ocr-vllm.py large-dataset test-output --max-samples 10"
+    )
     print("\n5. Custom resolution:")
     print("   uv run deepseek-ocr-vllm.py dataset output \\")
     print("       --base-size 1024 --image-size 640 --crop-mode")
@@ -477,7 +497,9 @@
     print("   hf jobs uv run --flavor l4x1 \\")
     print("     -s HF_TOKEN \\")
     print("     -e UV_TORCH_BACKEND=auto \\")
-    print("     https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr-vllm.py \\")
+    print(
+        "     https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr-vllm.py \\"
+    )
     print("     your-document-dataset \\")
     print("     your-markdown-output")
     print("\n" + "=" * 80)
@@ -621,4 +643,4 @@ Examples:
         private=args.private,
         shuffle=args.shuffle,
         seed=args.seed,
-    )
+    )
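For reference, the reformatted settings block above is behavior-preserving: each of `base_size`, `image_size`, and `crop_mode` falls back to the chosen preset only when no explicit override is given. A standalone sketch of that pattern follows (the helper name `resolve_settings` is hypothetical, not part of the script):

```python
# Preset-with-override resolution, mirroring the logic in main().
RESOLUTION_MODES = {
    "tiny": {"base_size": 512, "image_size": 512, "crop_mode": False},
    "gundam": {"base_size": 1024, "image_size": 640, "crop_mode": True},
}


def resolve_settings(mode, base_size=None, image_size=None, crop_mode=None):
    """Explicit arguments win; anything left unset comes from the preset."""
    preset = RESOLUTION_MODES[mode]
    return {
        "base_size": base_size if base_size is not None else preset["base_size"],
        "image_size": image_size if image_size is not None else preset["image_size"],
        "crop_mode": crop_mode if crop_mode is not None else preset["crop_mode"],
    }


assert resolve_settings("gundam")["image_size"] == 640  # preset value
assert resolve_settings("gundam", image_size=1024)["image_size"] == 1024  # override wins
```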