Commit dbf8967
Parent(s): 76273a4

Refactor dependencies and clean up whitespace in paddleocr-vl.py

paddleocr-vl.py CHANGED (+67 -53)
@@ -2,7 +2,7 @@
 # requires-python = ">=3.11"
 # dependencies = [
 #     "datasets",
-#     "huggingface-hub
+#     "huggingface-hub",
 #     "pillow",
 #     "vllm",
 #     "tqdm",

@@ -97,19 +97,19 @@ def smart_resize(
 ) -> tuple[int, int]:
     """
     PaddleOCR-VL's intelligent resize logic.
-
+
     Rescales the image so that:
     1. Both dimensions are divisible by 'factor' (28)
     2. Total pixels are within [min_pixels, max_pixels]
     3. Aspect ratio is maintained as closely as possible
-
+
     Args:
         height: Original image height
         width: Original image width
         factor: Dimension divisibility factor (default: 28)
         min_pixels: Minimum total pixels (default: 100,880)
         max_pixels: Maximum total pixels (default: 1,003,520)
-
+
     Returns:
         Tuple of (new_height, new_width)
     """

@@ -126,10 +126,10 @@ def smart_resize(
             f"Extreme aspect ratio detected: {max(height, width) / min(height, width):.1f}"
         )
         # Continue anyway, but warn about potential issues
-
+
     h_bar = round(height / factor) * factor
     w_bar = round(width / factor) * factor
-
+
     if h_bar * w_bar > max_pixels:
         beta = math.sqrt((height * width) / max_pixels)
         h_bar = math.floor(height / beta / factor) * factor

@@ -138,7 +138,7 @@ def smart_resize(
         beta = math.sqrt(min_pixels / (height * width))
         h_bar = math.ceil(height * beta / factor) * factor
         w_bar = math.ceil(width * beta / factor) * factor
-
+
     return h_bar, w_bar


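For reference, the smart_resize logic these hunks touch can be run standalone. A minimal sketch, assuming the defaults named in the docstring; the shrink branch's w_bar line falls outside the hunks above, so the symmetric floor is an assumption, as is the function name:

import math

def smart_resize_sketch(
    height: int,
    width: int,
    factor: int = 28,
    min_pixels: int = 100_880,
    max_pixels: int = 1_003_520,
) -> tuple[int, int]:
    # Round each side to the nearest multiple of `factor`.
    h_bar = round(height / factor) * factor
    w_bar = round(width / factor) * factor
    if h_bar * w_bar > max_pixels:
        # Too many pixels: scale down by beta, flooring so the product stays under the cap.
        beta = math.sqrt((height * width) / max_pixels)
        h_bar = math.floor(height / beta / factor) * factor
        w_bar = math.floor(width / beta / factor) * factor  # assumed symmetric to h_bar
    elif h_bar * w_bar < min_pixels:
        # Too few pixels: scale up by beta, ceiling so the product clears the floor.
        beta = math.sqrt(min_pixels / (height * width))
        h_bar = math.ceil(height * beta / factor) * factor
        w_bar = math.ceil(width * beta / factor) * factor
    return h_bar, w_bar

# A 3000x2000 scan has 6,000,000 pixels, above the 1,003,520 cap:
# beta ~= 2.445, giving (1204, 812) -- both multiples of 28, ~978k pixels.
print(smart_resize_sketch(3000, 2000))  # (1204, 812)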
@@ -149,7 +149,7 @@ def make_ocr_message(
 ) -> List[Dict]:
     """
     Create chat message for PaddleOCR-VL processing.
-
+
     PaddleOCR-VL expects a specific format with the task prefix after the image.
     """
     # Convert to PIL Image if needed

@@ -164,7 +164,7 @@ def make_ocr_message(

     # Convert to RGB
     pil_img = pil_img.convert("RGB")
-
+
     # Apply smart resize if requested
     if apply_smart_resize:
         original_size = pil_img.size

@@ -207,7 +207,7 @@ def create_dataset_card(
 ) -> str:
     """Create a dataset card documenting the OCR process."""
     task_description = TASK_DESCRIPTIONS[task_mode]
-
+
     return f"""---
 tags:
 - ocr

@@ -334,59 +334,59 @@ def main(
     output_column: str = None,
 ):
     """Process images from HF dataset through PaddleOCR-VL model."""
-
+
     # Check CUDA availability first
     check_cuda_availability()
-
+
     # Track processing start time
     start_time = datetime.now()
-
+
     # Enable HF_TRANSFER for faster downloads
     os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
-
+
     # Login to HF if token provided
     HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
     if HF_TOKEN:
         login(token=HF_TOKEN)
-
+
     # Validate task mode
     if task_mode not in TASK_MODES:
         raise ValueError(
             f"Invalid task_mode '{task_mode}'. Choose from: {list(TASK_MODES.keys())}"
         )
-
+
     # Auto-generate output column name based on task mode
     if output_column is None:
         output_column = f"paddleocr_{task_mode}"
-
+
     logger.info(f"Using task mode: {task_mode} - {TASK_DESCRIPTIONS[task_mode]}")
     logger.info(f"Output will be written to column: {output_column}")
-
+
     # Load dataset
     logger.info(f"Loading dataset: {input_dataset}")
     dataset = load_dataset(input_dataset, split=split)
-
+
     # Validate image column
     if image_column not in dataset.column_names:
         raise ValueError(
             f"Column '{image_column}' not found. Available: {dataset.column_names}"
         )
-
+
     # Shuffle if requested
     if shuffle:
         logger.info(f"Shuffling dataset with seed {seed}")
         dataset = dataset.shuffle(seed=seed)
-
+
     # Limit samples if requested
     if max_samples:
         dataset = dataset.select(range(min(max_samples, len(dataset))))
         logger.info(f"Limited to {len(dataset)} samples")
-
+
     # Initialize vLLM model
     model_name = "PaddlePaddle/PaddleOCR-VL"
     logger.info(f"Initializing vLLM with {model_name}")
     logger.info("This may take a minute on first run (model is only 0.9B)...")
-
+
     llm = LLM(
         model=model_name,
         trust_remote_code=True,

@@ -394,20 +394,20 @@ def main(
         gpu_memory_utilization=gpu_memory_utilization,
         limit_mm_per_prompt={"image": 1},
     )
-
+
     # Sampling parameters - deterministic for OCR
     sampling_params = SamplingParams(
         temperature=temperature,
         max_tokens=max_tokens,
     )
-
+
     logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
     if apply_smart_resize:
         logger.info("Smart resize enabled (PaddleOCR-VL's adaptive resolution)")
-
+
     # Process images in batches
     all_outputs = []
-
+
     for batch_indices in tqdm(
         partition_all(batch_size, range(len(dataset))),
         total=(len(dataset) + batch_size - 1) // batch_size,

@@ -415,35 +415,37 @@ def main(
     ):
         batch_indices = list(batch_indices)
         batch_images = [dataset[i][image_column] for i in batch_indices]
-
+
         try:
             # Create messages for batch with task-specific prefix
            batch_messages = [
-                make_ocr_message(img, task_mode=task_mode, apply_smart_resize=apply_smart_resize)
+                make_ocr_message(
+                    img, task_mode=task_mode, apply_smart_resize=apply_smart_resize
+                )
                for img in batch_images
            ]
-
+
             # Process with vLLM
             outputs = llm.chat(batch_messages, sampling_params)
-
+
             # Extract outputs
             for output in outputs:
                 text = output.outputs[0].text.strip()
                 all_outputs.append(text)
-
+
         except Exception as e:
             logger.error(f"Error processing batch: {e}")
             # Add error placeholders for failed batch
             all_outputs.extend([f"[{task_mode.upper()} ERROR]"] * len(batch_images))
-
+
     # Calculate processing time
     processing_duration = datetime.now() - start_time
     processing_time_str = f"{processing_duration.total_seconds() / 60:.1f} min"
-
+
     # Add output column to dataset
     logger.info(f"Adding '{output_column}' column to dataset")
     dataset = dataset.add_column(output_column, all_outputs)
-
+
     # Handle inference_info tracking (for multi-model comparisons)
     inference_entry = {
         "model_id": model_name,
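The loop above batches row indices with toolz's partition_all, which yields tuples of up to batch_size items (the last may be shorter). A minimal sketch of the pattern, with a placeholder records list standing in for the dataset:

from toolz import partition_all
from tqdm import tqdm

records = list(range(10))  # stand-in for the dataset rows
batch_size = 4

for batch_indices in tqdm(
    partition_all(batch_size, range(len(records))),
    total=(len(records) + batch_size - 1) // batch_size,  # ceil division for the bar
):
    batch_indices = list(batch_indices)  # partition_all yields tuples
    print(batch_indices)  # [0, 1, 2, 3] then [4, 5, 6, 7] then [8, 9]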
|
@@ -456,31 +458,35 @@ def main(
         "max_tokens": max_tokens,
         "smart_resize": apply_smart_resize,
     }
-
+
     if "inference_info" in dataset.column_names:
         # Append to existing inference info
         logger.info("Updating existing inference_info column")
-
+
         def update_inference_info(example):
             try:
-                existing_info = json.loads(example["inference_info"]) if example["inference_info"] else []
+                existing_info = (
+                    json.loads(example["inference_info"])
+                    if example["inference_info"]
+                    else []
+                )
             except (json.JSONDecodeError, TypeError):
                 existing_info = []
-
+
             existing_info.append(inference_entry)
             return {"inference_info": json.dumps(existing_info)}
-
+
         dataset = dataset.map(update_inference_info)
     else:
         # Create new inference_info column
         logger.info("Creating new inference_info column")
         inference_list = [json.dumps([inference_entry])] * len(dataset)
         dataset = dataset.add_column("inference_info", inference_list)
-
+
     # Push to hub
     logger.info(f"Pushing to {output_dataset}")
     dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)
-
+
     # Create and push dataset card
     logger.info("Creating dataset card")
     card_content = create_dataset_card(
|
|
@@ -498,12 +504,14 @@ def main(
         image_column=image_column,
         split=split,
     )
-
+
     card = DatasetCard(card_content)
     card.push_to_hub(output_dataset, token=HF_TOKEN)
-
+
     logger.info("✅ PaddleOCR-VL processing complete!")
-    logger.info(f"Dataset available at: https://huggingface.co/datasets/{output_dataset}")
+    logger.info(
+        f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
+    )
     logger.info(f"Processing time: {processing_time_str}")
     logger.info(f"Task mode: {task_mode} - {TASK_DESCRIPTIONS[task_mode]}")

|
@@ -533,21 +541,27 @@ if __name__ == "__main__":
         print("\n2. Table extraction:")
         print("   uv run paddleocr-vl.py docs tables-extracted --task-mode table")
         print("\n3. Formula recognition:")
-        print("   uv run paddleocr-vl.py papers formulas --task-mode formula --batch-size 32")
+        print(
+            "   uv run paddleocr-vl.py papers formulas --task-mode formula --batch-size 32"
+        )
         print("\n4. Chart analysis:")
         print("   uv run paddleocr-vl.py diagrams charts-analyzed --task-mode chart")
         print("\n5. Test with small sample:")
         print("   uv run paddleocr-vl.py dataset test --max-samples 10 --shuffle")
         print("\n6. Running on HF Jobs:")
         print("   hf jobs uv run --flavor l4x1 \\")
-        print('     -e HF_TOKEN=$(python3 -c "from huggingface_hub import get_token; print(get_token())") \\')
+        print(
+            '     -e HF_TOKEN=$(python3 -c "from huggingface_hub import get_token; print(get_token())") \\'
+        )
         print("     -e HF_HUB_ENABLE_HF_TRANSFER=1 \\")
-        print("     https://huggingface.co/datasets/uv-scripts/ocr/raw/main/paddleocr-vl.py \\")
+        print(
+            "     https://huggingface.co/datasets/uv-scripts/ocr/raw/main/paddleocr-vl.py \\"
+        )
         print("       input-dataset output-dataset --task-mode ocr")
         print("\n" + "=" * 80)
         print("\nFor full help, run: uv run paddleocr-vl.py --help")
         sys.exit(0)
-
+
     parser = argparse.ArgumentParser(
         description="Document processing using PaddleOCR-VL (0.9B task-specific model)",
         formatter_class=argparse.RawDescriptionHelpFormatter,

@@ -578,7 +592,7 @@ Examples:
   uv run paddleocr-vl.py images output --no-smart-resize
 """,
     )
-
+
     parser.add_argument("input_dataset", help="Input dataset ID from Hugging Face Hub")
     parser.add_argument("output_dataset", help="Output dataset ID for Hugging Face Hub")
     parser.add_argument(

@@ -652,9 +666,9 @@ Examples:
         "--output-column",
         help="Column name for output (default: paddleocr_[task_mode])",
     )
-
+
     args = parser.parse_args()
-
+
     main(
         input_dataset=args.input_dataset,
         output_dataset=args.output_dataset,

@@ -673,4 +687,4 @@ Examples:
         shuffle=args.shuffle,
         seed=args.seed,
         output_column=args.output_column,
-    )
+    )
|