Upload 3 files
- image_dataset.ipynb +765 -0
- structured_docs_dataset.ipynb +355 -0
- unstructured_docs_dataset.ipynb +333 -0
image_dataset.ipynb
ADDED
@@ -0,0 +1,765 @@
Cell 1 (code):
import pandas as pd
import requests
import zipfile
import io
import os
import shutil
from PIL import Image as PILImage, ImageFile
from tqdm import tqdm
from datasets import Dataset, Features, Value, Image, load_dataset
from huggingface_hub import login, HfApi
import cv2
import concurrent.futures
import csv

Cell 2 (code):
# Allow loading of truncated images
ImageFile.LOAD_TRUNCATED_IMAGES = True
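The tiles handled later in this notebook are large enough to trip PIL's decompression-bomb check (the run below logs a warning at 100 MP). A minimal, optional sketch of raising the pixel limit up front; the cap shown is an illustrative value, not part of the original notebook:

# Optional (assumed value): raise PIL's pixel limit so very large aerial tiles
# open without a DecompressionBombWarning.
PILImage.MAX_IMAGE_PIXELS = 200_000_000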
Cell 3 (code):
def read_excel_and_get_urls(excel_file):
    """
    Read Excel file and extract URLs, tilenames, and zones

    Args:
        excel_file: Path to Excel file

    Returns:
        DataFrame with TILENAME, ZONE, URL columns
    """
    print(f"Reading Excel file: {excel_file}")
    df = pd.read_excel(excel_file)

    # Ensure expected columns exist
    required_columns = ['TILENAME', 'ZONE', 'URL']
    for col in required_columns:
        if col not in df.columns:
            raise ValueError(f"Required column '{col}' not found in Excel file.")

    print(f"Found {len(df)} entries in Excel file")
    return df

def extract_filename_from_url(url):
    """
    Extract the base filename from the URL

    Args:
        url: URL of the zip file

    Returns:
        Base filename without extension
    """
    # Extract filename from URL
    # This may need adjustment based on the URL format
    filename = url.split('/')[-1]
    # Remove .zip extension if present
    if filename.lower().endswith('.zip'):
        filename = os.path.splitext(filename)[0]
    return filename


def download_and_extract_jp2(tilename, zone, url, jp2_dir):
    """
    Download a zip file from the given URL and extract only the JP2 image file

    Args:
        tilename: Name of the tile
        zone: Zone identifier
        url: URL to the zip file
        jp2_dir: Directory to save JP2 images

    Returns:
        Dictionary with image information (jp2_path, tilename, zone)
    """
    try:
        # Download the zip file
        response = requests.get(url, stream=True)

        if response.status_code != 200:
            print(f"Failed to download {tilename}: {response.status_code}")
            return None

        # Ensure JP2 directory exists
        os.makedirs(jp2_dir, exist_ok=True)

        # Extract image files
        with zipfile.ZipFile(io.BytesIO(response.content)) as zip_ref:
            # Get all files in the zip
            all_files = zip_ref.namelist()

            # Filter for JP2 image files
            jp2_files = [f for f in all_files if f.lower().endswith('.jp2')]

            if not jp2_files:
                print(f"No JP2 files found in {tilename} zip")
                return None

            # Get the first JP2 file (assuming one image per zip)
            jp2_file = jp2_files[0]
            jp2_filename = os.path.basename(jp2_file)
            jp2_path = os.path.join(jp2_dir, jp2_filename)

            # Extract JP2 file
            with zip_ref.open(jp2_file) as source, open(jp2_path, 'wb') as target:
                shutil.copyfileobj(source, target)

        return {
            "jp2_path": jp2_path,
            "tilename": tilename,
            "zone": zone
        }

    except Exception as e:
        print(f"Error processing {tilename}: {e}")
        return None

def process_file(jp2_path, jpeg_path):
    try:
        # Read JP2 image
        img = cv2.imread(jp2_path)

        # Check if the image is read properly
        if img is None:
            print(f"Error reading {jp2_path}, skipping.")
            return

        # Save as JPEG
        cv2.imwrite(jpeg_path, img, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
    except Exception as e:
        print(f"Error converting {jp2_path}: {e}")

def convert_jp2_to_jpeg(jp2_dir, jpeg_dir, max_workers=4):
    """
    Convert all JP2 files in a directory to JPEG using OpenCV with multithreading.

    Args:
        jp2_dir: Directory containing JP2 files
        jpeg_dir: Directory to save converted JPEG images
        max_workers: Number of threads to use for processing
    """
    # Ensure output directory exists
    os.makedirs(jpeg_dir, exist_ok=True)

    # Get all JP2 files
    input_files = [f for f in os.listdir(jp2_dir) if f.lower().endswith('.jp2') and f != '.DS_Store']

    print(f"Found {len(input_files)} JP2 files to convert")

    # Prepare task list
    tasks = []
    for f in input_files:
        jp2_path = os.path.join(jp2_dir, f)
        jpeg_filename = os.path.splitext(f)[0] + ".jpg"
        jpeg_path = os.path.join(jpeg_dir, jpeg_filename)

        # Skip if already processed
        if os.path.isfile(jpeg_path):
            print(f"Already processed: {f}")
            continue

        tasks.append((jp2_path, jpeg_path))

    # Process files in parallel
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        list(tqdm(executor.map(lambda args: process_file(*args), tasks), total=len(tasks), desc="Converting JP2 to JPEG"))

def convert_jp2_to_jpeg(jp2_dir, jpeg_dir):
    """
    Convert all JP2 files in a directory to JPEG using OpenCV.

    Args:
        jp2_dir: Directory containing JP2 files
        jpeg_dir: Directory to save converted JPEG images
    """
    # Ensure output directory exists
    os.makedirs(jpeg_dir, exist_ok=True)

    # Get all JP2 files
    input_files = [f for f in os.listdir(jp2_dir) if f.lower().endswith('.jp2') and f != '.DS_Store']

    print(f"Found {len(input_files)} JP2 files to convert")

    # Process files
    for f in tqdm(input_files, desc="Converting JP2 to JPEG"):
        try:
            jp2_path = os.path.join(jp2_dir, f)
            jpeg_filename = os.path.splitext(f)[0] + ".jpg"
            jpeg_path = os.path.join(jpeg_dir, jpeg_filename)

            # Skip if already processed
            if os.path.isfile(jpeg_path):
                print(f"Already processed: {f}")
                continue

            # Read JP2 image
            img = cv2.imread(jp2_path)

            # Check if the image is read properly
            if img is None:
                print(f"Error reading {f}, skipping.")
                continue

            # Save as JPEG
            cv2.imwrite(jpeg_path, img, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
        except Exception as e:
            print(f"Error converting {f}: {e}")

def convert_jp2_to_jpeg(jp2_dir, jpeg_dir):
    """
    Convert all JP2 files in a directory to JPEG

    Args:
        jp2_dir: Directory containing JP2 files
        jpeg_dir: Directory to save converted JPEG images
    """
    # Ensure directories exist
    if not os.path.exists(jpeg_dir):
        os.makedirs(jpeg_dir)
    if not os.path.exists(jp2_dir):
        os.makedirs(jp2_dir)

    # Get all JP2 files
    input_files = os.listdir(jp2_dir)
    input_files = [f for f in input_files if f.lower().endswith('.jp2') and f != '.DS_Store']

    print(f"Found {len(input_files)} JP2 files to convert")

    # Process files one by one
    for f in tqdm(input_files, desc="Converting JP2 to JPEG"):
        try:
            jp2_path = os.path.join(jp2_dir, f)
            jpeg_filename = os.path.splitext(f)[0] + ".jpg"
            jpeg_path = os.path.join(jpeg_dir, jpeg_filename)

            # Skip if already processed
            if os.path.isfile(jpeg_path):
                print(f"Already processed: {f}")
                continue

            # Open and convert the image
            im = PILImage.open(jp2_path)
            if im.mode != 'RGB':
                im = im.convert('RGB')

            # Save as JPEG
            im.save(jpeg_path, 'JPEG', quality=95)
            im.close()
        except Exception as e:
            print(f"Error converting {f}: {e}")

def recreate_image_info_list(excel_file, jpeg_dir):
    """
    Recreate image_info_list by matching jpeg files with Excel entries

    Args:
        excel_file: Path to Excel file
        jpeg_dir: Directory containing JPEG files

    Returns:
        List of dictionaries with image information
    """
    # Read Excel file
    df = read_excel_and_get_urls(excel_file)

    # Create mapping from filename to tilename and zone
    filename_to_metadata = {}

    # Try different approaches to match filenames
    for _, row in df.iterrows():
        tilename = row['TILENAME']
        zone = row['ZONE']
        url = row['URL']

        # Extract filename from URL as a potential match criterion
        extracted_filename = extract_filename_from_url(url)
        filename_to_metadata[extracted_filename] = {'tilename': tilename, 'zone': zone}

        # Also map the tilename directly as another potential match
        filename_to_metadata[tilename] = {'tilename': tilename, 'zone': zone}

    # Get all JPEG files
    jpeg_files = [f for f in os.listdir(jpeg_dir) if f.lower().endswith('.jpg') or f.lower().endswith('.jpeg')]
    print(f"Found {len(jpeg_files)} JPEG files in the directory")

    # Match JPEG files to metadata
    image_info_list = []
    unmatched_files = []

    for jpeg_file in tqdm(jpeg_files, desc="Matching JPEG files to metadata"):
        jpeg_path = os.path.join(jpeg_dir, jpeg_file)
        base_name = os.path.splitext(jpeg_file)[0]

        # Try different matching strategies
        metadata = None

        # Direct match with the extracted filename
        if base_name in filename_to_metadata:
            metadata = filename_to_metadata[base_name]
        else:
            # Try partial matches
            matched_keys = [key for key in filename_to_metadata.keys() if key in base_name or base_name in key]
            if matched_keys:
                # Use the first match if multiple found
                metadata = filename_to_metadata[matched_keys[0]]

        if metadata:
            image_info_list.append({
                "path": jpeg_path,
                "tilename": metadata['tilename'],
                "zone": metadata['zone']
            })
        else:
            unmatched_files.append(jpeg_file)

    print(f"Successfully matched {len(image_info_list)} JPEG files with metadata")

    if unmatched_files:
        print(f"Warning: Could not match {len(unmatched_files)} files with metadata")
        if len(unmatched_files) < 10:
            print("Unmatched files:", unmatched_files)
        else:
            print("First 10 unmatched files:", unmatched_files[:10])

    return image_info_list


def organize_images_for_imagefolder(excel_file, jpeg_dir, output_dir, rename_instead_of_copy=True):
    """
    Organize images and create metadata for ImageFolder format

    Args:
        excel_file: Path to Excel file with metadata
        jpeg_dir: Directory containing JPEG files
        output_dir: Directory to save organized images and metadata
        rename_instead_of_copy: If True, rename/move files instead of copying them

    Returns:
        Path to the organized dataset directory
    """
    print(f"Reading Excel file: {excel_file}")
    df = pd.read_excel(excel_file)

    # Ensure required columns exist
    required_columns = ['TILENAME', 'ZONE', 'URL']
    for col in required_columns:
        if col not in df.columns:
            raise ValueError(f"Required column '{col}' not found in Excel file.")

    # Use the JPEG directory as the train directory if we're renaming
    if rename_instead_of_copy:
        # Create parent directory if it doesn't exist
        os.makedirs(output_dir, exist_ok=True)

        # Just rename the jpeg_dir to be inside the output_dir
        train_dir = os.path.join(output_dir, "train")

        # If the train directory already exists but is different from jpeg_dir, handle it
        if os.path.exists(train_dir) and os.path.abspath(train_dir) != os.path.abspath(jpeg_dir):
            response = input(f"Train directory {train_dir} already exists. Do you want to replace it? (yes/no): ")
            if response.lower() == 'yes':
                shutil.rmtree(train_dir)
            else:
                print("Using existing train directory.")

        # If train_dir doesn't exist, rename jpeg_dir to train_dir
        if not os.path.exists(train_dir):
            print(f"Renaming directory {jpeg_dir} to {train_dir}")
            shutil.move(jpeg_dir, train_dir)
        # If jpeg_dir is already the train_dir, do nothing
        elif os.path.abspath(train_dir) == os.path.abspath(jpeg_dir):
            print(f"JPEG directory is already {train_dir}, no renaming needed")
    else:
        # Create the output directory structure for copying
        os.makedirs(output_dir, exist_ok=True)
        train_dir = os.path.join(output_dir, "train")
        os.makedirs(train_dir, exist_ok=True)

    # Get all JPEG files
    jpeg_files = [f for f in os.listdir(train_dir) if f.lower().endswith('.jpg') or f.lower().endswith('.jpeg')]
    print(f"Found {len(jpeg_files)} JPEG files")

    # Create a mapping of filename to metadata
    filename_to_metadata = {}
    for _, row in df.iterrows():
        tilename = row['TILENAME']
        zone = row['ZONE']
        # Use both the full tilename and the base name for matching
        filename_to_metadata[tilename] = {'tilename': tilename, 'zone': zone}
        filename_to_metadata[os.path.basename(tilename)] = {'tilename': tilename, 'zone': zone}

    # Create metadata.csv file
    metadata_path = os.path.join(train_dir, "metadata.csv")
    with open(metadata_path, 'w', newline='') as csvfile:
        fieldnames = ['file_name', 'tilename', 'zone']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()

        # Add files to metadata
        successful_matches = 0
        for jpeg_file in tqdm(jpeg_files, desc="Creating metadata for images"):
            # Try to match the image to metadata
            base_name = os.path.splitext(jpeg_file)[0]

            # Find matching metadata using different strategies
            metadata = None
            if base_name in filename_to_metadata:
                metadata = filename_to_metadata[base_name]
            else:
                # Try partial matches
                matched_keys = [key for key in filename_to_metadata.keys()
                                if key in base_name or base_name in key]
                if matched_keys:
                    metadata = filename_to_metadata[matched_keys[0]]

            if metadata:
                # Add to metadata.csv
                writer.writerow({
                    'file_name': jpeg_file,
                    'tilename': metadata['tilename'],
                    'zone': metadata['zone']
                })
                successful_matches += 1
            else:
                print(f"Could not find metadata for {jpeg_file}")

    print(f"Successfully matched {successful_matches} images with metadata")

    return output_dir

def upload_dataset_to_hub(dataset_dir, repo_name):
    """
    Upload the dataset to the Hugging Face Hub

    Args:
        dataset_dir: Directory containing the organized dataset
        repo_name: Name of the repository on Hugging Face Hub
    """
    # Load the dataset using ImageFolder
    print(f"Loading dataset from {dataset_dir}")
    dataset = load_dataset("imagefolder", data_dir=dataset_dir)

    # Push to Hugging Face Hub
    print(f"Pushing dataset to Hugging Face Hub: {repo_name}")
    dataset.push_to_hub(repo_name)
    print("Dataset uploaded successfully!")
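A note on the cell above: convert_jp2_to_jpeg is defined three times, so only the last, sequential PIL-based definition is bound when the pipeline below calls it; the multithreaded OpenCV variant (with max_workers) and the sequential OpenCV variant are shadowed and never run. If the threaded path is the one that should run, it needs a distinct name.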
Cell 4 (code):
# Excel file path
excel_file = "/fsx/avijit/projects/datacommonsMA/massgis_2023_aerial_imagery/COQ2023INDEX_POLY.xlsx"

# Define directories
base_dir = "/fsx/avijit/projects/datacommonsMA/massgis_2023_aerial_imagery"
output_dir = "/fsx/avijit/projects/datacommonsMA/massgis_2023_aerial_imagery/imagefolder-dataset"

jp2_dir = os.path.join(base_dir, "jp2-files")
jpeg_dir = os.path.join(base_dir, "jpeg-files")

# Ensure directories exist
os.makedirs(jp2_dir, exist_ok=True)
os.makedirs(jpeg_dir, exist_ok=True)

# Step 1: Read Excel file
print("Step 1: Reading Excel file")
df = read_excel_and_get_urls(excel_file)

# Step 2: Download and extract JP2 files
print("Step 2: Downloading and extracting JP2 files")
jp2_info_list = []

for idx, row in tqdm(df.iterrows(), total=len(df), desc="Downloading ZIP files"):
    tilename = row['TILENAME']
    zone = row['ZONE']
    url = row['URL']

    info = download_and_extract_jp2(tilename, zone, url, jp2_dir)
    if info is not None:
        jp2_info_list.append(info)

print(f"Successfully downloaded {len(jp2_info_list)} JP2 files")

# Step 3: Batch convert JP2 to JPEG
print("Step 3: Converting JP2 to JPEG")
convert_jp2_to_jpeg(jp2_dir, jpeg_dir)

# Step 4: Create image info list for dataset creation
print("Step 4: Creating image info list")
image_info_list = recreate_image_info_list(excel_file, jpeg_dir)

print(f"Found {len(image_info_list)} matched JPEG files")

# Step 5: Delete JP2 files to save space
print("Step 5: Deleting JP2 files...")
shutil.rmtree(jp2_dir)
print("JP2 files deleted")

Output:
Step 4: Creating image info list
Reading Excel file: /fsx/avijit/projects/datacommonsMA/massgis_2023_aerial_imagery/COQ2023INDEX_POLY.xlsx
Found 10218 entries in Excel file
Found 10218 JPEG files in the directory
Matching JPEG files to metadata: 100%|██████████| 10218/10218 [00:00<00:00, 616174.46it/s]
Successfully matched 10218 JPEG files with metadata
Found 10218 matched JPEG files
Step 5: Deleting JP2 files...
JP2 files deleted
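The download loop above fetches roughly 10,000 zip files one at a time. Since concurrent.futures is already imported, a minimal sketch of a threaded version (an assumption for speed, not what the notebook ran; the worker count is illustrative):

# Hedged sketch: download tiles with a small thread pool instead of sequentially.
def download_row(row):
    return download_and_extract_jp2(row['TILENAME'], row['ZONE'], row['URL'], jp2_dir)

with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
    results = list(tqdm(executor.map(download_row, (row for _, row in df.iterrows())),
                        total=len(df), desc="Downloading ZIP files"))
jp2_info_list = [r for r in results if r is not None]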
Cell 5 (code):
# Login to Hugging Face (use your API token)
api_token = input("Enter your Hugging Face API token: ")
login(token=api_token)


# Create and push Hugging Face dataset
hf_dataset_name = input("Enter the name for your Hugging Face dataset (username/dataset-name): ")

# upload_dataset_to_hub expects the ImageFolder directory, so organize the JPEGs
# (with their metadata.csv) into that layout first and push the resulting directory.
dataset_dir = organize_images_for_imagefolder(excel_file, jpeg_dir, output_dir)
upload_dataset_to_hub(dataset_dir, hf_dataset_name)

print("Done!")
Cell 6 (code, execution_count 19):
import os
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from PIL import Image as PILImage
import numpy as np
from tqdm import tqdm

base_dir = "/fsx/avijit/projects/datacommonsMA/massgis_2023_aerial_imagery/imagefolder-dataset"
train_dir = os.path.join(base_dir, "train")
output_dir = os.path.join(base_dir, "data")  # Matches the preview config
output_path = os.path.join(output_dir, "sample_dataset_256x256.parquet")

# Create the output directory if it doesn't exist
os.makedirs(output_dir, exist_ok=True)

target_size = (256, 256)
num_samples = 50

metadata_path = os.path.join(train_dir, "metadata.csv")
print(f"Reading metadata from {metadata_path}")
metadata_df = pd.read_csv(metadata_path)

# Take a random sample of 50 rows
if len(metadata_df) > num_samples:
    metadata_df = metadata_df.sample(n=num_samples, random_state=42)

print(f"Selected {len(metadata_df)} samples")

# Initialize lists to store data
image_paths = []
tilenames = []
zones = []

processed_images_dir = os.path.join(output_dir, "processed_images")
os.makedirs(processed_images_dir, exist_ok=True)

# Process each image
for _, row in tqdm(metadata_df.iterrows(), total=len(metadata_df), desc="Processing images"):
    try:
        # Get image path
        img_path = os.path.join(train_dir, row['file_name'])

        # Open, resize, and save the processed image
        with PILImage.open(img_path) as img:
            # Convert to RGB if needed
            if img.mode != 'RGB':
                img = img.convert('RGB')

            # Resize to target size
            img_resized = img.resize(target_size)

            # Save the resized image in a separate folder
            output_image_path = os.path.join(processed_images_dir, row['file_name'])
            img_resized.save(output_image_path, format='JPEG', quality=90)

        # Append data
        image_paths.append(output_image_path)  # Store file path instead of bytes
        tilenames.append(row['tilename'])
        zones.append(int(row['zone']))  # Ensure zone is an integer
    except Exception as e:
        print(f"Error processing {row['file_name']}: {e}")

# Convert lists to Arrow arrays
image_array = pa.array(image_paths, type=pa.string())  # Store paths as strings
tilename_array = pa.array(tilenames, type=pa.string())
zone_array = pa.array(zones, type=pa.int64())

# Define schema explicitly
schema = pa.schema([
    ('image', pa.string()),   # Store as file path (datasets library will auto-load)
    ('tilename', pa.string()),
    ('zone', pa.int64())
])

# Create Arrow Table using the schema
table = pa.Table.from_arrays([image_array, tilename_array, zone_array], schema=schema)

# Write to Parquet
pq.write_table(table, output_path, compression='snappy', flavor=['spark'])

print(f"Saved sample dataset to {output_path}")
print(f"Processed images saved in {processed_images_dir}")
print(f"File size: {os.path.getsize(output_path) / (1024 * 1024):.2f} MB")

# To check if the schema matches what's expected
print("Schema of the created Parquet file:")
parquet_schema = pq.read_schema(output_path)
print(parquet_schema)

Output:
Reading metadata from /fsx/avijit/projects/datacommonsMA/massgis_2023_aerial_imagery/imagefolder-dataset/train/metadata.csv
Selected 50 samples
Processing images:   0%|          | 0/50 [00:00<?, ?it/s]
/fsx/avijit/anaconda3/envs/py312/lib/python3.12/site-packages/PIL/Image.py:3402: DecompressionBombWarning: Image size (100000000 pixels) exceeds limit of 89478485 pixels, could be decompression bomb DOS attack.
  warnings.warn(
Processing images: 100%|██████████| 50/50 [00:48<00:00, 1.04it/s]
Saved sample dataset to /fsx/avijit/projects/datacommonsMA/massgis_2023_aerial_imagery/imagefolder-dataset/data/sample_dataset_256x256.parquet
Processed images saved in /fsx/avijit/projects/datacommonsMA/massgis_2023_aerial_imagery/imagefolder-dataset/data/processed_images
File size: 0.00 MB
Schema of the created Parquet file:
image: string
tilename: string
zone: int64
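The Parquet sample above stores the image column as local path strings, which a dataset viewer cannot render as pictures by itself. If embedded images are wanted, one option is to build a Dataset from the same lists and cast the column to an Image feature; this is a sketch using the already-imported datasets library, and the repository name is a placeholder:

sample_ds = Dataset.from_dict({
    "image": image_paths,   # local file paths collected in the loop above
    "tilename": tilenames,
    "zone": zones,
}).cast_column("image", Image())  # embeds the image bytes when saved or pushed

# sample_ds.push_to_hub("username/massgis-2023-aerial-sample")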
Cell 7 (code, execution_count 20):
parquet_file = pq.ParquetFile(output_path)
print(parquet_file.metadata)

Output:
<pyarrow._parquet.FileMetaData object at 0x7fa0858cc590>
  created_by: parquet-cpp-arrow version 19.0.0
  num_columns: 3
  num_rows: 50
  num_row_groups: 1
  format_version: 2.6
  serialized_size: 943

Cell 8 (code): empty.

Notebook metadata: kernelspec py312 (Python), nbformat 4 (minor 2).
structured_docs_dataset.ipynb
ADDED
@@ -0,0 +1,355 @@
Cell 1 (code, execution_count 9):
import os
import pandas as pd
import glob
import re

Cell 2 (code, execution_count 24):
def combine_excel_files(directory_path, output_csv='combined_mcas_data.csv', output_parquet='combined_mcas_data.parquet'):
    """
    Combine all Excel files in a directory into a single dataset with proper headers.
    Converts to both CSV and Parquet formats.
    Handles data types: numbers for most fields, strings for District name, subject, district code and year.

    Parameters:
        directory_path (str): Path to the directory containing Excel files
        output_csv (str): Name of the output CSV file
        output_parquet (str): Name of the output Parquet file
    """
    # Get list of all Excel files in the directory
    all_files = glob.glob(os.path.join(directory_path, "*.xlsx"))

    # Create an empty list to store DataFrames
    dfs = []
    all_columns_sets = []

    # First pass - examine structure of the first file to identify headers
    if all_files:
        # Read the first few rows of the first file to inspect
        sample_df = pd.read_excel(all_files[0], header=None, nrows=10)
        print(f"Preview of first file ({all_files[0]}):")
        print(sample_df.head(10))

        # Find the actual header row by looking for rows where many columns have values
        non_empty_counts = sample_df.notna().sum(axis=1)
        potential_header_rows = non_empty_counts[non_empty_counts > 5].index.tolist()

        if potential_header_rows:
            header_row = potential_header_rows[1] if len(potential_header_rows) > 1 else potential_header_rows[0]
            # Use row index where we detect MCAS data pattern
            for i in potential_header_rows:
                if 'DISTRICT NAME' in str(sample_df.iloc[i].values).upper() or 'SCHOOL NAME' in str(sample_df.iloc[i].values).upper():
                    header_row = i
                    break
            print(f"Detected header row at index {header_row}: {sample_df.iloc[header_row].tolist()}")
        else:
            header_row = 0
            print("Could not detect header row, using first row")

    # Get the first file's column order as a reference
    first_file_columns = None

    # Loop through each Excel file
    for file in all_files:
        try:
            # Extract year from filename
            year_match = re.search(r'NextGenMCAS_(\d{4})\.xlsx', os.path.basename(file))
            if year_match:
                year = year_match.group(1)
            else:
                year = "Unknown"

            # Define columns that should be treated as strings
            string_cols = []

            # First check what columns exist in the file
            temp_df = pd.read_excel(file, header=header_row, nrows=0)
            for col in temp_df.columns:
                col_str = str(col).upper()
                if ('DISTRICT' in col_str and ('CODE' in col_str or 'ID' in col_str or 'NUMBER' in col_str)) or \
                   ('DISTRICT NAME' in col_str) or ('SUBJECT' in col_str):
                    string_cols.append(col)

            # Read the Excel file
            df = pd.read_excel(
                file,
                header=header_row,
                dtype={col: str for col in string_cols}  # Convert specified columns to string
            )

            # Clean column names
            df.columns = [col.strip().replace('\n', ' ') if isinstance(col, str) else str(col) for col in df.columns]

            # Store the first file's column order
            if first_file_columns is None:
                first_file_columns = df.columns.tolist()

            # Add year column as string
            df['Year'] = str(year)

            # Store the columns from this file
            all_columns_sets.append(set(df.columns))

            # Append to list
            dfs.append(df)
            print(f"Successfully processed: {file} (Year: {year})")
        except Exception as e:
            print(f"Error processing {file}: {e}")

    # Find common columns across all files (intersection of all column sets)
    if all_columns_sets:
        common_columns = set.intersection(*all_columns_sets)
        print(f"Common columns across all files: {common_columns}")

        # Ensure 'Year' is in common columns
        if 'Year' not in common_columns:
            common_columns.add('Year')

        # Keep only common columns in each DataFrame
        for i in range(len(dfs)):
            dfs[i] = dfs[i][list(common_columns)]

    # Combine all DataFrames
    if dfs:
        # Combine DataFrames with only the common columns
        combined_df = pd.concat(dfs, ignore_index=True)

        # Remove rows that are likely headers from other files
        possible_header_rows = []
        for col in common_columns:
            if col != 'Year':  # Skip checking Year column
                for i, row in combined_df.iterrows():
                    for val in row:
                        if isinstance(val, str) and col.lower() in str(val).lower():
                            possible_header_rows.append(i)

        # Remove duplicate header rows
        possible_header_rows = list(set(possible_header_rows))
        print(f"Removing {len(possible_header_rows)} possible header rows")
        combined_df = combined_df.drop(possible_header_rows, errors='ignore')

        # Identify string columns and numeric columns
        string_columns = []
        district_col = None
        district_code_col = None
        subject_col = None

        for col in combined_df.columns:
            col_upper = col.upper()
            if 'DISTRICT NAME' in col_upper:
                district_col = col
                string_columns.append(col)
            elif ('DISTRICT' in col_upper and ('CODE' in col_upper or 'ID' in col_upper or 'NUMBER' in col_upper)):
                district_code_col = col
                string_columns.append(col)
            elif 'SUBJECT' in col_upper:
                subject_col = col
                string_columns.append(col)

        # Add Year to string columns
        string_columns.append('Year')

        # Convert all other columns to numeric
        for col in combined_df.columns:
            if col not in string_columns:
                # Try to convert to numeric, replace errors with NaN
                combined_df[col] = pd.to_numeric(combined_df[col], errors='coerce')

        # Reorder columns to put DISTRICT NAME and Year first
        if district_col:
            # Create new column order with district_col and Year first, then others in original order
            remaining_cols = [col for col in combined_df.columns
                              if col != district_col and col != 'Year']

            # Sort remaining columns based on their order in the first file
            if first_file_columns:
                # Get the indices of each column in the original order
                col_indices = {}
                for i, col in enumerate(first_file_columns):
                    if col in remaining_cols:
                        col_indices[col] = i

                # Sort remaining columns based on their original indices
                remaining_cols.sort(key=lambda col: col_indices.get(col, 999))

            new_column_order = [district_col, 'Year'] + remaining_cols
            combined_df = combined_df[new_column_order]

        # Export to CSV
        csv_path = os.path.join(directory_path, output_csv)
        combined_df.to_csv(csv_path, index=False)
        print(f"Successfully created CSV: {csv_path}")

        # Export to Parquet
        parquet_path = os.path.join(directory_path, output_parquet)
        combined_df.to_parquet(parquet_path, index=False)
        print(f"Successfully created Parquet: {parquet_path}")

        print(f"Combined {len(dfs)} Excel files with {len(combined_df)} total rows")
        print(f"Final columns: {', '.join(combined_df.columns)}")

        # Print data types for verification
        print("\nData types in final dataset:")
        for col, dtype in combined_df.dtypes.items():
            print(f"{col}: {dtype}")
    else:
        print("No Excel files were successfully processed")
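One thing worth noting from the raw preview in the next cell: count columns appear as text with thousands separators (for example "1,908"), and pd.to_numeric(errors='coerce') turns such strings into NaN, which may be why the count columns come out as float64 in the final dtypes. A minimal, assumption-flagged sketch of a more forgiving coercion:

def to_numeric_safe(series):
    # Strip thousands separators before coercing, so "1,908" becomes 1908 instead of NaN.
    cleaned = series.astype(str).str.replace(',', '', regex=False).str.strip()
    return pd.to_numeric(cleaned, errors='coerce')

# e.g. inside combine_excel_files, for the non-string columns:
# combined_df[col] = to_numeric_safe(combined_df[col])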
| 212 |
+
{
|
| 213 |
+
"cell_type": "code",
|
| 214 |
+
"execution_count": 25,
|
| 215 |
+
"metadata": {},
|
| 216 |
+
"outputs": [
|
| 217 |
+
{
|
| 218 |
+
"name": "stdout",
|
| 219 |
+
"output_type": "stream",
|
| 220 |
+
"text": [
|
| 221 |
+
"Preview of first file (/fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2018.xlsx):\n",
|
| 222 |
+
" 0 1 2 \\\n",
|
| 223 |
+
"0 2018 Next Generation MCAS Achievement Results NaN NaN \n",
|
| 224 |
+
"1 District Name District Code Subject \n",
|
| 225 |
+
"2 Abby Kelley Foster Charter Public (District) 04450000 ELA \n",
|
| 226 |
+
"3 Abby Kelley Foster Charter Public (District) 04450000 MATH \n",
|
| 227 |
+
"4 Abington 00010000 ELA \n",
|
| 228 |
+
"5 Abington 00010000 MATH \n",
|
| 229 |
+
"6 Academy Of the Pacific Rim Charter Public (Dis... 04120000 ELA \n",
|
| 230 |
+
"7 Academy Of the Pacific Rim Charter Public (Dis... 04120000 MATH \n",
|
| 231 |
+
"8 Acton-Boxborough 06000000 ELA \n",
|
| 232 |
+
"9 Acton-Boxborough 06000000 MATH \n",
|
| 233 |
+
"\n",
|
| 234 |
+
" 3 4 5 6 7 8 9 \\\n",
|
| 235 |
+
"0 NaN NaN NaN NaN NaN NaN NaN \n",
|
| 236 |
+
"1 M+E # M+E % E # E % M # M % PM # \n",
|
| 237 |
+
"2 313 44 31 4 282 39 347 \n",
|
| 238 |
+
"3 266 37 20 3 246 34 390 \n",
|
| 239 |
+
"4 554 55 90 9 464 46 374 \n",
|
| 240 |
+
"5 482 48 46 5 436 44 440 \n",
|
| 241 |
+
"6 138 44 15 5 123 39 151 \n",
|
| 242 |
+
"7 103 33 10 3 93 30 175 \n",
|
| 243 |
+
"8 1,908 73 522 20 1,386 53 622 \n",
|
| 244 |
+
"9 1,882 72 574 22 1,308 50 619 \n",
|
| 245 |
+
"\n",
|
| 246 |
+
" 10 11 12 13 14 \\\n",
|
| 247 |
+
"0 NaN NaN NaN NaN NaN \n",
|
| 248 |
+
"1 PM % NM # NM % No. of Students Included Avg. Scaled Score \n",
|
| 249 |
+
"2 48 56 8 716 497.3 \n",
|
| 250 |
+
"3 54 61 9 717 494.6 \n",
|
| 251 |
+
"4 37 75 7 1,003 502.3 \n",
|
| 252 |
+
"5 44 79 8 1,001 498.7 \n",
|
| 253 |
+
"6 48 26 8 315 497.3 \n",
|
| 254 |
+
"7 56 37 12 315 492.3 \n",
|
| 255 |
+
"8 24 92 4 2,622 513.3 \n",
|
| 256 |
+
"9 24 118 5 2,619 512.7 \n",
|
| 257 |
+
"\n",
|
| 258 |
+
" 15 16 \n",
|
| 259 |
+
"0 NaN NaN \n",
|
| 260 |
+
"1 SGP Included In SGP \n",
|
| 261 |
+
"2 48.7 586 \n",
|
| 262 |
+
"3 52.9 587 \n",
|
| 263 |
+
"4 50.2 784 \n",
|
| 264 |
+
"5 47.3 785 \n",
|
| 265 |
+
"6 57.7 289 \n",
|
| 266 |
+
"7 52.9 289 \n",
|
| 267 |
+
"8 58.9 2,069 \n",
|
| 268 |
+
"9 58.8 2,073 \n",
|
| 269 |
+
"Detected header row at index 1: ['District Name', 'District Code', 'Subject', 'M+E #', 'M+E %', 'E #', 'E %', 'M #', 'M %', 'PM #', 'PM %', 'NM #', 'NM %', 'No. of Students Included', 'Avg. Scaled Score', 'SGP', 'Included In SGP']\n",
|
| 270 |
+
"Successfully processed: /fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2018.xlsx (Year: 2018)\n",
|
| 271 |
+
"Successfully processed: /fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2017.xlsx (Year: 2017)\n",
|
| 272 |
+
"Successfully processed: /fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2020.xlsx (Year: 2020)\n",
|
| 273 |
+
"Successfully processed: /fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2024.xlsx (Year: 2024)\n",
|
| 274 |
+
"Successfully processed: /fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2021.xlsx (Year: 2021)\n",
|
| 275 |
+
"Successfully processed: /fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2022.xlsx (Year: 2022)\n",
|
| 276 |
+
"Successfully processed: /fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2023.xlsx (Year: 2023)\n",
|
| 277 |
+
"Successfully processed: /fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2019.xlsx (Year: 2019)\n",
|
| 278 |
+
"Common columns across all files: {'E %', 'M+E #', 'PM #', 'SGP', 'No. of Students Included', 'NM #', 'District Code', 'Avg. Scaled Score', 'M #', 'Year', 'Included In SGP', 'PM %', 'E #', 'NM %', 'M+E %', 'Subject', 'M %', 'District Name'}\n"
|
| 279 |
+
]
|
| 280 |
+
},
|
| 281 |
+
{
|
| 282 |
+
"name": "stderr",
|
| 283 |
+
"output_type": "stream",
|
| 284 |
+
"text": [
|
| 285 |
+
"/tmp/ipykernel_2144425/3133257492.py:108: FutureWarning: The behavior of DataFrame concatenation with empty or all-NA entries is deprecated. In a future version, this will no longer exclude empty or all-NA columns when determining the result dtypes. To retain the old behavior, exclude the relevant entries before the concat operation.\n",
|
| 286 |
+
" combined_df = pd.concat(dfs, ignore_index=True)\n"
|
| 287 |
+
]
|
| 288 |
+
},
|
| 289 |
+
{
|
| 290 |
+
"name": "stdout",
|
| 291 |
+
"output_type": "stream",
|
| 292 |
+
"text": [
|
| 293 |
+
"Removing 0 possible header rows\n",
|
| 294 |
+
"Successfully created CSV: /fsx/avijit/projects/datacommonsMA/mcas_results/combined_mcas_data.csv\n",
|
| 295 |
+
"Successfully created Parquet: /fsx/avijit/projects/datacommonsMA/mcas_results/combined_mcas_data.parquet\n",
|
| 296 |
+
"Combined 8 Excel files with 6741 total rows\n",
|
| 297 |
+
"Final columns: District Name, Year, District Code, Subject, M+E #, M+E %, E #, E %, M #, M %, PM #, PM %, NM #, NM %, No. of Students Included, Avg. Scaled Score, SGP, Included In SGP\n",
|
| 298 |
+
"\n",
|
| 299 |
+
"Data types in final dataset:\n",
|
| 300 |
+
"District Name: object\n",
|
| 301 |
+
"Year: object\n",
|
| 302 |
+
"District Code: object\n",
|
| 303 |
+
"Subject: object\n",
|
| 304 |
+
"M+E #: float64\n",
|
| 305 |
+
"M+E %: int64\n",
|
| 306 |
+
"E #: float64\n",
|
| 307 |
+
"E %: int64\n",
|
| 308 |
+
"M #: float64\n",
|
| 309 |
+
"M %: int64\n",
|
| 310 |
+
"PM #: float64\n",
|
| 311 |
+
"PM %: int64\n",
|
| 312 |
+
"NM #: float64\n",
|
| 313 |
+
"NM %: int64\n",
|
| 314 |
+
"No. of Students Included: float64\n",
|
| 315 |
+
"Avg. Scaled Score: float64\n",
|
| 316 |
+
"SGP: float64\n",
|
| 317 |
+
"Included In SGP: float64\n"
|
| 318 |
+
]
|
| 319 |
+
}
|
| 320 |
+
],
|
| 321 |
+
"source": [
|
| 322 |
+
"data_folder = \"/fsx/avijit/projects/datacommonsMA/mcas_results\"\n",
|
| 323 |
+
"combine_excel_files(data_folder)"
|
| 324 |
+
]
|
| 325 |
+
},
|
| 326 |
+
{
|
| 327 |
+
"cell_type": "code",
|
| 328 |
+
"execution_count": null,
|
| 329 |
+
"metadata": {},
|
| 330 |
+
"outputs": [],
|
| 331 |
+
"source": []
|
| 332 |
+
}
|
| 333 |
+
],
|
| 334 |
+
"metadata": {
|
| 335 |
+
"kernelspec": {
|
| 336 |
+
"display_name": "py312",
|
| 337 |
+
"language": "python",
|
| 338 |
+
"name": "py312"
|
| 339 |
+
},
|
| 340 |
+
"language_info": {
|
| 341 |
+
"codemirror_mode": {
|
| 342 |
+
"name": "ipython",
|
| 343 |
+
"version": 3
|
| 344 |
+
},
|
| 345 |
+
"file_extension": ".py",
|
| 346 |
+
"mimetype": "text/x-python",
|
| 347 |
+
"name": "python",
|
| 348 |
+
"nbconvert_exporter": "python",
|
| 349 |
+
"pygments_lexer": "ipython3",
|
| 350 |
+
"version": "3.12.9"
|
| 351 |
+
}
|
| 352 |
+
},
|
| 353 |
+
"nbformat": 4,
|
| 354 |
+
"nbformat_minor": 2
|
| 355 |
+
}
|
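Note on the FutureWarning captured in the stderr output above: pandas is deprecating how `pd.concat` treats empty or all-NA frames. A minimal sketch of one way to avoid the warning is below, assuming `dfs` stands in for the list of per-year DataFrames built inside `combine_excel_files`; the filtering step and the toy frames are illustrations, not part of the notebook.

```python
import pandas as pd

# Toy stand-ins for the per-year DataFrames read from the Excel files (hypothetical).
dfs = [
    pd.DataFrame({"District Name": ["District A"], "Year": [2017]}),
    pd.DataFrame(),  # an empty frame like this is what triggers the FutureWarning
]

# Drop empty or all-NA frames before concatenating, as the warning message suggests.
non_empty = [df for df in dfs if not df.empty and not df.isna().all().all()]
combined_df = pd.concat(non_empty, ignore_index=True)
print(combined_df)
```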
unstructured_docs_dataset.ipynb
ADDED
@@ -0,0 +1,333 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from PIL import Image\n",
"from transformers import AutoProcessor, AutoModelForVision2Seq\n",
"import re\n",
"import html\n",
"from threading import Thread\n",
"from transformers.generation.streamers import TextIteratorStreamer\n",
"from docling_core.types.doc import DoclingDocument\n",
"from docling_core.types.doc.document import DocTagsDocument\n",
"import fitz\n",
"import os\n",
"import pandas as pd\n",
"import json\n",
"from tqdm import tqdm"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"# Load model and processor\n",
"processor = AutoProcessor.from_pretrained(\"ds4sd/SmolDocling-256M-preview\")\n",
"model = AutoModelForVision2Seq.from_pretrained(\n",
"    \"ds4sd/SmolDocling-256M-preview\", \n",
"    torch_dtype=torch.bfloat16\n",
").to(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"def image_to_markdown(image, query_text=\"Convert this page to docling.\"):\n",
"    \"\"\"\n",
"    Convert an input image to markdown output using SmolDocling model\n",
"    \n",
"    Parameters:\n",
"    image: Input image file in RGB\n",
"    query_text (str): Query text to guide the conversion (default: \"Convert this page to docling.\")\n",
"    \n",
"    Returns:\n",
"    str: Markdown output of the converted image\n",
"    \"\"\"\n",
"    \n",
"    # Special handling for code or OTSL content\n",
"    if \"OTSL\" in query_text or \"code\" in query_text:\n",
"        # Add padding to image as in the original code\n",
"        width, height = image.size\n",
"        pad_w = int(width * 0.1) # 10% padding\n",
"        pad_h = int(height * 0.1) # 10% padding\n",
"        corner_pixel = image.getpixel((0, 0))\n",
"        from PIL import ImageOps\n",
"        image = ImageOps.expand(image, border=(pad_w, pad_h, pad_w, pad_h), fill=corner_pixel)\n",
"        \n",
"    # Prepare input for the model\n",
"    resulting_messages = [\n",
"        {\n",
"            \"role\": \"user\",\n",
"            \"content\": [{\"type\": \"image\"}] + [\n",
"                {\"type\": \"text\", \"text\": query_text}\n",
"            ]\n",
"        }\n",
"    ]\n",
"    \n",
"    prompt = processor.apply_chat_template(resulting_messages, add_generation_prompt=True)\n",
"    inputs = processor(text=prompt, images=[[image]], return_tensors=\"pt\").to(model.device)\n",
"    \n",
"    # Generate output using streamer for better memory management\n",
"    streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=False)\n",
"    generation_args = dict(inputs, streamer=streamer, max_new_tokens=8192)\n",
"    \n",
"    thread = Thread(target=model.generate, kwargs=generation_args)\n",
"    thread.start()\n",
"    \n",
"    # Collect the generated output\n",
"    full_output = \"\"\n",
"    for new_text in streamer:\n",
"        full_output += new_text\n",
"    \n",
"    # Clean up the output\n",
"    cleaned_output = full_output.replace(\"<end_of_utterance>\", \"\").strip()\n",
"    \n",
"    # Process doctags if present\n",
"    if any(tag in cleaned_output for tag in [\"<doctag>\", \"<otsl>\", \"<code>\", \"<chart>\", \"<formula>\"]):\n",
"        doctag_output = cleaned_output\n",
"        \n",
"        # Handle chart tags\n",
"        if \"<chart>\" in doctag_output:\n",
"            doctag_output = doctag_output.replace(\"<chart>\", \"<otsl>\").replace(\"</chart>\", \"</otsl>\")\n",
"            doctag_output = re.sub(r'(<loc_500>)(?!.*<loc_500>)<[^>]+>', r'\\1', doctag_output)\n",
"        \n",
"        # Create document and convert to markdown\n",
"        doc = DoclingDocument(name=\"Document\")\n",
"        doctags_doc = DocTagsDocument.from_doctags_and_image_pairs([doctag_output], [image])\n",
"        doc.load_from_doctags(doctags_doc)\n",
"        \n",
"        return doc.export_to_markdown()\n",
"    \n",
"    # Return the cleaned output if no doctags are present\n",
"    return cleaned_output"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"def process_pdfs_folder(pdf_folder, output_folder):\n",
"    \"\"\"\n",
"    Process all PDFs in a folder, converting each page to markdown immediately and saving results as JSON.\n",
"    \n",
"    Parameters:\n",
"    pdf_folder (str): Path to folder containing PDFs\n",
"    output_folder (str): Path to save output JSON files\n",
"    \"\"\"\n",
"    # Create output folder if it doesn't exist\n",
"    if not os.path.exists(output_folder):\n",
"        os.makedirs(output_folder)\n",
"    \n",
"    # Get all PDF files in the folder\n",
"    pdf_files = [f for f in os.listdir(pdf_folder) if f.lower().endswith('.pdf')]\n",
"    \n",
"    # Process each PDF file\n",
"    for pdf_file in tqdm(pdf_files, desc=\"Processing PDFs\"):\n",
"        pdf_path = os.path.join(pdf_folder, pdf_file)\n",
"        pdf_name = os.path.splitext(pdf_file)[0]\n",
"        output_json = os.path.join(output_folder, f\"{pdf_name}.json\")\n",
"        \n",
"        # Initialize an empty list to store the data\n",
"        pdf_data = []\n",
"        \n",
"        try:\n",
"            # Open the PDF\n",
"            pdf_document = fitz.open(pdf_path)\n",
"            total_pages = pdf_document.page_count\n",
"            \n",
"            print(f\"Processing {pdf_file} ({total_pages} pages)\")\n",
"            \n",
"            # Process each page one by one\n",
"            for page_number in tqdm(range(total_pages), desc=f\"Pages in {pdf_file}\", leave=False):\n",
"                try:\n",
"                    # Get the page\n",
"                    page = pdf_document[page_number]\n",
"                    \n",
"                    # Convert page to image\n",
"                    pixmap = page.get_pixmap()\n",
"                    image = Image.frombytes(\"RGB\", [pixmap.width, pixmap.height], pixmap.samples)\n",
"                    \n",
"                    # Convert image to markdown immediately\n",
"                    markdown_text = image_to_markdown(image)\n",
"                    \n",
"                    # Display first 100 characters for verification\n",
"                    preview = markdown_text[:100].replace('\\n', ' ')\n",
"                    print(f\"Page {page_number+1}/{total_pages}: {preview}...\")\n",
"                    \n",
"                    # Add to data list\n",
"                    page_data = {\n",
"                        'pdf_name': pdf_name,\n",
"                        'slide_number': page_number+1,\n",
"                        'markdown_text': markdown_text\n",
"                    }\n",
"                    pdf_data.append(page_data)\n",
"                    \n",
"                    # Save JSON after each page\n",
"                    with open(output_json, 'w', encoding='utf-8') as jsonfile:\n",
"                        json.dump(pdf_data, jsonfile, ensure_ascii=False, indent=2)\n",
"                    \n",
"                except Exception as e:\n",
"                    error_msg = f\"Error processing page {page_number+1} from {pdf_file}: {e}\"\n",
"                    print(error_msg)\n",
"                    # Add error info to data\n",
"                    error_data = {\n",
"                        'pdf_name': pdf_name,\n",
"                        'slide_number': page_number+1,\n",
"                        'markdown_text': f\"ERROR: {str(e)}\"\n",
"                    }\n",
"                    pdf_data.append(error_data)\n",
"                    \n",
"                    # Save JSON after error\n",
"                    with open(output_json, 'w', encoding='utf-8') as jsonfile:\n",
"                        json.dump(pdf_data, jsonfile, ensure_ascii=False, indent=2)\n",
"            \n",
"            # Close the PDF after processing\n",
"            pdf_document.close()\n",
"        \n",
"        except Exception as e:\n",
"            error_msg = f\"Error opening PDF {pdf_file}: {e}\"\n",
"            print(error_msg)\n",
"            error_data = {\n",
"                'pdf_name': pdf_name,\n",
"                'slide_number': 1,\n",
"                'markdown_text': f\"ERROR: Failed to process PDF: {str(e)}\"\n",
"            }\n",
"            pdf_data.append(error_data)\n",
"            \n",
"            # Save JSON after PDF error\n",
"            with open(output_json, 'w', encoding='utf-8') as jsonfile:\n",
"                json.dump(pdf_data, jsonfile, ensure_ascii=False, indent=2)\n",
"    \n",
"    print(f\"Processing complete. Results saved to {output_folder}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# input_folder = \"/fsx/avijit/projects/datacommonsMA/labormarketreports/pdfs\" ## First convert the ppts to pdf\n",
"# output_folder = \"/fsx/avijit/projects/datacommonsMA/labormarketreports/processed_reports\"\n",
"\n",
"input_folder = \"/fsx/avijit/projects/datacommonsMA/occupational_injury_reports/pdfs\"\n",
"output_folder = \"/fsx/avijit/projects/datacommonsMA/occupational_injury_reports/processed_reports\"\n",
"\n",
"process_pdfs_folder(input_folder,output_folder)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def combine_json_files(folder_path, output_file=\"combined_results.json\"):\n",
"    \"\"\"\n",
"    Read individual JSON files from a folder and simply concatenate them,\n",
"    changing \"pdf_name\" to \"report name\" in each entry.\n",
"    \n",
"    Args:\n",
"        folder_path (str): Path to the folder containing JSON files\n",
"        output_file (str): Path to save the combined JSON file\n",
"    \n",
"    Returns:\n",
"        list: The combined data list\n",
"    \"\"\"\n",
"    import json\n",
"    from pathlib import Path\n",
"    \n",
"    # Initialize data list\n",
"    combined_data = []\n",
"    \n",
"    # Get all JSON files in the folder\n",
"    folder_path = Path(folder_path)\n",
"    json_files = list(folder_path.glob(\"*.json\"))\n",
"    \n",
"    if not json_files:\n",
"        print(f\"No JSON files found in {folder_path}\")\n",
"        return []\n",
"    \n",
"    print(f\"Found {len(json_files)} JSON files in {folder_path}\")\n",
"    \n",
"    # Read each JSON file\n",
"    for json_file in json_files:\n",
"        try:\n",
"            with open(json_file, \"r\", encoding=\"utf-8\") as f:\n",
"                file_data = json.load(f)\n",
"            \n",
"            # Handle both list and single object formats\n",
"            if isinstance(file_data, list):\n",
"                items = file_data\n",
"            else:\n",
"                items = [file_data]\n",
"            \n",
"            # Rename pdf_name to report name in each item\n",
"            for item in items:\n",
"                if \"pdf_name\" in item:\n",
"                    item[\"report name\"] = item.pop(\"pdf_name\")\n",
"                    item[\"page number\"] = item.pop(\"slide_number\")\n",
"            \n",
"            # Add to combined data\n",
"            combined_data.extend(items)\n",
"        \n",
"        except Exception as e:\n",
"            print(f\"Error reading {json_file}: {e}\")\n",
"    \n",
"    # Write the combined list into the already-open file handle\n",
"    with open(output_file, \"w\", encoding=\"utf-8\") as f:\n",
"        json.dump(combined_data, f, indent=2, ensure_ascii=False)\n",
"    \n",
"    print(f\"Combined {len(combined_data)} items into {output_file}\")\n",
"    return combined_data"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found 7 JSON files in /fsx/avijit/projects/datacommonsMA/occupational_injury_reports/processed_reports\n",
"Error reading /fsx/avijit/projects/datacommonsMA/occupational_injury_reports/processed_reports/combined_reports.json: Extra data: line 73 column 1 (char 109380)\n",
"Combined 78 items into occupational_injury_combined_reports.json\n"
]
}
],
"source": [
"combined_data = combine_json_files(output_folder, \"occupational_injury_combined_reports.json\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "py312",
"language": "python",
"name": "py312"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
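Note on reusing the pipeline above: the per-page conversion can be exercised on a single PDF page without running the whole folder loop. A minimal sketch follows, assuming the notebook's `image_to_markdown` (with the SmolDocling model and processor) is already loaded in the session; the PDF path is a placeholder, not one of the project files.

```python
import fitz  # PyMuPDF
from PIL import Image

pdf_path = "example_report.pdf"  # placeholder path for illustration only

# Render the first page to an RGB image, mirroring the loop in process_pdfs_folder.
pdf_document = fitz.open(pdf_path)
page = pdf_document[0]
pixmap = page.get_pixmap()
image = Image.frombytes("RGB", [pixmap.width, pixmap.height], pixmap.samples)

# Convert the page image to markdown with the notebook's helper and preview the result.
markdown_text = image_to_markdown(image)
print(markdown_text[:200])

pdf_document.close()
```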