Create Sample_Generate_dataset_file.py
Browse files
Sample_Generate_dataset_file.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
create_cloze_qa_dataset.py
|
| 3 |
+
-------------------------------------
|
| 4 |
+
Generate Cloze-style QA dataset from WikiText-2.
|
| 5 |
+
Each sentence produces one 'fill-in-the-blank' question
|
| 6 |
+
with a single correct answer.
|
| 7 |
+
|
| 8 |
+
Output: JSONL files for train / validation / test.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from datasets import load_dataset
|
| 12 |
+
import re
|
| 13 |
+
import json
|
| 14 |
+
from pathlib import Path
|
| 15 |
+
import random
|
| 16 |
+
|
| 17 |
+
# Load WikiText-2
|
| 18 |
+
print("🔹 Loading WikiText-2 dataset ...")
|
| 19 |
+
dataset = load_dataset("wikitext", "wikitext-2-raw-v1")
|
| 20 |
+
|
| 21 |
+
# Output directories
|
| 22 |
+
output_dir = Path("cloze_qa_dataset")
|
| 23 |
+
output_dir.mkdir(exist_ok=True, parents=True)
|
| 24 |
+
|
| 25 |
+
def create_cloze_question(sentence: str):
    """
    Convert a sentence into a Cloze-style question by masking one entity/keyword.

    A capitalized word (used as a cheap proxy for a named entity) is chosen
    at random and its first occurrence is replaced by '____'.

    Fix over the original: words shorter than 3 characters are filtered out
    BEFORE the random choice, so a sentence is no longer discarded just
    because the random pick landed on a short word (e.g. 'He', 'It') while
    longer candidates existed. The original's `question == sentence` guard
    was dead code (the chosen answer is always a substring of the sentence,
    so the replacement always succeeds) and has been removed.

    Parameters
    ----------
    sentence : str
        The source sentence to turn into a question.

    Returns
    -------
    tuple[str, str] | None
        (question, answer), both stripped, or None if the sentence contains
        no suitable capitalized word.
    """
    # Capitalized words of length >= 3 are the masking candidates.
    candidates = [
        w for w in re.findall(r"\b[A-Z][a-zA-Z]+\b", sentence) if len(w) >= 3
    ]
    if not candidates:
        return None

    answer = random.choice(candidates)
    # Mask only the first occurrence so exactly one blank appears.
    question = sentence.replace(answer, "____", 1)
    return question.strip(), answer.strip()
|
| 40 |
+
|
| 41 |
+
def generate_qa_split(split_name, data):
    """
    Build Cloze QA pairs for one dataset split and write them as JSONL.

    Each non-empty document in the split is broken into sentences, each
    sentence is passed to create_cloze_question, and every successful pair
    is written as one JSON object per line to
    ``output_dir / f"{split_name}.jsonl"``.

    Parameters
    ----------
    split_name : str
        Name of the split ("train", "validation", or "test") — also the
        output file stem.
    data : dataset split
        A split exposing a "text" column of raw document strings.
    """
    out_file = output_dir / f"{split_name}.jsonl"
    written = 0
    with open(out_file, "w", encoding="utf-8") as sink:
        for doc_id, raw_text in enumerate(data["text"]):
            stripped = raw_text.strip()
            if not stripped:
                # WikiText contains many blank filler lines — skip them.
                continue
            # Naive sentence splitter: break after ., ! or ? followed by spaces.
            for sent_id, sentence in enumerate(re.split(r'(?<=[.!?]) +', stripped)):
                pair = create_cloze_question(sentence)
                if pair is None:
                    continue
                question, answer = pair
                sink.write(json.dumps({
                    "doc_id": doc_id,
                    "sent_id": sent_id,
                    "title": None,
                    "question": question,
                    "answer": answer
                }, ensure_ascii=False) + "\n")
                written += 1
    print(f" Saved {written} QA pairs to {out_file}")
|
| 66 |
+
|
| 67 |
+
# Generate datasets: one JSONL file per WikiText-2 split.
for split_name in ("train", "validation", "test"):
    generate_qa_split(split_name, dataset[split_name])

print("\nAll splits processed and saved successfully")
|