#!/usr/bin/env python3
from __future__ import annotations
import argparse, json, re, random, sys, unicodedata, string
from pathlib import Path
from typing import List, Tuple, Dict, Optional

# ----------- Heuristics -----------
ENT_PHRASE_NONSTART = re.compile(r"(?<!^)([A-Z][a-z]+(?:\s+[A-Z][a-z]+)*)")  # capitalized phrase, not sentence-initial
ENT_PHRASE_ANY      = re.compile(r"([A-Z][a-z]+(?:\s+[A-Z][a-z]+)*)")        # capitalized phrase anywhere
YEAR_RE             = re.compile(r"\b(1[89]\d{2}|20\d{2})\b")                 # four-digit years 1800-2099
NUM_RE              = re.compile(r"\b\d+(?:\.\d+)?\b")                        # integers and decimals
SENT_SPLIT          = re.compile(r"(?<=[.!?。!?])\s+|\n+")                  # split after sentence-final punctuation (incl. CJK) or newlines
# Minimal stopword list; the long-word cloze fallback skips these tokens.
STOPWORDS = set("""
a an and are as at be by for from has he her hers him his i in is it its of on or that the their them they this to was were will with you your
""".split())

# ASCII punctuation to strip during normalization; keep "_", "-", and the apostrophe.
# (Curly quotes are absent from string.punctuation and are mapped to ASCII in norm().)
_PUNCT = "".join(ch for ch in string.punctuation if ch not in "_-'")
_PUNCT_RE = re.compile(f"[{re.escape(_PUNCT)}]")

def norm(s: str) -> str:
    """Lowercase, punctuation-stripped form used as a de-duplication key."""
    s = unicodedata.normalize("NFKC", s)
    s = s.replace("“", "\"").replace("”", "\"").replace("’", "'").replace("‘", "'")
    s = _PUNCT_RE.sub(" ", s)
    s = re.sub(r"\s+", " ", s).strip().lower()
    return s
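# e.g. norm('“Hello, World!”') -> 'hello world'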

def sentence_split(text: str) -> List[str]:
    text = (text or "").strip()
    if not text:
        return []
    return [p.strip() for p in SENT_SPLIT.split(text) if p.strip()]
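# e.g. sentence_split("One. Two! Three") -> ['One.', 'Two!', 'Three']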

def longest_word(s: str) -> Optional[str]:
    toks = re.findall(r"[A-Za-z][A-Za-z\-']+", s)
    toks = [t for t in toks if len(t) >= 7 and t.lower() not in STOPWORDS]
    return max(toks, key=len) if toks else None
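# e.g. longest_word("The committee approved the proposal") -> 'committee'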

def replace_once(sent: str, ans: str) -> Optional[str]:
    idx = sent.find(ans)
    if idx == -1:
        # try case-insensitive
        m = re.search(re.escape(ans), sent, flags=re.IGNORECASE)
        if not m:
            return None
        idx = m.start()
        ans = sent[m.start():m.end()]
    return sent[:idx] + "____" + sent[idx+len(ans):]
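# e.g. replace_once("Paris is in France.", "France") -> 'Paris is in ____.'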

def cloze_candidates_for_sentence(s: str) -> List[Tuple[str,str]]:
    cands: List[Tuple[str,str]] = []

    # 1) Proper noun phrase not at start
    m = ENT_PHRASE_NONSTART.search(s)
    if m:
        ans = m.group(1).strip()
        if 2 <= len(ans) <= 80:
            q = replace_once(s, ans)
            if q and q != s:
                cands.append((q, ans))

    # 2) Proper noun phrase anywhere
    if not cands:
        m = ENT_PHRASE_ANY.search(s)
        if m:
            ans = m.group(1).strip()
            if 2 <= len(ans) <= 80:
                q = replace_once(s, ans)
                if q and q != s:
                    cands.append((q, ans))

    # 3) Year
    if not cands:
        m = YEAR_RE.search(s)
        if m:
            ans = m.group(0)
            q = replace_once(s, ans)
            if q and q != s:
                cands.append((q, ans))

    # 4) Number
    if not cands:
        m = NUM_RE.search(s)
        if m:
            ans = m.group(0)
            q = replace_once(s, ans)
            if q and q != s:
                cands.append((q, ans))

    # 5) Long word fallback
    if not cands:
        lw = longest_word(s)
        if lw:
            ans = lw
            q = replace_once(s, ans)
            if q and q != s:
                cands.append((q, ans))

    # Avoid silly blanks
    cands = [(q, a) for (q, a) in cands if "____" in q and 1 <= len(a) <= 80]
    return cands
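# e.g. cloze_candidates_for_sentence("The tower was completed in Paris in 1889.")
#   -> [('The tower was completed in ____ in 1889.', 'Paris')]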

def generate_doc_qas(doc: Dict, need: int, global_seen: set, rng: random.Random, dedupe_global: bool) -> List[Dict]:
    """Return exactly `need` QAs for this doc.
       Always enforce per-doc uniqueness; enforce global uniqueness only if dedupe_global=True.
    """
    sents: List[str] = doc["sentences"]
    idxs = list(range(len(sents)))
    rng.shuffle(idxs)

    picked: List[Dict] = []
    local_seen = set()

    def try_add(sid: int, q: str, a: str) -> bool:
        key = (norm(q), norm(a))
        if key in local_seen:
            return False
        if dedupe_global and key in global_seen:
            return False
        picked.append({
            "doc_id": doc["doc_id"],
            "sent_id": sid,
            "title": doc.get("title",""),
            "question": q,
            "answer": a
        })
        local_seen.add(key)
        if dedupe_global:
            global_seen.add(key)
        return True

    # pass 1: heuristic cloze candidates
    for sid in idxs:
        if len(picked) >= need:
            break
        for (q, a) in cloze_candidates_for_sentence(sents[sid]):
            if try_add(sid, q, a) and len(picked) >= need:
                break

    # pass 2: deterministic fallback (longest-word cloze)
    if len(picked) < need:
        for sid in range(len(sents)):
            if len(picked) >= need:
                break
            s = sents[sid]
            lw = longest_word(s)
            if not lw:
                continue
            q = replace_once(s, lw)
            if not q or q == s:
                continue
            try_add(sid, q, lw)

    # pass 3: ultra fallback — mask a year or number even if short
    if len(picked) < need:
        for sid in range(len(sents)):
            if len(picked) >= need:
                break
            s = sents[sid]
            m = YEAR_RE.search(s) or NUM_RE.search(s)
            if not m:
                continue
            ans = m.group(0)
            q = replace_once(s, ans)
            if not q or q == s:
                continue
            try_add(sid, q, ans)

    return picked[:need]
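# Each QA record emitted above has the shape:
#   {"doc_id": ..., "sent_id": ..., "title": ..., "question": "... ____ ...", "answer": "..."}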


# ----------- IO / Main -----------
def main():
    ap = argparse.ArgumentParser()
    ap.add_argument("--docs_path", type=str, default="data/wt2raw/train/docs.jsonl",
                    help="Input docs.jsonl (expects fields: doc_id, title, sentences[list[str]])")
    ap.add_argument("--out_path", type=str, default="data/wt2raw/train/qa.jsonl",
                    help="Output qa.jsonl")
    ap.add_argument("--seed", type=int, default=42)
    ap.add_argument("--docs_expected", type=int, default=5135, help="Expected number of docs")
    ap.add_argument("--q_per_doc", type=int, default=3, help="Questions per doc (fixed)")
    ap.add_argument("--dedupe_global", action="store_true",
                help="If set, avoid duplicate (question, answer) pairs across the entire split. "
                     "By default only per-doc de-duplication is enforced.")
    args = ap.parse_args()

    rng = random.Random(args.seed)

    docs: List[Dict] = []
    with open(args.docs_path, "r", encoding="utf-8") as f:
        for line in f:
            if not line.strip():
                continue
            d = json.loads(line)
            # Coerce sentences if someone saved text:
            if "sentences" not in d or not isinstance(d["sentences"], list):
                txt = d.get("text") or d.get("content") or ""
                d["sentences"] = sentence_split(txt)
            if len(d["sentences"]) >= 3:
                docs.append(d)

    if args.docs_expected and len(docs) != args.docs_expected:
        print(f"[warn] docs count = {len(docs)} (expected {args.docs_expected}). Continuing anyway.", file=sys.stderr)

    total_needed = len(docs) * args.q_per_doc
    out_path = Path(args.out_path)
    out_path.parent.mkdir(parents=True, exist_ok=True)

    global_seen = set()
    written = 0
    with open(out_path, "w", encoding="utf-8") as fout:
        for d in docs:
            qas = generate_doc_qas(
                d, need=args.q_per_doc,
                global_seen=global_seen,
                rng=rng,
                dedupe_global=args.dedupe_global,
            )
            # last-resort check: if still short, fail loudly
            if len(qas) < args.q_per_doc:
                print(f"[error] Could not produce {args.q_per_doc} unique QAs for doc_id={d['doc_id']}", file=sys.stderr)
                sys.exit(1)
            for qa in qas:
                fout.write(json.dumps(qa, ensure_ascii=False) + "\n")
                written += 1

    if written != total_needed:
        print(f"[error] Wrote {written} but needed {total_needed}.", file=sys.stderr)
        sys.exit(2)

    print(f"Saved {len(docs)} docs × {args.q_per_doc} = {written} QAs to {out_path}")

if __name__ == "__main__":
    main()
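
# Example invocation (script filename is illustrative):
#   python make_qa.py --docs_path data/wt2raw/train/docs.jsonl \
#       --out_path data/wt2raw/train/qa.jsonl --q_per_doc 3 --dedupe_global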