Update dataset.py

Removes dead code from dataset.py: commented-out debug prints, the abandoned ent_masks/ent_ners/label_mask code paths, the unused konlpy Okt tokenizer import, and a leftover data-problem counter.

dataset.py CHANGED (+1 −143)
@@ -3,7 +3,6 @@ import os
 import os.path as osp
 import json
 import numpy as np
-# from konlpy.tag import Okt

 import torch
 import torch.nn.functional as F
@@ -240,11 +239,6 @@ class JointDataset(Dataset):
             # debug
             for ent_k, ent_h in zip(ent_pos_kor, ent_pos_han):
                 assert len(ent_k) == len(ent_h)
-            # print(json_file)
-            # pprint.pprint(ex["entity"])
-            # print(entities_kor)
-            # print(entities_han)
-            # break


             ### labels ###
@@ -259,19 +253,7 @@ class JointDataset(Dataset):
                 if h_idx is None or t_idx is None:
                     num_filtered_labels += 1
                     continue
-
-                # TODO: idx has to match across languages, otherwise the label won't be universal.
-                # if h_idx != h_idx2 or t_idx != t_idx2:
-                #     import pdb; pdb.set_trace()
-                # assert h_idx == h_idx2 and t_idx == t_idx2

-                # debugging
-                if not( h_idx == h_idx2 and t_idx == t_idx2) :
-                    # print(f"fname: {json_file}")
-                    # pprint.pprint(relation)
-                    N_data_problems += 1
-                    continue
-
                 r_idx = self.label_map[relation["kor"]["label"]]
                 labels[h_idx, t_idx, r_idx] = 1
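The guard deleted in this hunk enforced that a relation's head/tail entity indices agree between the Korean and Hanja views before a shared label is written; after the cleanup the loader simply assumes that invariant holds upstream. A minimal sketch of the check as a standalone predicate, assuming only what the removed lines show (the relation["kor"]/relation["han"] split and the h_idx == h_idx2 and t_idx == t_idx2 comparison); the index accessor is hypothetical:

# Hypothetical predicate for the cross-language alignment invariant.
# get_indices is injected because the exact schema of a relation entry
# is not visible in this diff.
def relation_is_aligned(relation, get_indices):
    h_idx, t_idx = get_indices(relation["kor"])    # Korean-side entity indices
    h_idx2, t_idx2 = get_indices(relation["han"])  # Hanja-side entity indices
    return h_idx == h_idx2 and t_idx == t_idx2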
@@ -292,16 +274,6 @@ class JointDataset(Dataset):
                 "text_han": ex["text"]["han"]
             })

-            # self.features.append({
-            #     "input_ids_kor": input_ids_kor,
-            #     "input_ids_han": input_ids_han,
-            #     "ent_pos_kor": ent_pos_kor,
-            #     "ent_pos_han": ent_pos_han,
-            #     "labels": labels
-            # })
-
-        print(f"# problems in (h_idx == h_idx2 and t_idx == t_idx2) : {N_data_problems}")
-
         logging.info(f"# of empty entity examples filtered: {num_empty_entity_examples}")
         logging.info(f"# of empty label examples filtered: {num_empty_label_examples}")
         logging.info(f"# of beyond-truncated-text labels filtered: {num_filtered_labels}")
@@ -362,8 +334,6 @@ class KoreanDataset(Dataset):
         self.split = split
         self.features = []

-        # self.word_tokenizer = Okt()
-
         self.save_dir = osp.join(args.data_dir, args.language)
         self.save_path = osp.join(self.save_dir, f"{args.model_type}_{split}.pt")
         os.makedirs(self.save_dir, exist_ok=True)
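For orientation, the two surviving lines above define the feature-cache convention: one cache file per language and model type. A toy illustration with invented values (the Args container and its field values are stand-ins for whatever argparse namespace the real code uses):

import os
import os.path as osp

class Args:
    # Hypothetical stand-in for the real argparse namespace; values invented.
    data_dir, language, model_type = "data", "kor", "bert"

args, split = Args(), "train"
save_dir = osp.join(args.data_dir, args.language)                # "data/kor"
save_path = osp.join(save_dir, f"{args.model_type}_{split}.pt")  # "data/kor/bert_train.pt"
os.makedirs(save_dir, exist_ok=True)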
@@ -392,7 +362,6 @@ class KoreanDataset(Dataset):

         logging.info(f"Creating features from {self.args.data_dir}")
         rootdir = osp.join(self.args.data_dir, f"{self.split}")
-        # print(f"Current directory: {rootdir}")

         for json_file in tqdm(os.listdir(rootdir), desc="Converting examples to features"):
             with open(osp.join(rootdir, json_file), encoding='utf-8') as f:
@@ -478,28 +447,6 @@ class KoreanDataset(Dataset):
                     ent_pos[-1].append((token_start, token_end))
                     # ent_ner[-1].append(ment[-1])

-            # ent_masks, ent_ners = [], []
-            # for ent in entities:
-            #     ent_mask = np.zeros(len(input_ids), dtype=np.float32)
-            #     ent_ner = np.zeros(len(input_ids), dtype=np.float32)
-
-            #     for ment in ent:
-            #         start, end = ment[3], ment[4]
-            #         # Skip entity mentions that appear beyond the truncated text
-            #         if (start > self.args.max_seq_length-num_special_tokens or
-            #             end > self.args.max_seq_length-num_special_tokens):
-            #             continue
-            #         ent_mask[start:end] = 1
-            #         ent_ner[start:end] = self.ner_map[ment[5]]
-
-            #     assert ent_mask.sum() != 0
-
-            #     ent_masks.append(ent_mask)
-            #     ent_ners.append(ent_ner)
-
-            # ent_masks = np.stack(ent_masks, axis=0)
-            # ent_ners = np.stack(ent_ners, axis=0)
-
             ### labels ###
             labels = torch.zeros((len(entities), len(entities), self.config.num_labels), dtype=torch.float32)
             for relation in ex["relation"]:
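What survives the deletion is span-only bookkeeping: per entity, a list of (token_start, token_end) mention spans. A self-contained sketch of that shape, assuming only that ment[3]/ment[4] are token offsets as the diff shows; the sample mention tuples are invented:

def build_ent_pos(entities):
    # Per entity: collect (token_start, token_end) for each mention,
    # mirroring the surviving loop in the diff (ment[3], ment[4]).
    ent_pos = []
    for ent in entities:
        ent_pos.append([])
        for ment in ent:
            token_start, token_end = ment[3], ment[4]
            ent_pos[-1].append((token_start, token_end))
    return ent_pos

# Invented mention tuples; only indices 3 and 4 matter here.
entities = [
    [("mention-a", 0, 0, 2, 4)],                               # entity 0: one mention
    [("mention-b", 0, 0, 7, 8), ("mention-c", 1, 0, 12, 13)],  # entity 1: two mentions
]
print(build_ent_pos(entities))  # [[(2, 4)], [(7, 8), (12, 13)]]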
@@ -517,28 +464,13 @@ class KoreanDataset(Dataset):
             for t in range(len(entities)):
                 if torch.all(labels[h][t] == 0):
                     labels[h][t][0] = 1
-
-            ### label mask ###
-            # label_mask = np.ones((len(entities), len(entities)), dtype='bool')
-            # np.fill_diagonal(label_mask, 0) # ignore diagonals
-
-            # TODO: normalize ent_masks (test normalization vs. not)
-            # ent_masks = ent_masks / np.expand_dims(ent_masks.sum(1), axis=1)
-
+
             self.features.append({
                 "input_ids": input_ids,
                 "ent_pos": ent_pos,
                 "labels": labels,
             })

-            # self.features.append({
-            #     "input_ids": input_ids,
-            #     "ent_masks": ent_masks,
-            #     "ent_ners": ent_ners,
-            #     "labels": labels,
-            #     "label_mask": label_mask
-            # })
-
         logging.info(f"# of empty entity examples filtered: {num_empty_entity_examples}")
         logging.info(f"# of empty label examples filtered: {num_empty_label_examples}")
         logging.info(f"# of beyond-truncated-text labels filtered: {num_filtered_labels}")
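The retained labeling scheme builds a (num_entities, num_entities, num_labels) one-hot tensor and then routes every untouched entity pair to class 0, so index 0 acts as the "no relation" class. A minimal self-contained sketch of that recipe (build_labels and the toy triples are illustrative, not from the repo):

import torch

def build_labels(num_entities, num_labels, triples):
    # triples: (h_idx, t_idx, r_idx) with r_idx >= 1 for real relations.
    labels = torch.zeros((num_entities, num_entities, num_labels), dtype=torch.float32)
    for h_idx, t_idx, r_idx in triples:
        labels[h_idx, t_idx, r_idx] = 1
    # Pairs with no positive class fall back to index 0, mirroring the
    # `if torch.all(labels[h][t] == 0): labels[h][t][0] = 1` loop above.
    na_pairs = labels.sum(dim=-1) == 0
    labels[na_pairs, 0] = 1.0
    return labels

labels = build_labels(num_entities=3, num_labels=4, triples=[(0, 2, 3)])
print(labels.shape)           # torch.Size([3, 3, 4])
print(labels[0, 2].tolist())  # [0.0, 0.0, 0.0, 1.0]
print(labels[1, 1].tolist())  # [1.0, 0.0, 0.0, 0.0]  <- NA class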
@@ -548,32 +480,15 @@ class KoreanDataset(Dataset):

     def collate_fn(self, samples):
         input_ids = [x["input_ids"] for x in samples]
-
         ent_pos = [x["ent_pos"] for x in samples]
-        # max_ent_len = max([len(x["ent_pos"]) for x in samples])
-        # ent_masks = [F.pad(torch.from_numpy(x["ent_masks"]), \
-        #     (0, 0, 0, max_ent_len-x["ent_masks"].shape[0])) for x in samples]
-        # ent_ners = [F.pad(torch.from_numpy(x["ent_ners"]), \
-        #     (0, 0, 0, max_ent_len-x["ent_ners"].shape[0])) for x in samples]
-
         labels = [x["labels"].view(-1, self.config.num_labels) for x in samples]
-        # labels = [F.pad(torch.from_numpy(x["labels"]), \
-        #     (0, 0, 0, max_ent_len-x["labels"].shape[0], 0, max_ent_len-x["labels"].shape[1]), value=-100) for x in samples]
-        # label_mask = [F.pad(torch.from_numpy(x["label_mask"]), \
-        #     (0, max_ent_len-x["label_mask"].shape[0], 0, max_ent_len-x["label_mask"].shape[1])) for x in samples]

         input_ids = torch.tensor(input_ids, dtype=torch.long)
-        # ent_masks = torch.stack(ent_masks, dim=0)
         labels = torch.cat(labels, dim=0)
-        # labels = torch.stack(labels, dim=0)
-        # label_mask = torch.stack(label_mask, dim=0)

         return {"input_ids": input_ids,
                 "ent_pos": ent_pos,
-                # "ent_masks": ent_masks,
-                # "ent_ners": ent_ners,
                 "labels": labels,
-                # "label_mask": label_mask,
                 }

     def __len__(self):
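After this cleanup, collate_fn does no dynamic padding: torch.tensor(input_ids) on a list of lists only works because every example is presumably already padded/truncated to the same length upstream, ent_pos is passed through as nested Python lists (spans are consumed per example, not as a tensor), and each per-example (n, n, num_labels) label tensor is flattened and concatenated. A self-contained toy run of the same pattern (NUM_LABELS and the samples are invented):

import torch

NUM_LABELS = 4

def collate_fn(samples):
    input_ids = [x["input_ids"] for x in samples]
    ent_pos = [x["ent_pos"] for x in samples]
    labels = [x["labels"].view(-1, NUM_LABELS) for x in samples]
    input_ids = torch.tensor(input_ids, dtype=torch.long)  # requires equal lengths
    labels = torch.cat(labels, dim=0)                      # (sum_i n_i^2, NUM_LABELS)
    return {"input_ids": input_ids, "ent_pos": ent_pos, "labels": labels}

samples = [
    {"input_ids": [1, 2, 3, 0], "ent_pos": [[(0, 1)], [(1, 3)]],
     "labels": torch.zeros(2, 2, NUM_LABELS)},
    {"input_ids": [4, 5, 0, 0], "ent_pos": [[(0, 2)]],
     "labels": torch.zeros(1, 1, NUM_LABELS)},
]
batch = collate_fn(samples)
print(batch["input_ids"].shape, batch["labels"].shape)  # torch.Size([2, 4]) torch.Size([5, 4])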
@@ -623,7 +538,6 @@ class HanjaDataset(Dataset):

         logging.info(f"Creating features from {self.args.data_dir}")
         rootdir = osp.join(self.args.data_dir, f"{self.split}")
-        # print(f"Current directory: {rootdir}")

         for json_file in tqdm(os.listdir(rootdir), desc="Converting examples to features"):
             with open(osp.join(rootdir, json_file), encoding='utf-8') as f:
@@ -702,34 +616,10 @@ class HanjaDataset(Dataset):
             ent_pos, ent_ner = [], []
             for ent in entities:
                 ent_pos.append([])
-                # ent_ner.append([])
                 for ment in ent:
                     token_start, token_end = ment[3], ment[4]
                     ent_pos[-1].append((token_start, token_end))
-                    # ent_ner[-1].append(ment[-1])
-
-            # ent_masks, ent_ners = [], []
-            # for ent in entities:
-            #     ent_mask = np.zeros(len(input_ids), dtype=np.float32)
-            #     ent_ner = np.zeros(len(input_ids), dtype=np.float32)
-
-            #     for ment in ent:
-            #         start, end = ment[3], ment[4]
-            #         # Skip entity mentions that appear beyond the truncated text
-            #         if (start > self.args.max_seq_length-num_special_tokens or
-            #             end > self.args.max_seq_length-num_special_tokens):
-            #             continue
-            #         ent_mask[start:end] = 1
-            #         ent_ner[start:end] = self.ner_map[ment[5]]
-
-            #     assert ent_mask.sum() != 0
-
-            #     ent_masks.append(ent_mask)
-            #     ent_ners.append(ent_ner)

-            # ent_masks = np.stack(ent_masks, axis=0)
-            # ent_ners = np.stack(ent_ners, axis=0)
-
             ### labels ###
             labels = torch.zeros((len(entities), len(entities), self.config.num_labels), dtype=torch.float32)
             for relation in ex["relation"]:
@@ -748,27 +638,12 @@ class HanjaDataset(Dataset):
                 if torch.all(labels[h][t] == 0):
                     labels[h][t][0] = 1

-            ### label mask ###
-            # label_mask = np.ones((len(entities), len(entities)), dtype='bool')
-            # np.fill_diagonal(label_mask, 0) # ignore diagonals
-
-            # TODO: normalize ent_masks (test normalization vs. not)
-            # ent_masks = ent_masks / np.expand_dims(ent_masks.sum(1), axis=1)
-
             self.features.append({
                 "input_ids": input_ids,
                 "ent_pos": ent_pos,
                 "labels": labels,
             })

-            # self.features.append({
-            #     "input_ids": input_ids,
-            #     "ent_masks": ent_masks,
-            #     "ent_ners": ent_ners,
-            #     "labels": labels,
-            #     "label_mask": label_mask
-            # })
-
         logging.info(f"# of empty entity examples filtered: {num_empty_entity_examples}")
         logging.info(f"# of empty label examples filtered: {num_empty_label_examples}")
         logging.info(f"# of beyond-truncated-text labels filtered: {num_filtered_labels}")
@@ -779,30 +654,13 @@ class HanjaDataset(Dataset):
         input_ids = [x["input_ids"] for x in samples]

         ent_pos = [x["ent_pos"] for x in samples]
-        # max_ent_len = max([len(x["ent_pos"]) for x in samples])
-        # ent_masks = [F.pad(torch.from_numpy(x["ent_masks"]), \
-        #     (0, 0, 0, max_ent_len-x["ent_masks"].shape[0])) for x in samples]
-        # ent_ners = [F.pad(torch.from_numpy(x["ent_ners"]), \
-        #     (0, 0, 0, max_ent_len-x["ent_ners"].shape[0])) for x in samples]
-
         labels = [x["labels"].view(-1, self.config.num_labels) for x in samples]
-        # labels = [F.pad(torch.from_numpy(x["labels"]), \
-        #     (0, 0, 0, max_ent_len-x["labels"].shape[0], 0, max_ent_len-x["labels"].shape[1]), value=-100) for x in samples]
-        # label_mask = [F.pad(torch.from_numpy(x["label_mask"]), \
-        #     (0, max_ent_len-x["label_mask"].shape[0], 0, max_ent_len-x["label_mask"].shape[1])) for x in samples]
-
         input_ids = torch.tensor(input_ids, dtype=torch.long)
-        # ent_masks = torch.stack(ent_masks, dim=0)
         labels = torch.cat(labels, dim=0)
-        # labels = torch.stack(labels, dim=0)
-        # label_mask = torch.stack(label_mask, dim=0)

         return {"input_ids": input_ids,
                 "ent_pos": ent_pos,
-                # "ent_masks": ent_masks,
-                # "ent_ners": ent_ners,
                 "labels": labels,
-                # "label_mask": label_mask,
                 }

     def __len__(self):