Update files from the datasets library (from 1.16.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
- README.md +1 -0
- eraser_multi_rc.py +26 -27
README.md
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
---
|
|
|
|
| 2 |
languages:
|
| 3 |
- en
|
| 4 |
paperswithcode_id: null
|
|
|
|
| 1 |
---
|
| 2 |
+
pretty_name: Eraser Multi Rc
|
| 3 |
languages:
|
| 4 |
- en
|
| 5 |
paperswithcode_id: null
|
eraser_multi_rc.py
CHANGED
|
@@ -18,7 +18,6 @@
|
|
| 18 |
|
| 19 |
|
| 20 |
import json
|
| 21 |
-
import os
|
| 22 |
|
| 23 |
import datasets
|
| 24 |
|
|
@@ -73,46 +72,46 @@ class EraserMultiRc(datasets.GeneratorBasedBuilder):
|
|
| 73 |
def _split_generators(self, dl_manager):
|
| 74 |
"""Returns SplitGenerators."""
|
| 75 |
|
| 76 |
-
|
| 77 |
-
data_dir = os.path.join(dl_dir, "multirc")
|
| 78 |
return [
|
| 79 |
datasets.SplitGenerator(
|
| 80 |
name=datasets.Split.TRAIN,
|
| 81 |
# These kwargs will be passed to _generate_examples
|
| 82 |
-
gen_kwargs={"
|
| 83 |
),
|
| 84 |
datasets.SplitGenerator(
|
| 85 |
name=datasets.Split.VALIDATION,
|
| 86 |
# These kwargs will be passed to _generate_examples
|
| 87 |
-
gen_kwargs={"
|
| 88 |
),
|
| 89 |
datasets.SplitGenerator(
|
| 90 |
name=datasets.Split.TEST,
|
| 91 |
# These kwargs will be passed to _generate_examples
|
| 92 |
-
gen_kwargs={"
|
| 93 |
),
|
| 94 |
]
|
| 95 |
|
| 96 |
-
def _generate_examples(self,
|
| 97 |
"""Yields examples."""
|
| 98 |
|
| 99 |
-
multirc_dir =
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
|
|
|
|
|
| 18 |
|
| 19 |
|
| 20 |
import json
|
|
|
|
| 21 |
|
| 22 |
import datasets
|
| 23 |
|
|
|
|
| 72 |
def _split_generators(self, dl_manager):
    """Return one SplitGenerator per split (train / validation / test).

    The MultiRC archive is downloaded once; each split receives a fresh
    streaming iterator over the archive's members together with the
    in-archive path of its .jsonl annotation file.
    """
    # Download once; iter_archive streams members without extracting to disk.
    archive = dl_manager.download(_DOWNLOAD_URL)
    # Mapping preserves the original train -> validation -> test order.
    split_to_file = {
        datasets.Split.TRAIN: "multirc/train.jsonl",
        datasets.Split.VALIDATION: "multirc/val.jsonl",
        datasets.Split.TEST: "multirc/test.jsonl",
    }
    return [
        datasets.SplitGenerator(
            name=split_name,
            # These kwargs are forwarded verbatim to _generate_examples.
            gen_kwargs={
                "files": dl_manager.iter_archive(archive),
                "split_file": jsonl_path,
            },
        )
        for split_name, jsonl_path in split_to_file.items()
    ]
|
| 93 |
|
| 94 |
+
def _generate_examples(self, files, split_file):
|
| 95 |
"""Yields examples."""
|
| 96 |
|
| 97 |
+
multirc_dir = "multirc/docs"
|
| 98 |
+
docs = {}
|
| 99 |
+
for path, f in files:
|
| 100 |
+
docs[path] = f.read().decode("utf-8")
|
| 101 |
+
for line in docs[split_file].splitlines():
|
| 102 |
+
row = json.loads(line)
|
| 103 |
+
evidences = []
|
| 104 |
+
|
| 105 |
+
for evidence in row["evidences"][0]:
|
| 106 |
+
docid = evidence["docid"]
|
| 107 |
+
evidences.append(evidence["text"])
|
| 108 |
+
|
| 109 |
+
passage_file = "/".join([multirc_dir, docid])
|
| 110 |
+
passage_text = docs[passage_file]
|
| 111 |
+
|
| 112 |
+
yield row["annotation_id"], {
|
| 113 |
+
"passage": passage_text,
|
| 114 |
+
"query_and_answer": row["query"],
|
| 115 |
+
"label": row["classification"],
|
| 116 |
+
"evidences": evidences,
|
| 117 |
+
}
|