Albert Villanova del Moral committed
Refactor code with baseline ranges

open_access.py CHANGED (+36 -46)
```diff
@@ -111,36 +111,27 @@ class OpenAccess(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
 
+        baseline_paths = {
+            "baseline_file_lists": [],
+            "baseline_archives": [],
+        }
         incremental_paths = {
             "incremental_file_lists": [],
-            "incremental_archives": []
-        }
-        baseline_file_lists = []
-        baseline_archives = []
+            "incremental_archives": [],
+        }
 
         for subset in self.config.subsets:
             url = _URL.format(subset=_SUBSETS[subset])
             basename = f"{_SUBSETS[subset]}_txt."
             # Baselines
             baselines = [f"PMC00{i}xxxxxx.baseline.{_BASELINE_DATE}" for i in _BASELINE_RANGES[subset]]
-            for baseline in baselines:
-                baseline_file_list_url = f"{url}{basename}{baseline}.filelist.csv"
-                try:
-                    baseline_file_list = dl_manager.download(baseline_file_list_url)
-                except FileNotFoundError:  # non-commercial PMC000xxxxxx baseline does not exist
-                    continue
-                baseline_archive_url = f"{url}{basename}{baseline}.tar.gz"
-                try:
-                    baseline_archive = dl_manager.download(baseline_archive_url)
-                except FileNotFoundError:
-                    continue
-                baseline_file_lists.append(baseline_file_list)
-                baseline_archives.append(baseline_archive)
+            baseline_urls = {
+                "baseline_file_lists": [f"{url}{basename}{baseline}.filelist.csv" for baseline in baselines],
+                "baseline_archives": [f"{url}{basename}{baseline}.tar.gz" for baseline in baselines],
+            }
+            paths = dl_manager.download(baseline_urls)
+            baseline_paths["baseline_file_lists"].extend(paths["baseline_file_lists"])
+            baseline_paths["baseline_archives"].extend(paths["baseline_archives"])
             # Incremental
             date_delta = datetime.date.today() - datetime.date.fromisoformat(_BASELINE_DATE)
             incremental_dates = [
```
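The pattern this hunk relies on: `datasets.DownloadManager.download` accepts not just a single URL but a (possibly nested) dict or list of URLs, and returns local paths in the same shape. Combined with `_BASELINE_RANGES[subset]` enumerating only baseline packages that actually exist, the per-URL `try/except FileNotFoundError` fallbacks become unnecessary. A minimal sketch of that behavior; the URLs below are hypothetical stand-ins, not the real PMC endpoints:

```python
from datasets import DownloadManager

# Hypothetical URLs standing in for one baseline's file list and archive.
baseline_urls = {
    "baseline_file_lists": [
        "https://example.org/oa_comm_txt.PMC000xxxxxx.baseline.2021-12-17.filelist.csv",
    ],
    "baseline_archives": [
        "https://example.org/oa_comm_txt.PMC000xxxxxx.baseline.2021-12-17.tar.gz",
    ],
}

dl_manager = DownloadManager()
# download() mirrors the input structure: a dict of lists of URLs comes back as a
# dict of lists of local file paths, fetched and cached in one batch.
paths = dl_manager.download(baseline_urls)
assert sorted(paths) == ["baseline_archives", "baseline_file_lists"]
```

Because the download manager caches by URL, a retried build also re-downloads nothing that already succeeded.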
```diff
@@ -162,8 +153,10 @@ class OpenAccess(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "baseline_file_lists": baseline_file_lists,
-                    "baseline_archives": [dl_manager.iter_archive(archive) for archive in baseline_archives],
+                    "baseline_file_lists": baseline_paths["baseline_file_lists"],
+                    "baseline_archives": [
+                        dl_manager.iter_archive(archive) for archive in baseline_paths["baseline_archives"]
+                    ],
                     "incremental_file_lists": incremental_paths["incremental_file_lists"],
                     "incremental_archives": [
                         dl_manager.iter_archive(archive) for archive in incremental_paths["incremental_archives"]
```
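Both baseline and incremental archives are handed to `_generate_examples` as `dl_manager.iter_archive(...)` generators: `iter_archive` streams `(path, file_object)` pairs straight out of the tarball without extracting it to disk. A short usage sketch, with a hypothetical local archive path:

```python
from datasets import DownloadManager

dl_manager = DownloadManager()
# iter_archive yields (relative_path_inside_tar, open_binary_file) pairs lazily,
# so even multi-gigabyte PMC archives are never unpacked to disk.
for path, file in dl_manager.iter_archive("/tmp/oa_comm_txt.PMC000xxxxxx.baseline.tar.gz"):
    head = file.read(80)  # read-only binary stream positioned at this member
    print(path, len(head))
```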
```diff
@@ -176,28 +169,25 @@ class OpenAccess(datasets.GeneratorBasedBuilder):
         key = 0
         # Baselines
         for baseline_file_list, baseline_archive in zip(baseline_file_lists, baseline_archives):
-            try:
-                baselines = pd.read_csv(baseline_file_list, index_col="Article File").to_dict(orient="index")
-                for path, file in baseline_archive:
-                    data = baselines.pop(path)
-                    content = file.read()
-                    try:
-                        text = content.decode("utf-8").strip()
-                    except UnicodeDecodeError as e:
-                        text = content.decode("latin-1").strip()
-                    data = {
-                        "text": text,
-                        "pmid": data["PMID"],
-                        "accession_id": data["AccessionID"],
-                        "license": data["License"],
-                        "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
-                        "retracted": data["Retracted"],
-                        "citation": data["Article Citation"],
-                    }
-                    yield key, data
-                    key += 1
-            except FileNotFoundError:  # non-commercial PMC000xxxxxx baseline does not exist
-                continue
+            baselines = pd.read_csv(baseline_file_list, index_col="Article File").to_dict(orient="index")
+            for path, file in baseline_archive:
+                data = baselines.pop(path)
+                content = file.read()
+                try:
+                    text = content.decode("utf-8").strip()
+                except UnicodeDecodeError as e:
+                    text = content.decode("latin-1").strip()
+                data = {
+                    "text": text,
+                    "pmid": data["PMID"],
+                    "accession_id": data["AccessionID"],
+                    "license": data["License"],
+                    "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
+                    "retracted": data["Retracted"],
+                    "citation": data["Article Citation"],
+                }
+                yield key, data
+                key += 1
         # Incrementals
         if incremental_file_lists:
             for incremental_file_list, incremental_archive in zip(incremental_file_lists, incremental_archives):
```
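Two details of the retained `_generate_examples` body are worth spelling out. Loading the file list with `index_col="Article File"` and `to_dict(orient="index")` produces a dict keyed by each member's path inside the tarball, so `baselines.pop(path)` is a constant-time metadata lookup while iterating the archive. And text decoding falls back to latin-1, which maps every byte to a code point and therefore cannot fail, for the few files that are not valid UTF-8. A standalone sketch of that fallback, with an illustrative helper name:

```python
def decode_text(content: bytes) -> str:
    """Decode PMC article bytes: prefer UTF-8, fall back to latin-1."""
    try:
        return content.decode("utf-8").strip()
    except UnicodeDecodeError:
        # latin-1 accepts any byte sequence, so this branch never raises.
        return content.decode("latin-1").strip()

assert decode_text("café".encode("utf-8")) == "café"
assert decode_text("café".encode("latin-1")) == "café"
```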