Convert dataset to Parquet (#3)
- Convert dataset to Parquet (abdeb74746b0c4d98b421e57be1c2cb54f2ce8c6)
- Add ar_to_es data files (51aafbdbb45cf6b8e824eda9c62337089865161b)
- Add ar_to_fr data files (198836abe38385a0aa1e66bcc12c348fd227a500)
- Add ar_to_ru data files (b758ce2716cc97c0e3db061c962a01b092add13f)
- Add ar_to_zh data files (68509b83a975e37a11270d72f78055b640c0a279)
- Add en_to_es data files (d3d56ce259ff84865be3d95676036f3693ab37ae)
- Add en_to_fr data files (0bd52f050d709fb5d392cef184e16f573c7f227e)
- Add en_to_ru data files (fee77c3bcc51815cd6746dd5cb71067dd19c7c39)
- Add en_to_zh data files (7a20303bf6901ec4cc58e80e82177e6e11f5b168)
- Add es_to_fr data files (290cc29cd9cdbc3c4ce24dda260bc98d7f4a3690)
- Add es_to_ru data files (9369497693d2f93aba3176bb191d9655da5e4541)
- Add es_to_zh data files (181f93b86a5e0659733f866341c23a933b936ae5)
- Add fr_to_ru data files (a96a77ec66701448f7c08944c22262b330cecb0a)
- Add fr_to_zh data files (af074c99e3a982ac6a12cb6edc345b9045eb0392)
- Add ru_to_zh data files (d8ceda69e75dc94b8ebb019104fedfbaa35c8d83)
- Delete loading script (26d179e8cf1f30e4d28aa1d5d7a043af42e2b3fd)

Files changed:
- README.md +122 -62
- ar_to_en/train-00000-of-00001.parquet +3 -0
- ar_to_es/train-00000-of-00001.parquet +3 -0
- ar_to_fr/train-00000-of-00001.parquet +3 -0
- ar_to_ru/train-00000-of-00001.parquet +3 -0
- ar_to_zh/train-00000-of-00001.parquet +3 -0
- en_to_es/train-00000-of-00001.parquet +3 -0
- en_to_fr/train-00000-of-00001.parquet +3 -0
- en_to_ru/train-00000-of-00001.parquet +3 -0
- en_to_zh/train-00000-of-00001.parquet +3 -0
- es_to_fr/train-00000-of-00001.parquet +3 -0
- es_to_ru/train-00000-of-00001.parquet +3 -0
- es_to_zh/train-00000-of-00001.parquet +3 -0
- fr_to_ru/train-00000-of-00001.parquet +3 -0
- fr_to_zh/train-00000-of-00001.parquet +3 -0
- ru_to_zh/train-00000-of-00001.parquet +3 -0
- un_ga.py +0 -154

README.md

@@ -21,8 +21,23 @@ source_datasets:
 task_categories:
 - translation
 task_ids: []
-paperswithcode_id: null
 pretty_name: UnGa
+config_names:
+- ar-to-en
+- ar-to-es
+- ar-to-fr
+- ar-to-ru
+- ar-to-zh
+- en-to-es
+- en-to-fr
+- en-to-ru
+- en-to-zh
+- es-to-fr
+- es-to-ru
+- es-to-zh
+- fr-to-ru
+- fr-to-zh
+- ru-to-zh
 dataset_info:
 - config_name: ar_to_en
   features:
@@ -36,10 +51,10 @@ dataset_info:
   - en
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 53122776
     num_examples: 74067
-  download_size:
-  dataset_size:
+  download_size: 21418697
+  dataset_size: 53122776
 - config_name: ar_to_es
   features:
   - name: id
@@ -52,10 +67,10 @@ dataset_info:
   - es
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 55728615
     num_examples: 74067
-  download_size:
-  dataset_size:
+  download_size: 22724976
+  dataset_size: 55728615
 - config_name: ar_to_fr
   features:
   - name: id
@@ -68,10 +83,10 @@ dataset_info:
   - fr
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 55930802
     num_examples: 74067
-  download_size:
-  dataset_size:
+  download_size: 23035904
+  dataset_size: 55930802
 - config_name: ar_to_ru
   features:
   - name: id
@@ -84,10 +99,10 @@ dataset_info:
   - ru
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 72657625
     num_examples: 74067
-  download_size:
-  dataset_size:
+  download_size: 28279669
+  dataset_size: 72657625
 - config_name: ar_to_zh
   features:
   - name: id
@@ -100,10 +115,10 @@ dataset_info:
   - zh
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 48217579
     num_examples: 74067
-  download_size:
-  dataset_size:
+  download_size: 20391116
+  dataset_size: 48217579
 - config_name: en_to_es
   features:
   - name: id
@@ -116,10 +131,10 @@ dataset_info:
   - es
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 45358770
     num_examples: 74067
-  download_size:
-  dataset_size:
+  download_size: 19229141
+  dataset_size: 45358770
 - config_name: en_to_fr
   features:
   - name: id
@@ -132,10 +147,10 @@ dataset_info:
   - fr
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 45560957
     num_examples: 74067
-  download_size:
-  dataset_size:
+  download_size: 19540063
+  dataset_size: 45560957
 - config_name: en_to_ru
   features:
   - name: id
@@ -148,10 +163,10 @@ dataset_info:
   - ru
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 62287780
     num_examples: 74067
-  download_size:
-  dataset_size:
+  download_size: 24783812
+  dataset_size: 62287780
 - config_name: en_to_zh
   features:
   - name: id
@@ -164,10 +179,10 @@ dataset_info:
   - zh
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 37847734
     num_examples: 74067
-  download_size:
-  dataset_size:
+  download_size: 16895275
+  dataset_size: 37847734
 - config_name: es_to_fr
   features:
   - name: id
@@ -180,10 +195,10 @@ dataset_info:
   - fr
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 48166796
     num_examples: 74067
-  download_size:
-  dataset_size:
+  download_size: 20846355
+  dataset_size: 48166796
 - config_name: es_to_ru
   features:
   - name: id
@@ -196,10 +211,10 @@ dataset_info:
   - ru
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 64893619
     num_examples: 74067
-  download_size:
-  dataset_size:
+  download_size: 26090092
+  dataset_size: 64893619
 - config_name: es_to_zh
   features:
   - name: id
@@ -212,10 +227,10 @@ dataset_info:
   - zh
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 40453573
     num_examples: 74067
-  download_size:
-  dataset_size:
+  download_size: 18201560
+  dataset_size: 40453573
 - config_name: fr_to_ru
   features:
   - name: id
@@ -228,10 +243,10 @@ dataset_info:
   - ru
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 65095806
     num_examples: 74067
-  download_size:
-  dataset_size:
+  download_size: 26401015
+  dataset_size: 65095806
 - config_name: fr_to_zh
   features:
   - name: id
@@ -244,10 +259,10 @@ dataset_info:
   - zh
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 40655760
     num_examples: 74067
-  download_size:
-  dataset_size:
+  download_size: 18512482
+  dataset_size: 40655760
 - config_name: ru_to_zh
   features:
   - name: id
@@ -260,26 +275,71 @@ dataset_info:
   - zh
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 57382583
     num_examples: 74067
-  download_size:
-  dataset_size:
+  download_size: 23756229
+  dataset_size: 57382583
+configs:
+- config_name: ar_to_en
+  data_files:
+  - split: train
+    path: ar_to_en/train-*
+- config_name: ar_to_es
+  data_files:
+  - split: train
+    path: ar_to_es/train-*
+- config_name: ar_to_fr
+  data_files:
+  - split: train
+    path: ar_to_fr/train-*
+- config_name: ar_to_ru
+  data_files:
+  - split: train
+    path: ar_to_ru/train-*
+- config_name: ar_to_zh
+  data_files:
+  - split: train
+    path: ar_to_zh/train-*
+- config_name: en_to_es
+  data_files:
+  - split: train
+    path: en_to_es/train-*
+- config_name: en_to_fr
+  data_files:
+  - split: train
+    path: en_to_fr/train-*
+- config_name: en_to_ru
+  data_files:
+  - split: train
+    path: en_to_ru/train-*
+- config_name: en_to_zh
+  data_files:
+  - split: train
+    path: en_to_zh/train-*
+- config_name: es_to_fr
+  data_files:
+  - split: train
+    path: es_to_fr/train-*
+- config_name: es_to_ru
+  data_files:
+  - split: train
+    path: es_to_ru/train-*
+- config_name: es_to_zh
+  data_files:
+  - split: train
+    path: es_to_zh/train-*
+- config_name: fr_to_ru
+  data_files:
+  - split: train
+    path: fr_to_ru/train-*
+- config_name: fr_to_zh
+  data_files:
+  - split: train
+    path: fr_to_zh/train-*
+- config_name: ru_to_zh
+  data_files:
+  - split: train
+    path: ru_to_zh/train-*
 ---
 
 # Dataset Card for [Dataset Name]
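With the Parquet shards and the `configs:` mapping above in place, each language pair can be loaded directly from the data files, without the loading script this commit deletes. A minimal sketch, assuming the dataset is addressed by the repo id `un_ga` (adjust if it lives under a namespace):

```python
from datasets import load_dataset

# Load one language-pair config straight from the Parquet files.
# "un_ga" is the assumed repo id; config names match the YAML above.
ds = load_dataset("un_ga", "ar_to_en", split="train")

print(ds.num_rows)           # 74067, per the metadata in this commit
print(ds[0]["translation"])  # e.g. {"ar": "...", "en": "..."}
```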

ar_to_en/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ebaa0a103b98e81755881eff1e83db1f462ccc0491186646997c80539769966a
+size 21418697

ar_to_es/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45c8eddd68abbf83a503c7bccc4602d483d63874dc2ec2ca9a86d4aab5495f2d
+size 22724976

ar_to_fr/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bdfc0c018fdb95678927926168d184b01aeb83e0e8387592df6d262dfbcff06
+size 23035904

ar_to_ru/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:675449cb8d1f47775fb4ee2f9022d96f0c7dfb824cb9aea209d1d5690dea7197
+size 28279669

ar_to_zh/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:033ea4769cde695f63fb71eba07d114cdf6a18d2d2cdeeb79760393fdf00192b
+size 20391116

en_to_es/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3a5797a9d27e43af9896e70301b8c93648a055f11f6c3d2e4c9a9cea54e04d4
+size 19229141

en_to_fr/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6e883d727805f593a20f757859efaec3589d005c246296c327490a0debbcbac
+size 19540063

en_to_ru/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de3b0843d925d1f996d378d6dede0ba406a357fabc067cd1da98b8fd2d6b6044
+size 24783812

en_to_zh/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ceedf184299c31fb9ecee597f3d14b36818015505bc09c6ce7a5842a14a4853
+size 16895275

es_to_fr/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d90dbe39568deada452914c7868a6b8844b9e9ea40d71a72d2dd8675680f47e2
+size 20846355

es_to_ru/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60d46f6cda0c2b9b2593ac67c2ba27923bbdeb3bdffc5eb45537b21ec102c826
+size 26090092

es_to_zh/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c448cf6e67b275cf57fe3ce551ec2037473b1ed433dbfc7787c78bcc851f2227
+size 18201560

fr_to_ru/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4677dbeb76cee465a6098078a06303e67b404c5141cf5f5a814c55bba0647a41
+size 26401015

fr_to_zh/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffc9f7ff7153a2dcf7b48bd6f21502298e57ae0e5622499128649698fc40d189
+size 18512482

ru_to_zh/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71ddd1090f02b980ee99591204a702093723676be02801a8cd6277c63ffaceb1
+size 23756229
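Each Parquet file added in this commit is stored through Git LFS, so the repository itself only carries the three-line pointer shown above (spec version, SHA-256 of the content, byte size; the sizes match the `download_size` values in the README). To work with a shard outside of `datasets`, one can fetch the real file and read it with any Parquet reader. A sketch, again assuming the `un_ga` repo id:

```python
import pandas as pd
from huggingface_hub import hf_hub_download

# Resolve the actual Parquet shard behind the LFS pointer and read it.
path = hf_hub_download(
    repo_id="un_ga",               # assumed repo id
    repo_type="dataset",
    filename="ar_to_en/train-00000-of-00001.parquet",
)
df = pd.read_parquet(path)
print(len(df), list(df.columns))   # expected: 74067 ['id', 'translation']
```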

un_ga.py

@@ -1,154 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""United nations general assembly resolutions: A six-language parallel corpus"""
-
-
-import os
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{title = "United Nations General Assembly Resolutions: a six-language parallel corpus",
-abstract = "In this paper we describe a six-ways parallel public-domain corpus consisting of 2100 United Nations General Assembly Resolutions with translations in the six official languages of the United Nations, with an average of around 3 million tokens per language. The corpus is available in a preprocessed, formatting-normalized TMX format with paragraphs aligned across multiple languages. We describe the background to the corpus and its content, the process of its construction, and some of its interesting properties.",
-author = "Alexandre Rafalovitch and Robert Dale",
-year = "2009",
-language = "English",
-booktitle = "MT Summit XII proceedings",
-publisher = "International Association of Machine Translation",
-}"""
-
-_HOMEPAGE = "http://opus.nlpl.eu/UN.php"
-
-
-_LICENSE = ""
-
-_VALID_LANGUAGE_PAIRS = {
-    ("ar", "en"): "http://opus.nlpl.eu/download.php?f=UN/v20090831/moses/ar-en.txt.zip",
-    ("ar", "es"): "http://opus.nlpl.eu/download.php?f=UN/v20090831/moses/ar-es.txt.zip",
-    ("ar", "fr"): "http://opus.nlpl.eu/download.php?f=UN/v20090831/moses/ar-fr.txt.zip",
-    ("ar", "ru"): "http://opus.nlpl.eu/download.php?f=UN/v20090831/moses/ar-ru.txt.zip",
-    ("ar", "zh"): "http://opus.nlpl.eu/download.php?f=UN/v20090831/moses/ar-zh.txt.zip",
-    ("en", "es"): "http://opus.nlpl.eu/download.php?f=UN/v20090831/moses/en-es.txt.zip",
-    ("en", "fr"): "http://opus.nlpl.eu/download.php?f=UN/v20090831/moses/en-fr.txt.zip",
-    ("en", "ru"): "http://opus.nlpl.eu/download.php?f=UN/v20090831/moses/en-ru.txt.zip",
-    ("en", "zh"): "http://opus.nlpl.eu/download.php?f=UN/v20090831/moses/en-zh.txt.zip",
-    ("es", "fr"): "http://opus.nlpl.eu/download.php?f=UN/v20090831/moses/es-fr.txt.zip",
-    ("es", "ru"): "http://opus.nlpl.eu/download.php?f=UN/v20090831/moses/es-ru.txt.zip",
-    ("es", "zh"): "http://opus.nlpl.eu/download.php?f=UN/v20090831/moses/es-zh.txt.zip",
-    ("fr", "ru"): "http://opus.nlpl.eu/download.php?f=UN/v20090831/moses/fr-ru.txt.zip",
-    ("fr", "zh"): "http://opus.nlpl.eu/download.php?f=UN/v20090831/moses/fr-zh.txt.zip",
-    ("ru", "zh"): "http://opus.nlpl.eu/download.php?f=UN/v20090831/moses/ru-zh.txt.zip",
-}
-
-_VERSION = "2.0.0"
-
-_DESCRIPTION = """\
-United nations general assembly resolutions: A six-language parallel corpus.
-This is a collection of translated documents from the United Nations originally compiled into a translation memory by Alexandre Rafalovitch, Robert Dale (see http://uncorpora.org).
-6 languages, 15 bitexts
-total number of files: 6
-total number of tokens: 18.87M
-total number of sentence fragments: 0.44M
-"""
-
-_BASE_NAME = "UN.{}-{}.{}"
-
-
-class UnGaConfig(datasets.BuilderConfig):
-    """BuilderConfig for United nations general assembly resolutions: A six-language parallel corpus"""
-
-    def __init__(self, language_pair=(None, None), **kwargs):
-        """BuilderConfig for United nations general assembly resolutions: A six-language parallel corpus.
-        The first language in `language_pair` should consist of two strings joined by
-        an underscore (e.g. "en-tr").
-        Args:
-            language_pair: pair of languages that will be used for translation.
-            **kwargs: keyword arguments forwarded to super.
-        """
-        name = "%s_to_%s" % (language_pair[0], language_pair[1])
-
-        description = ("Translation dataset from %s to %s or %s to %s.") % (
-            language_pair[0],
-            language_pair[1],
-            language_pair[1],
-            language_pair[0],
-        )
-        super(UnGaConfig, self).__init__(
-            name=name, description=description, version=datasets.Version(_VERSION, ""), **kwargs
-        )
-
-        # Validate language pair.
-        assert language_pair in _VALID_LANGUAGE_PAIRS, (
-            "Config language pair (%s, " "%s) not supported"
-        ) % language_pair
-
-        self.language_pair = language_pair
-
-
-class UnGa(datasets.GeneratorBasedBuilder):
-
-    BUILDER_CONFIGS = [
-        UnGaConfig(
-            language_pair=pair,
-        )
-        for pair in _VALID_LANGUAGE_PAIRS.keys()
-    ]
-
-    BUILDER_CONFIG_CLASS = UnGaConfig
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "translation": datasets.Translation(languages=tuple(self.config.language_pair)),
-                },
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        download_url = _VALID_LANGUAGE_PAIRS.get(tuple(self.config.language_pair))
-        path = dl_manager.download_and_extract(download_url)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"datapath": path},
-            )
-        ]
-
-    def _generate_examples(self, datapath):
-        lang1, lang2 = self.config.language_pair
-        lang1_file = _BASE_NAME.format(lang1, lang2, lang1)
-        lang2_file = _BASE_NAME.format(lang1, lang2, lang2)
-        lang1_path = os.path.join(datapath, lang1_file)
-        lang2_path = os.path.join(datapath, lang2_file)
-
-        with open(lang1_path, encoding="utf-8") as f1, open(lang2_path, encoding="utf-8") as f2:
-            for sentence_counter, (x, y) in enumerate(zip(f1, f2)):
-                x = x.strip()
-                y = y.strip()
-                result = (
-                    sentence_counter,
-                    {
-                        "id": str(sentence_counter),
-                        "translation": {lang1: x, lang2: y},
-                    },
-                )
-                yield result