1024m committed on
Commit 069b328 · verified · 1 Parent(s): d6ed981

Upload xlsum.py with huggingface_hub
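For context, a minimal sketch of how a file like this is typically pushed with the huggingface_hub client; the exact call behind this commit is not shown on this page, so the repo_id and local path below are assumptions for illustration:

from huggingface_hub import HfApi

# Hypothetical upload call; HfApi.upload_file creates a commit like the one above.
api = HfApi()
api.upload_file(
    path_or_fileobj="xlsum.py",   # local file to upload (assumed path)
    path_in_repo="xlsum.py",      # destination path inside the repo
    repo_id="csebuetnlp/xlsum",   # assumed target dataset repo
    repo_type="dataset",
)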

Files changed (1)
  1. xlsum.py +163 -0
xlsum.py ADDED
@@ -0,0 +1,163 @@
+"""XL-Sum abstractive summarization dataset."""
+
+
+import json
+import os
+
+import datasets
+
+
+_CITATION = """\
+@inproceedings{hasan-etal-2021-xl,
+    title = "{XL}-Sum: Large-Scale Multilingual Abstractive Summarization for 44 Languages",
+    author = "Hasan, Tahmid and
+      Bhattacharjee, Abhik and
+      Islam, Md. Saiful and
+      Mubasshir, Kazi and
+      Li, Yuan-Fang and
+      Kang, Yong-Bin and
+      Rahman, M. Sohel and
+      Shahriyar, Rifat",
+    booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021",
+    month = aug,
+    year = "2021",
+    address = "Online",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/2021.findings-acl.413",
+    pages = "4693--4703",
+}
+"""
+
+
+_DESCRIPTION = """\
+We present XLSum, a comprehensive and diverse dataset comprising 1.35 million professionally
+annotated article-summary pairs from BBC, extracted using a set of carefully designed heuristics.
+The dataset covers 45 languages ranging from low to high-resource, for many of which no
+public dataset is currently available. XL-Sum is highly abstractive, concise,
+and of high quality, as indicated by human and intrinsic evaluation.
+"""
+
+_HOMEPAGE = "https://github.com/csebuetnlp/xl-sum"
+
+_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"
+
+_URL = "https://huggingface.co/datasets/csebuetnlp/xlsum/resolve/main/data/{}_XLSum_v{}.tar.bz2"
+
+_LANGUAGES = [
+    "oromo",
+    "french",
+    "amharic",
+    "arabic",
+    "azerbaijani",
+    "bengali",
+    "burmese",
+    "chinese_simplified",
+    "chinese_traditional",
+    "welsh",
+    "english",
+    "kirundi",
+    "gujarati",
+    "hausa",
+    "hindi",
+    "igbo",
+    "indonesian",
+    "japanese",
+    "korean",
+    "kyrgyz",
+    "marathi",
+    "spanish",
+    "scottish_gaelic",
+    "nepali",
+    "pashto",
+    "persian",
+    "pidgin",
+    "portuguese",
+    "punjabi",
+    "russian",
+    "serbian_cyrillic",
+    "serbian_latin",
+    "sinhala",
+    "somali",
+    "swahili",
+    "tamil",
+    "telugu",
+    "thai",
+    "tigrinya",
+    "turkish",
+    "ukrainian",
+    "urdu",
+    "uzbek",
+    "vietnamese",
+    "yoruba",
+]
+
+
+class Xlsum(datasets.GeneratorBasedBuilder):
+    VERSION = datasets.Version("2.0.0")
+
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name="{}".format(lang),
+            version=datasets.Version("2.0.0"),
+        )
+        for lang in _LANGUAGES
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "url": datasets.Value("string"),
+                    "title": datasets.Value("string"),
+                    "summary": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                }
+            ),
+            supervised_keys=None,
+            homepage=_HOMEPAGE,
+            citation=_CITATION,
+            license=_LICENSE,
+            version=self.VERSION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        lang = str(self.config.name)
+        # version_str of "2.0.0" sliced with [:-2] yields "2.0" for the archive name.
+        url = _URL.format(lang, self.VERSION.version_str[:-2])
+
+        data_dir = dl_manager.download_and_extract(url)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepath": os.path.join(data_dir, lang + "_train.jsonl"),
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "filepath": os.path.join(data_dir, lang + "_test.jsonl"),
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "filepath": os.path.join(data_dir, lang + "_val.jsonl"),
+                },
+            ),
+        ]
+
+    def _generate_examples(self, filepath):
+        """Yields examples as (key, example) tuples."""
+        with open(filepath, encoding="utf-8") as f:
+            for idx_, row in enumerate(f):
+                data = json.loads(row)
+                yield idx_, {
+                    "id": data["id"],
+                    "url": data["url"],
+                    "title": data["title"],
+                    "summary": data["summary"],
+                    "text": data["text"],
+                }
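With this script in place, each language becomes a named config of the dataset. A minimal usage sketch with the datasets library; "english" here is just an example, and any name from _LANGUAGES works the same way:

import datasets

# Newer versions of the datasets library may additionally require
# trust_remote_code=True to run a repo-hosted loading script like this one.
ds = datasets.load_dataset("csebuetnlp/xlsum", "english")
print(ds["train"][0]["summary"])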