GorkaUrbizu commited on
Commit
a94296d
·
1 Parent(s): 7df7abc

Upload 2 files

Browse files
Files changed (2) hide show
  1. README.md +806 -1
  2. basque_glue.py +523 -0
README.md CHANGED
@@ -1,3 +1,808 @@
1
  ---
2
- license: cc-by-nc-sa-4.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ annotations_creators:
3
+ - expert-generated
4
+ language:
5
+ - eu
6
+ language_creators:
7
+ - expert-generated
8
+ license:
9
+ - cc-by-sa-4.0
10
+ multilinguality:
11
+ - monolingual
12
+ pretty_name: BasqueGLUE
13
+ size_categories:
14
+ - 100K<n<1M
15
+ source_datasets:
16
+ - original
17
+ tags: []
18
+ task_categories:
19
+ - text-classification
20
+ - token-classification
21
+ task_ids:
22
+ - intent-classification
23
+ - natural-language-inference
24
+ - sentiment-classification
25
+ - topic-classification
26
+ - named-entity-recognition
27
+ - coreference-resolution
28
+ configs:
29
+ - bec
30
+ - bhtc
31
+ - coref
32
+ - intent
33
+ - nerc_id
34
+ - nerc_od
35
+ - qnli
36
+ - slot
37
+ - vaxx
38
+ - wic
39
+ dataset_info:
40
+ - config_name: bec
41
+ features:
42
+ - name: text
43
+ dtype: string
44
+ - name: label
45
+ dtype:
46
+ class_label:
47
+ names:
48
+ '0': N
49
+ '1': NEU
50
+ '2': P
51
+ - name: idx
52
+ dtype: int32
53
+ splits:
54
+ - name: train
55
+ num_bytes: 693284
56
+ num_examples: 6078
57
+ - name: test
58
+ num_bytes: 148510
59
+ num_examples: 1302
60
+ - name: validation
61
+ num_bytes: 148377
62
+ num_examples: 1302
63
+ download_size: 1217803
64
+ dataset_size: 990171
65
+ - config_name: bhtc
66
+ features:
67
+ - name: text
68
+ dtype: string
69
+ - name: label
70
+ dtype:
71
+ class_label:
72
+ names:
73
+ '0': Ekonomia
74
+ '1': Euskal Herria
75
+ '2': Euskara
76
+ '3': Gizartea
77
+ '4': Historia
78
+ '5': Ingurumena
79
+ '6': Iritzia
80
+ '7': Komunikazioa
81
+ '8': Kultura
82
+ '9': Nazioartea
83
+ '10': Politika
84
+ '11': Zientzia
85
+ - name: idx
86
+ dtype: int32
87
+ splits:
88
+ - name: train
89
+ num_bytes: 2431494
90
+ num_examples: 8585
91
+ - name: test
92
+ num_bytes: 523066
93
+ num_examples: 1854
94
+ - name: validation
95
+ num_bytes: 519555
96
+ num_examples: 1857
97
+ download_size: 3896312
98
+ dataset_size: 3474115
99
+ - config_name: coref
100
+ features:
101
+ - name: text
102
+ dtype: string
103
+ - name: span1_text
104
+ dtype: string
105
+ - name: span2_text
106
+ dtype: string
107
+ - name: label
108
+ dtype:
109
+ class_label:
110
+ names:
111
+ '0': 'false'
112
+ '1': 'true'
113
+ - name: span1_index
114
+ dtype: int32
115
+ - name: span2_index
116
+ dtype: int32
117
+ - name: idx
118
+ dtype: int32
119
+ splits:
120
+ - name: train
121
+ num_bytes: 365830
122
+ num_examples: 986
123
+ - name: test
124
+ num_bytes: 201378
125
+ num_examples: 587
126
+ - name: validation
127
+ num_bytes: 108632
128
+ num_examples: 320
129
+ download_size: 855074
130
+ dataset_size: 675840
131
+ - config_name: intent
132
+ features:
133
+ - name: text
134
+ dtype: string
135
+ - name: label
136
+ dtype:
137
+ class_label:
138
+ names:
139
+ '0': alarm/cancel_alarm
140
+ '1': alarm/modify_alarm
141
+ '2': alarm/set_alarm
142
+ '3': alarm/show_alarms
143
+ '4': alarm/snooze_alarm
144
+ '5': alarm/time_left_on_alarm
145
+ '6': reminder/cancel_reminder
146
+ '7': reminder/set_reminder
147
+ '8': reminder/show_reminders
148
+ '9': weather/checkSunrise
149
+ '10': weather/checkSunset
150
+ '11': weather/find
151
+ - name: idx
152
+ dtype: int32
153
+ splits:
154
+ - name: train
155
+ num_bytes: 182856
156
+ num_examples: 3418
157
+ - name: test
158
+ num_bytes: 56118
159
+ num_examples: 1087
160
+ - name: validation
161
+ num_bytes: 101644
162
+ num_examples: 1904
163
+ download_size: 595375
164
+ dataset_size: 340618
165
+ - config_name: nerc_id
166
+ features:
167
+ - name: tokens
168
+ sequence: string
169
+ - name: tags
170
+ sequence:
171
+ class_label:
172
+ names:
173
+ '0': O
174
+ '1': B-PER
175
+ '2': I-PER
176
+ '3': B-LOC
177
+ '4': I-LOC
178
+ '5': B-ORG
179
+ '6': I-ORG
180
+ '7': B-MISC
181
+ '8': I-MISC
182
+ - name: idx
183
+ dtype: int32
184
+ splits:
185
+ - name: train
186
+ num_bytes: 946007
187
+ num_examples: 2842
188
+ - name: test
189
+ num_bytes: 653960
190
+ num_examples: 1846
191
+ - name: validation
192
+ num_bytes: 237464
193
+ num_examples: 711
194
+ download_size: 1723325
195
+ dataset_size: 1837431
196
+ - config_name: nerc_od
197
+ features:
198
+ - name: tokens
199
+ sequence: string
200
+ - name: tags
201
+ sequence:
202
+ class_label:
203
+ names:
204
+ '0': O
205
+ '1': B-PER
206
+ '2': I-PER
207
+ '3': B-LOC
208
+ '4': I-LOC
209
+ '5': B-ORG
210
+ '6': I-ORG
211
+ '7': B-MISC
212
+ '8': I-MISC
213
+ - name: idx
214
+ dtype: int32
215
+ splits:
216
+ - name: train
217
+ num_bytes: 1183471
218
+ num_examples: 3553
219
+ - name: test
220
+ num_bytes: 262853
221
+ num_examples: 598
222
+ - name: validation
223
+ num_bytes: 270028
224
+ num_examples: 601
225
+ download_size: 1613369
226
+ dataset_size: 1716352
227
+ - config_name: qnli
228
+ features:
229
+ - name: question
230
+ dtype: string
231
+ - name: sentence
232
+ dtype: string
233
+ - name: label
234
+ dtype:
235
+ class_label:
236
+ names:
237
+ '0': entailment
238
+ '1': not_entailment
239
+ - name: idx
240
+ dtype: int32
241
+ splits:
242
+ - name: train
243
+ num_bytes: 327189
244
+ num_examples: 1764
245
+ - name: test
246
+ num_bytes: 42569
247
+ num_examples: 238
248
+ - name: validation
249
+ num_bytes: 46359
250
+ num_examples: 230
251
+ download_size: 532399
252
+ dataset_size: 416117
253
+ - config_name: slot
254
+ features:
255
+ - name: tokens
256
+ sequence: string
257
+ - name: tags
258
+ sequence:
259
+ class_label:
260
+ names:
261
+ '0': O
262
+ '1': B-datetime
263
+ '2': B-location
264
+ '3': B-negation
265
+ '4': B-alarm/alarm_modifier
266
+ '5': B-alarm/recurring_period
267
+ '6': B-reminder/noun
268
+ '7': B-reminder/todo
269
+ '8': B-reminder/reference
270
+ '9': B-reminder/recurring_period
271
+ '10': B-weather/attribute
272
+ '11': B-weather/noun
273
+ '12': I-datetime
274
+ '13': I-location
275
+ '14': I-negation
276
+ '15': I-alarm/alarm_modifier
277
+ '16': I-alarm/recurring_period
278
+ '17': I-reminder/noun
279
+ '18': I-reminder/todo
280
+ '19': I-reminder/reference
281
+ '20': I-reminder/recurring_period
282
+ '21': I-weather/attribute
283
+ '22': I-weather/noun
284
+ - name: idx
285
+ dtype: int32
286
+ splits:
287
+ - name: train
288
+ num_bytes: 388774
289
+ num_examples: 3418
290
+ - name: test
291
+ num_bytes: 114876
292
+ num_examples: 1088
293
+ - name: validation
294
+ num_bytes: 214053
295
+ num_examples: 1900
296
+ download_size: 962250
297
+ dataset_size: 717703
298
+ - config_name: vaxx
299
+ features:
300
+ - name: text
301
+ dtype: string
302
+ - name: label
303
+ dtype:
304
+ class_label:
305
+ names:
306
+ '0': AGAINST
307
+ '1': NONE
308
+ '2': FAVOR
309
+ - name: idx
310
+ dtype: int32
311
+ splits:
312
+ - name: train
313
+ num_bytes: 176436
314
+ num_examples: 864
315
+ - name: test
316
+ num_bytes: 70947
317
+ num_examples: 312
318
+ - name: validation
319
+ num_bytes: 42795
320
+ num_examples: 206
321
+ download_size: 333997
322
+ dataset_size: 290178
323
+ - config_name: wic
324
+ features:
325
+ - name: sentence1
326
+ dtype: string
327
+ - name: sentence2
328
+ dtype: string
329
+ - name: word
330
+ dtype: string
331
+ - name: label
332
+ dtype:
333
+ class_label:
334
+ names:
335
+ '0': 'false'
336
+ '1': 'true'
337
+ - name: start1
338
+ dtype: int32
339
+ - name: start2
340
+ dtype: int32
341
+ - name: end1
342
+ dtype: int32
343
+ - name: end2
344
+ dtype: int32
345
+ - name: idx
346
+ dtype: int32
347
+ splits:
348
+ - name: train
349
+ num_bytes: 172847108
350
+ num_examples: 408559
351
+ - name: test
352
+ num_bytes: 589578
353
+ num_examples: 1400
354
+ - name: validation
355
+ num_bytes: 251549
356
+ num_examples: 600
357
+ download_size: 22938354
358
+ dataset_size: 173688235
359
  ---
360
+
361
+ # Dataset Card for BasqueGLUE
362
+
363
+ ## Table of Contents
364
+
365
+ * [Table of Contents](#table-of-contents)
366
+ * [Dataset Description](#dataset-description)
367
+ * [Dataset Summary](#dataset-summary)
368
+ * [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
369
+ * [Languages](#languages)
370
+ * [Dataset Structure](#dataset-structure)
371
+ * [Data Instances](#data-instances)
372
+ * [Data Fields](#data-fields)
373
+ * [Data Splits](#data-splits)
374
+ * [Dataset Creation](#dataset-creation)
375
+ * [Curation Rationale](#curation-rationale)
376
+ * [Source Data](#source-data)
377
+ * [Annotations](#annotations)
378
+ * [Personal and Sensitive Information](#personal-and-sensitive-information)
379
+ * [Considerations for Using the Data](#considerations-for-using-the-data)
380
+ * [Social Impact of Dataset](#social-impact-of-dataset)
381
+ * [Discussion of Biases](#discussion-of-biases)
382
+ * [Other Known Limitations](#other-known-limitations)
383
+ * [Additional Information](#additional-information)
384
+ * [Dataset Curators](#dataset-curators)
385
+ * [Licensing Information](#licensing-information)
386
+ * [Citation Information](#citation-information)
387
+ * [Contributions](#contributions)
388
+
389
+ ## Dataset Description
390
+
391
+ * **Repository:** <https://github.com/orai-nlp/BasqueGLUE>
392
+ * **Paper:** [BasqueGLUE: A Natural Language Understanding Benchmark for Basque](http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.172.pdf)
393
+ * **Point of Contact:** [Contact Information](https://github.com/orai-nlp/BasqueGLUE#contact-information)
394
+
395
+ ### Dataset Summary
396
+
397
+ Natural Language Understanding (NLU) technology has improved significantly over the last few years, and multitask benchmarks such as GLUE are key to evaluate this improvement in a robust and general way. These benchmarks take into account a wide and diverse set of NLU tasks that require some form of language understanding, beyond the detection of superficial, textual clues. However, they are costly to develop and language-dependent, and therefore they are only available for a small number of languages.
398
+
399
+ We present BasqueGLUE, the first NLU benchmark for Basque, which has been elaborated from previously existing datasets and following similar criteria to those used for the construction of GLUE and SuperGLUE. BasqueGLUE is freely available under an open license.
400
+
401
+ | Dataset | \|Train\| | \|Val\| | \|Test\| | Task | Metric | Domain |
402
+ |----------------|----------:|--------:|---------:|------------------------|:------:|-----------------|
403
+ | NERCid | 51,539 | 12,936 | 35,855 | NERC | F1 | News |
404
+ | NERCood | 64,475 | 14,945 | 14,462 | NERC | F1 | News, Wikipedia |
405
+ | FMTODeu_intent | 3,418 | 1,904 | 1,087 | Intent classification | F1 | Dialog system |
406
+ | FMTODeu_slot | 19,652 | 10,791 | 5,633 | Slot filling | F1 | Dialog system |
407
+ | BHTCv2 | 8,585 | 1,857 | 1,854 | Topic classification | F1 | News |
408
+ | BEC2016eu | 6,078 | 1,302 | 1,302 | Sentiment analysis | F1 | Twitter |
409
+ | VaxxStance | 864 | 206 | 312 | Stance detection | MF1* | Twitter |
410
+ | QNLIeu | 1,764 | 230 | 238 | QA/NLI | Acc | Wikipedia |
411
+ | WiCeu | 408,559 | 600 | 1,400 | WSD | Acc | Wordnet |
412
+ | EpecKorrefBin | 986 | 320 | 587 | Coreference resolution | Acc | News |
413
+
414
+ ### Supported Tasks and Leaderboards
415
+
416
+ This benchmark comprises the following tasks:
417
+
418
+ #### NERCid
419
+
420
+ This dataset contains sentences from the news domain with manually annotated named entities. The data is the merge of EIEC (a dataset of a collection of news wire articles from Euskaldunon Egunkaria newspaper, (Alegria et al. 2004)), and newly annotated data from naiz.eus. The data is annotated following the BIO annotation scheme over four categories: person, organization, location, and miscellaneous.
421
+
422
+ #### NERCood
423
+
424
+ This dataset contains sentences with manually annotated named entities. The training data is the merge of EIEC (a dataset of a collection of news wire articles from Euskaldunon Egunkaria newspaper, (Alegria et al. 2004)), and newly annotated data from naiz.eus. The data is annotated following the BIO annotation scheme over four categories: person, organization, location, and miscellaneous. For validation and test sets, sentences from Wikipedia were annotated following the same annotation guidelines.
425
+
426
+ #### FMTODeu_intent
427
+
428
+ This dataset contains utterance texts and intent annotations drawn from the manually-annotated Facebook Multilingual Task Oriented Dataset (FMTOD) (Schuster et al. 2019). Basque translated data was drawn from the datasets created for Building a Task-oriented Dialog System for languages with no training data: the Case for Basque (de Lacalle et al. 2020). The examples are annotated with one of 12 different intent classes corresponding to alarm, reminder or weather related actions.
429
+
430
+ #### FMTODeu_slot
431
+
432
+ This dataset contains utterance texts and sequence intent argument annotations designed for slot filling tasks, drawn from the manually-annotated Facebook Multilingual Task Oriented Dataset (FMTOD) (Schuster et al. 2019). Basque translated data was drawn from the datasets created for Building a Task-oriented Dialog System for languages with no training data: the Case for Basque (de Lacalle et al. 2020). The task is a sequence labelling task similar to NERC, following BIO annotation scheme over 11 categories.
433
+
434
+ #### BHTCv2
435
+
436
+ The corpus contains 12,296 news headlines (brief article descriptions) from the Basque weekly newspaper [Argia](https://www.argia.eus). Topics are classified uniquely according to twelve thematic categories.
437
+
438
+ #### BEC2016eu
439
+
440
+ The Basque Election Campaign 2016 Opinion Dataset (BEC2016eu) is a new dataset for the task of sentiment analysis, a sequence classification task, which contains tweets about the campaign for the Basque elections from 2016. The crawling was carried out during the election campaign period (2016/09/09-2016/09/23), by monitoring the main parties and their respective candidates. The tweets were manually annotated as positive, negative or neutral.
441
+
442
+ #### VaxxStance
443
+
444
+ The VaxxStance (Agerri et al., 2021) dataset originally provides texts and stance annotations for social media texts around the anti-vaccine movement. Texts are given a label indicating whether they express an AGAINST, FAVOR or NONE stance towards the topic.
445
+
446
+ #### QNLIeu
447
+
448
+ This task includes the QA dataset ElkarHizketak (Otegi et al. 2020), a low resource conversational Question Answering (QA) dataset for Basque created by native speaker volunteers. The dataset is built on top of Wikipedia sections about popular people and organizations, and it contains around 400 dialogues and 1600 question and answer pairs. The task was adapted into a sentence-pair binary classification task, following the design of QNLI for English (Wang et al. 2019). Each question and answer pair are given a label indicating whether the answer is entailed by the question.
449
+
450
+ #### WiCeu
451
+
452
+ Word in Context or WiC (Pilehvar and Camacho-Collados 2019) is a word sense disambiguation (WSD) task, designed as a particular form of sentence pair binary classification. Given two text snippets and a polysemous word that appears in both of them (the span of the word is marked in both snippets), the task is to determine whether the word has the same sense in both sentences. This dataset is based on the EPEC-EuSemcor (Pociello et al. 2011) sense-tagged corpus.
453
+
454
+ #### EpecKorrefBin
455
+
456
+ EPEC-KORREF-Bin is a dataset derived from EPEC-KORREF (Soraluze et al. 2012), a corpus of Basque news documents with manually annotated mentions and coreference chains, which has been converted into a binary classification task. In this task, the model has to predict whether two mentions from a text, which can be pronouns, nouns or noun phrases, are referring to the same entity.
457
+
458
+ #### Leaderboard
459
+
460
+ Results obtained by two BERT base models, provided as baselines for the benchmark. The results reported for NERC are the average of in-domain and out-of-domain NERC.
461
+
462
+
463
+ | | AVG | NERC | F_intent | F_slot | BHTC | BEC | Vaxx | QNLI | WiC | coref |
464
+ |------------------------------------------------------------|:-----:|:-----:|:---------:|:-------:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|
465
+ | Model | | F1 | F1 | F1 | F1 | F1 | MF1 | acc | acc | acc |
466
+ |[BERTeus](https://huggingface.co/ixa-ehu/berteus-base-cased)| 73.23 | 81.92 | 82.52 | 74.34 | 78.26 | 69.43 | 59.30 | 74.26 | 70.71 | 68.31 |
467
+ |[ElhBERTeu](https://huggingface.co/elh-eus/ElhBERTeu) | 73.71 | 82.30 | 82.24 | 75.64 | 78.05 | 69.89 | 63.81 | 73.84 | 71.71 | 65.93 |
468
+
469
+ ### Languages
470
+
471
+ Data are available in Basque (BCP-47 `eu`)
472
+
473
+ ## Dataset Structure
474
+
475
+ ### Data Instances
476
+
477
+ #### NERCid/NERCood
478
+
479
+ An example of 'train' looks as follows:
480
+ ```
481
+ {
482
+ "idx": 0,
483
+ "tags": ["O", "O", "O", "O", "B-ORG", "O", ...],
484
+ "tokens": ["Greba", "orokorrera", "deitu", "du", "EHk", "27rako", ...]
485
+ }
486
+ ```
487
+ #### FMTODeu_intent
488
+
489
+ An example of 'train' looks as follows:
490
+ ```
491
+ {
492
+ "idx": 0,
493
+ "label": "alarm/modify_alarm",
494
+ "text": "aldatu alarma 7am-tik 7pm-ra , mesedez"
495
+ }
496
+ ```
497
+ #### FMTODeu_slot
498
+
499
+ An example of 'train' looks as follows:
500
+ ```
501
+ {
502
+ "idx": 923,
503
+ "tags": ["O", "B-reminder/todo", "I-datetime", "I-datetime", "B-reminder/todo"],
504
+ "tokens": ["gogoratu", "zaborra", "gaur", "gauean", "ateratzea"]
505
+ }
506
+ ```
507
+ #### BHTCv2
508
+
509
+ An example of 'test' looks as follows:
510
+ ```
511
+ {
512
+ "idx": 0,
513
+ "label": "Gizartea",
514
+ "text": "Genero berdintasunaz, hezkuntzaz eta klase gizarteaz hamar liburu baino gehiago..."
515
+ }
516
+ ```
517
+ #### BEC2016eu
518
+
519
+ An example of 'train' looks as follows:
520
+
521
+ ```
522
+ {
523
+ "idx": 0,
524
+ "label": "NEU",
525
+ "text": "Jorge Garc\\u00eda concejal de @EHBilduBaraka ha entregado la camiseta... #URL"
526
+ }
527
+ ```
528
+
529
+ #### VaxxStance
530
+
531
+ An example of 'train' looks as follows:
532
+ ```
533
+ {
534
+ "idx": 0,
535
+ "label": "FAVOR",
536
+ "text": "\"#COVID19 Oraingo datuak, izurriaren dinamika, txertoaren eragina eta birusaren..
537
+ }
538
+
539
+ ```
540
+ #### QNLIeu
541
+
542
+ An example of 'train' looks as follows:
543
+ ```
544
+ {
545
+ "idx": 1,
546
+ "label": "not_entailment",
547
+ "question": "Zein posiziotan jokatzen du Busquets-ek?",
548
+ "sentence": "Busquets 23 partidatan izan zen konbokatua eta 2 gol sartu zituen."
549
+ }
550
+ ```
551
+ #### WiCeu
552
+
553
+ An example of 'test' looks as follows:
554
+ ```
555
+ {
556
+ "idx": 16,
557
+ "label": false,
558
+ "word": "udal",
559
+ "sentence1": "1a . Lekeitioko udal mugarteko Alde Historikoa Birgaitzeko Plan Berezia behin...",
560
+ "sentence2": "Diezek kritikatu egin zuen EAJk zenbait udaletan EH gobernu taldeetatik at utzi...",
561
+ "start1": 16,
562
+ "start2": 40,
563
+ "end1": 21,
564
+ "end2": 49
565
+ }
566
+ ```
567
+
568
+ #### EpecKorrefBin
569
+
570
+ An example of 'train' looks as follows:
571
+ ```
572
+ {
573
+ "idx": 6,
574
+ "label": false,
575
+ "text": "Isuntza da faborito nagusia Elantxobeko banderan . ISUNTZA trainerua da faborito nagusia bihar Elantxoben jokatuko den bandera irabazteko .",
576
+ "span1_text": "Elantxobeko banderan",
577
+ "span2_text": "ISUNTZA trainerua",
578
+ "span1_index": 4,
579
+ "span2_index": 8
580
+ }
581
+ ```
582
+
583
+ ### Data Fields
584
+
585
+ #### NERCid
586
+
587
+ * `tokens`: a list of `string` features
588
+ * `tags`: a list of entity labels, with possible values including `person` (PER), `location` (LOC), `organization` (ORG), `miscellaneous` (MISC)
589
+ * `idx`: an `int32` feature
590
+
591
+ #### NERCood
592
+
593
+ * `tokens`: a list of `string` features
594
+ * `tags`: a list of entity labels, with possible values including `person` (PER), `location` (LOC), `organization` (ORG), `miscellaneous` (MISC)
595
+ * `idx`: an `int32` feature
596
+
597
+ #### FMTODeu_intent
598
+
599
+ * `text`: a `string` feature
600
+ * `label`: an intent label, with possible values including:
601
+ * `alarm/cancel_alarm`
602
+ * `alarm/modify_alarm`
603
+ * `alarm/set_alarm`
604
+ * `alarm/show_alarms`
605
+ * `alarm/snooze_alarm`
606
+ * `alarm/time_left_on_alarm`
607
+ * `reminder/cancel_reminder`
608
+ * `reminder/set_reminder`
609
+ * `reminder/show_reminders`
610
+ * `weather/checkSunrise`
611
+ * `weather/checkSunset`
612
+ * `weather/find`
613
+ * `idx`: an `int32` feature
614
+
615
+ #### FMTODeu_slot
616
+
617
+ * `tokens`: a list of `string` features
618
+ * `tags`: a list of intent labels, with possible values including:
619
+ * `datetime`
620
+ * `location`
621
+ * `negation`
622
+ * `alarm/alarm_modifier`
623
+ * `alarm/recurring_period`
624
+ * `reminder/noun`
625
+ * `reminder/todo`
626
+ * `reminder/reference`
627
+ * `reminder/recurring_period`
628
+ * `weather/attribute`
629
+ * `weather/noun`
630
+ * `idx`: an `int32` feature
631
+
632
+ #### BHTCv2
633
+
634
+ * `text`: a `string` feature
635
+ * `label`: a topic label, with possible values including:
+ * `Ekonomia`
+ * `Euskal Herria`
+ * `Euskara`
+ * `Gizartea`
+ * `Historia`
+ * `Ingurumena`
+ * `Iritzia`
+ * `Komunikazioa`
+ * `Kultura`
+ * `Nazioartea`
+ * `Politika`
+ * `Zientzia`
636
+ * `idx`: an `int32` feature
637
+
638
+ #### BEC2016eu
639
+
640
+ * `text`: a `string` feature
641
+ * `label`: a polarity label, with possible values including `neutral` (NEU), `negative` (N), `positive` (P)
654
+ * `idx`: an `int32` feature
655
+
656
+ #### VaxxStance
657
+
658
+ * `text`: a `string` feature
659
+ * `label`: a stance label, with possible values including `AGAINST`, `FAVOR`, `NONE`
660
+ * `idx`: an `int32` feature
661
+
662
+ #### QNLIeu
663
+
664
+ * `question`: a `string` feature
665
+ * `sentence`: a `string` feature
666
+ * `label`: an entailment label, with possible values including `entailment`, `not_entailment`
667
+ * `idx`: an `int32` feature
668
+
669
+ #### WiCeu
670
+
671
+ * `word`: a `string` feature
672
+ * `sentence1`: a `string` feature
673
+ * `sentence2`: a `string` feature
674
+ * `label`: a `boolean` label indicating sense agreement, with possible values including `true`, `false`
675
+ * `start1`: an `int` feature indicating character position where word occurrence begins in first sentence
676
+ * `start2`: an `int` feature indicating character position where word occurrence begins in second sentence
677
+ * `end1`: an `int` feature indicating character position where word occurrence ends in first sentence
678
+ * `end2`: an `int` feature indicating character position where word occurrence ends in second sentence
679
+ * `idx`: an `int32` feature
680
+
681
+ #### EpecKorrefBin
682
+
683
+ * `text`: a `string` feature.
684
+ * `label`: a `boolean` coreference label, with possible values including `true`, `false`.
685
+ * `span1_text`: a `string` feature
686
+ * `span2_text`: a `string` feature
687
+ * `span1_index`: an `int` feature indicating token index where `span1_text` feature occurs in `text`
688
+ * `span2_index`: an `int` feature indicating token index where `span2_text` feature occurs in `text`
689
+ * `idx`: an `int32` feature
690
+
691
+ ### Data Splits
692
+
693
+ | Dataset | \|Train\| | \|Val\| | \|Test\| |
694
+ |---------|--------:|------:|-------:|
695
+ | NERCid | 51,539 | 12,936 | 35,855 |
696
+ | NERCood | 64,475 | 14,945 | 14,462 |
697
+ | FMTODeu_intent | 3,418 | 1,904 | 1,087 |
698
+ | FMTODeu_slot | 19,652 | 10,791 | 5,633 |
699
+ | BHTCv2 | 8,585 | 1,857 | 1,854 |
700
+ | BEC2016eu | 6,078 | 1,302 | 1,302 |
701
+ | VaxxStance | 864 | 206 | 312 |
702
+ | QNLIeu | 1,764 | 230 | 238 |
703
+ | WiCeu | 408,559 | 600 | 1,400 |
704
+ | EpecKorrefBin | 986 | 320 | 587 |
705
+
706
+
707
+
708
+ ## Dataset Creation
709
+
710
+ ### Curation Rationale
711
+
712
+ We believe that BasqueGLUE is a significant contribution towards developing NLU tools in Basque, which we believe will facilitate the technological advance for the Basque language. In order to create BasqueGLUE we took as a reference the GLUE and SuperGLUE frameworks. When possible, we re-used existing datasets for Basque, adapting them to the corresponding task formats if necessary. Additionally, BasqueGLUE also includes six new datasets that have not been published before. In total, BasqueGLUE consists of nine Basque NLU tasks and covers a wide range of tasks with different difficulties across several domains. As with the original GLUE benchmark, the training data for the tasks vary in size, which makes it possible to measure how well the models transfer knowledge across tasks.
713
+
714
+ ### Source Data
715
+
716
+ #### Initial Data Collection and Normalization
717
+
718
+ [More Information Needed]
719
+
720
+ #### Who are the source language producers?
721
+
722
+ [More Information Needed]
723
+
724
+ ### Annotations
725
+
726
+ #### Annotation process
727
+
728
+ [More Information Needed]
729
+
730
+ #### Who are the annotators?
731
+
732
+ [More Information Needed]
733
+
734
+ ### Personal and Sensitive Information
735
+
736
+ [More Information Needed]
737
+
738
+ ## Considerations for Using the Data
739
+
740
+ ### Social Impact of Dataset
741
+
742
+ [More Information Needed]
743
+
744
+ ### Discussion of Biases
745
+
746
+ [More Information Needed]
747
+
748
+ ### Other Known Limitations
749
+
750
+ [More Information Needed]
751
+
752
+ ## Additional Information
753
+
754
+ ### Dataset Curators
755
+
756
+ Gorka Urbizu [1], Iñaki San Vicente [1], Xabier Saralegi [1], Rodrigo Agerri [2] and Aitor Soroa [2]
757
+
758
+ Affiliation of the authors:
759
+
760
+ [1] orai NLP Technologies
761
+
762
+ [2] HiTZ Center - Ixa, University of the Basque Country UPV/EHU
763
+
764
+ ### Licensing Information
765
+
766
+ Each dataset of the BasqueGLUE benchmark has its own license (since most of them are, or are derived from, already existing datasets). See their respective README files for details.
767
+
768
+ Here we provide a brief summary of their licenses:
769
+
770
+ | Dataset | License |
771
+ |---------|---------|
772
+ | NERCid | CC BY-NC-SA 4.0 |
773
+ | NERCood | CC BY-NC-SA 4.0 |
774
+ | FMTODeu_intent | CC BY-NC-SA 4.0 |
775
+ | FMTODeu_slot | CC BY-NC-SA 4.0 |
776
+ | BHTCv2 | CC BY-NC-SA 4.0 |
777
+ | BEC2016eu | Twitter's license + CC BY-NC-SA 4.0 |
778
+ | VaxxStance | Twitter's license + CC BY 4.0 |
779
+ | QNLIeu | CC BY-SA 4.0 |
780
+ | WiCeu | CC BY-NC-SA 4.0 |
781
+ | EpecKorrefBin | CC BY-NC-SA 4.0 |
782
+
783
+ For the rest of the files of the benchmark, including the evaluation script, the following license applies:
784
+
785
+ Copyright (C) by Orai NLP Technologies.
786
+ This benchmark and evaluation scripts are licensed under the Creative Commons Attribution Share Alike 4.0
787
+ International License (CC BY-SA 4.0). To view a copy of this license, visit http://creativecommons.org/licenses/by-sa/4.0/.
788
+
789
+ ### Citation Information
790
+
791
+ ```
792
+ @InProceedings{urbizu2022basqueglue,
793
+ author = {Urbizu, Gorka and San Vicente, Iñaki and Saralegi, Xabier and Agerri, Rodrigo and Soroa, Aitor},
794
+ title = {BasqueGLUE: A Natural Language Understanding Benchmark for Basque},
795
+ booktitle = {Proceedings of the Language Resources and Evaluation Conference},
796
+ month = {June},
797
+ year = {2022},
798
+ address = {Marseille, France},
799
+ publisher = {European Language Resources Association},
800
+ pages = {1603--1612},
801
+ abstract = {Natural Language Understanding (NLU) technology has improved significantly over the last few years and multitask benchmarks such as GLUE are key to evaluate this improvement in a robust and general way. These benchmarks take into account a wide and diverse set of NLU tasks that require some form of language understanding, beyond the detection of superficial, textual clues. However, they are costly to develop and language-dependent, and therefore they are only available for a small number of languages. In this paper, we present BasqueGLUE, the first NLU benchmark for Basque, a less-resourced language, which has been elaborated from previously existing datasets and following similar criteria to those used for the construction of GLUE and SuperGLUE. We also report the evaluation of two state-of-the-art language models for Basque on BasqueGLUE, thus providing a strong baseline to compare upon. BasqueGLUE is freely available under an open license.},
802
+ url = {https://aclanthology.org/2022.lrec-1.172}
803
+ }
804
+ ```
805
+
806
+ ### Contributions
807
+
808
+ Thanks to [@richplant](https://github.com/richplant) for adding this dataset.
basque_glue.py ADDED
@@ -0,0 +1,523 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+
18
+ """ BasqueGLUE: A Natural Language Understanding Benchmark for Basque """
19
+
20
+ import json
21
+ import os
22
+ import textwrap
23
+
24
+ import datasets
25
+ from datasets import DownloadManager
26
+
27
+ _CITATION = """\
28
+ @InProceedings{urbizu2022basqueglue,
29
+ author = {Urbizu, Gorka and San Vicente, Iñaki and Saralegi, Xabier and Agerri, Rodrigo and Soroa, Aitor},
30
+ title = {BasqueGLUE: A Natural Language Understanding Benchmark for Basque},
31
+ booktitle = {Proceedings of the Language Resources and Evaluation Conference},
32
+ month = {June},
33
+ year = {2022},
34
+ address = {Marseille, France},
35
+ publisher = {European Language Resources Association},
36
+ pages = {1603--1612},
37
+ abstract = {Natural Language Understanding (NLU) technology has improved significantly over the last few years and multitask benchmarks such as GLUE are key to evaluate this improvement in a robust and general way. These benchmarks take into account a wide and diverse set of NLU tasks that require some form of language understanding, beyond the detection of superficial, textual clues. However, they are costly to develop and language-dependent, and therefore they are only available for a small number of languages. In this paper, we present BasqueGLUE, the first NLU benchmark for Basque, a less-resourced language, which has been elaborated from previously existing datasets and following similar criteria to those used for the construction of GLUE and SuperGLUE. We also report the evaluation of two state-of-the-art language models for Basque on BasqueGLUE, thus providing a strong baseline to compare upon. BasqueGLUE is freely available under an open license.},
38
+ url = {https://aclanthology.org/2022.lrec-1.172}
39
+ }
40
+ """
41
+
42
+ _DESCRIPTION = """\
43
+ We present BasqueGLUE, the first NLU benchmark for Basque, which has been elaborated from
44
+ previously existing datasets and following similar criteria to those used for the construction of
45
+ GLUE and SuperGLUE. BasqueGLUE is freely available under an open license.
46
+ """
47
+
48
+ _HOMEPAGE = "https://github.com/orai-nlp/BasqueGLUE"
49
+
50
+ URL = "https://raw.githubusercontent.com/orai-nlp/BasqueGLUE/main/"
51
+
52
+ CONFIGS = [
53
+ "bec",
54
+ "bhtc",
55
+ "coref",
56
+ "intent",
57
+ "nerc_id",
58
+ "nerc_od",
59
+ "qnli",
60
+ "slot",
61
+ "vaxx",
62
+ "wic"
63
+ ]
64
+
65
+ SPLITS = {
66
+ "train": datasets.Split.TRAIN,
67
+ "test": datasets.Split.TEST,
68
+ "val": datasets.Split.VALIDATION
69
+ }
70
+
71
+ _URLS = {
72
+ config: {split: URL + f"{config}/{split}.jsonl" for split in SPLITS.keys()} for config in CONFIGS
73
+ }
74
+ _URLS["wic"]["train"] = URL + "wic/train.zip"
75
+
76
+
77
class BasqueGLUEConfig(datasets.BuilderConfig):
    """BuilderConfig describing a single BasqueGLUE task.

    Attributes:
        text_features: names of the string-valued columns.
        int_features: names of the integer-valued columns, if any.
        label_column: name of the column holding the gold label(s).
        label_classes: ordered list of class names; list index = class id.
        citation: BibTeX citation for the underlying dataset.
        is_tokens: True for token-classification (sequence labelling) tasks.
        label_map: mapping from class name to integer id, derived from
            ``label_classes``.
    """

    def __init__(self,
                 text_features,
                 label_column,
                 citation,
                 label_classes,
                 int_features=None,
                 is_tokens=False,
                 **kwargs
                 ):
        """
        BuilderConfig for BasqueGLUE

        :param text_features: `list[string]`, the list of text columns
        :param int_features: `list[string]`, the list of int columns (optional)
        :param label_column: `string`, label column
        :param citation: `string`, citation for the data set
        :param label_classes: `list[string]`, the list of classes
        :param is_tokens: `bool`, indicates config is a token classification task
        :param kwargs: keyword arguments forwarded to super
        """
        super().__init__(**kwargs)
        self.text_features = text_features
        self.int_features = int_features
        self.label_column = label_column
        self.label_classes = label_classes
        self.citation = citation
        self.is_tokens = is_tokens
        # Pre-compute the class-name -> id mapping used by _generate_examples.
        self.label_map = {name: class_id for class_id, name in enumerate(label_classes)}
108
+
109
+
110
+ class BasqueGLUE(datasets.GeneratorBasedBuilder):
111
+ BUILDER_CONFIGS = [
112
+ BasqueGLUEConfig(
113
+ name='bec',
114
+ description=textwrap.dedent(
115
+ """\
116
+ The Basque Election Campaign 2016 Opinion Dataset (BEC2016eu) is a new dataset for
117
+ the task of sentiment analysis, a sequence classification task, which contains
118
+ tweets about the campaign for the Basque elections from 2016. The crawling was
119
+ carried out during the election campaign period (2016/09/09-2016/09/23), by
120
+ monitoring the main parties and their respective candidates. The tweets were
121
+ manually annotated as positive, negative or neutral.
122
+ """
123
+ ),
124
+ text_features=['text'],
125
+ label_column="label",
126
+ label_classes=["N", "NEU", "P"],
127
+ citation=textwrap.dedent(_CITATION)
128
+ ),
129
+ BasqueGLUEConfig(
130
+ name='bhtc',
131
+ description=textwrap.dedent(
132
+ """\
133
+ The corpus contains 12,296 news headlines (brief article descriptions) from the
134
+ Basque weekly newspaper [Argia](https://www.argia.eus). Topics are classified
135
+ uniquely according to twelve thematic categories.
136
+ """
137
+ ),
138
+ text_features=["text"],
139
+ label_column="label",
140
+ label_classes=["Ekonomia", "Euskal Herria", "Euskara", "Gizartea", "Historia",
141
+ "Ingurumena", "Iritzia", "Komunikazioa", "Kultura", "Nazioartea",
142
+ "Politika", "Zientzia"],
143
+ citation=textwrap.dedent(
144
+ """\
145
+ @inproceedings{agerri-etal-2020-give,
146
+ title = "Give your Text Representation Models some Love: the Case for {B}asque",
147
+ author = "Agerri, Rodrigo and
148
+ San Vicente, I{\~n}aki and
149
+ Campos, Jon Ander and
150
+ Barrena, Ander and
151
+ Saralegi, Xabier and
152
+ Soroa, Aitor and
153
+ Agirre, Eneko",
154
+ booktitle = "Proceedings of the Twelfth Language Resources and Evaluation Conference",
155
+ month = may,
156
+ year = "2020",
157
+ address = "Marseille, France",
158
+ publisher = "European Language Resources Association",
159
+ url = "https://aclanthology.org/2020.lrec-1.588",
160
+ pages = "4781--4788",
161
+ abstract = "Word embeddings and pre-trained language models allow to build rich representations of text and have enabled improvements across most NLP tasks. Unfortunately they are very expensive to train, and many small companies and research groups tend to use models that have been pre-trained and made available by third parties, rather than building their own. This is suboptimal as, for many languages, the models have been trained on smaller (or lower quality) corpora. In addition, monolingual pre-trained models for non-English languages are not always available. At best, models for those languages are included in multilingual versions, where each language shares the quota of substrings and parameters with the rest of the languages. This is particularly true for smaller languages such as Basque. In this paper we show that a number of monolingual models (FastText word embeddings, FLAIR and BERT language models) trained with larger Basque corpora produce much better results than publicly available versions in downstream NLP tasks, including topic classification, sentiment classification, PoS tagging and NER. This work sets a new state-of-the-art in those tasks for Basque. All benchmarks and models used in this work are publicly available.",
162
+ language = "English",
163
+ ISBN = "979-10-95546-34-4",
164
+ }
165
+ """
166
+ )
167
+ ),
168
+ BasqueGLUEConfig(
169
+ name='coref',
170
+ description=textwrap.dedent(
171
+ """\
172
+ EPEC-KORREF-Bin is a dataset derived from EPEC-KORREF (Soraluze et al.
173
+ 2012), a corpus of Basque news documents with manually annotated mentions and
174
+ coreference chains, which we have been converted into a binary classification
175
+ task. In this task, the model has to predict whether two mentions from a text,
176
+ which can be pronouns, nouns or noun phrases, are referring to the same entity.
177
+ """
178
+ ),
179
+ text_features=["text", 'span1_text', "span2_text"],
180
+ label_column="label",
181
+ label_classes=["false", 'true'],
182
+ int_features=["span1_index", "span2_index"],
183
+ citation=textwrap.dedent(_CITATION)
184
+ ),
185
+ BasqueGLUEConfig(
186
+ name='intent',
187
+ description=textwrap.dedent(
188
+ """\
189
+ This dataset contains utterance texts and intent annotations drawn from the
190
+ manually-annotated Facebook Multilingual Task Oriented Dataset (FMTOD) (Schuster
191
+ et al. 2019). Basque translated data was drawn from the datasets created for
192
+ Building a Task-oriented Dialog System for languages with no training data: the
193
+ Case for Basque (de Lacalle et al. 2020). The examples are annotated with one of
194
+ 12 different intent classes corresponding to alarm, reminder or weather related
195
+ actions.
196
+ """
197
+ ),
198
+ text_features=["text"],
199
+ label_column="label",
200
+ label_classes=["alarm/cancel_alarm", "alarm/modify_alarm", "alarm/set_alarm",
201
+ "alarm/show_alarms", "alarm/snooze_alarm", "alarm/time_left_on_alarm",
202
+ "reminder/cancel_reminder", "reminder/set_reminder",
203
+ "reminder/show_reminders", "weather/checkSunrise",
204
+ "weather/checkSunset", "weather/find"],
205
+ citation=textwrap.dedent(
206
+ """\
207
+ @inproceedings{lopez-de-lacalle-etal-2020-building,
208
+ title = "Building a Task-oriented Dialog System for Languages with no Training Data: the Case for {B}asque",
209
+ author = "L{\'o}pez de Lacalle, Maddalen and
210
+ Saralegi, Xabier and
211
+ San Vicente, I{\~n}aki",
212
+ booktitle = "Proceedings of the Twelfth Language Resources and Evaluation Conference",
213
+ month = may,
214
+ year = "2020",
215
+ address = "Marseille, France",
216
+ publisher = "European Language Resources Association",
217
+ url = "https://aclanthology.org/2020.lrec-1.340",
218
+ pages = "2796--2802",
219
+ abstract = "This paper presents an approach for developing a task-oriented dialog system for less-resourced languages in scenarios where training data is not available. Both intent classification and slot filling are tackled. We project the existing annotations in rich-resource languages by means of Neural Machine Translation (NMT) and posterior word alignments. We then compare training on the projected monolingual data with direct model transfer alternatives. Intent Classifiers and slot filling sequence taggers are implemented using a BiLSTM architecture or by fine-tuning BERT transformer models. Models learnt exclusively from Basque projected data provide better accuracies for slot filling. Combining Basque projected train data with rich-resource languages data outperforms consistently models trained solely on projected data for intent classification. At any rate, we achieve competitive performance in both tasks, with accuracies of 81{\%} for intent classification and 77{\%} for slot filling.",
220
+ language = "English",
221
+ ISBN = "979-10-95546-34-4",
222
+ }
223
+ """
224
+ )
225
+ ),
226
+ BasqueGLUEConfig(
227
+ name='nerc_id',
228
+ description=textwrap.dedent(
229
+ """\
230
+ This dataset contains sentences from the news domain with manually
231
+ annotated named entities. The data is the merge of EIEC (a dataset of a
232
+ collection of news wire articles from Euskaldunon Egunkaria newspaper, (Alegria
233
+ et al. 2004)), and newly annotated data from naiz.eus. The data is annotated
234
+ following the BIO annotation scheme over four categories: person, organization,
235
+ location, and miscellaneous.
236
+ """
237
+ ),
238
+ is_tokens=True,
239
+ text_features=["tokens"],
240
+ label_column="tags",
241
+ label_classes=["O",
242
+ "B-PER",
243
+ "I-PER",
244
+ "B-LOC",
245
+ "I-LOC",
246
+ "B-ORG",
247
+ "I-ORG",
248
+ "B-MISC",
249
+ "I-MISC"],
250
+ citation=textwrap.dedent(_CITATION)
251
+ ),
252
+ BasqueGLUEConfig(
253
+ name='nerc_od',
254
+ description=textwrap.dedent(
255
+ """\
256
+ This dataset contains sentences with manually annotated named entities. The
257
+ training data is the merge of EIEC (a dataset of a collection of news wire
258
+ articles from Euskaldunon Egunkaria newspaper, (Alegria et al. 2004)), and newly
259
+ annotated data from naiz.eus. The data is annotated following the BIO annotation
260
+ scheme over four categories: person, organization, location, and miscellaneous.
261
+ For validation and test sets, sentences from Wikipedia were annotated following
262
+ the same annotation guidelines.
263
+ """
264
+ ),
265
+ is_tokens=True,
266
+ text_features=["tokens"],
267
+ label_column="tags",
268
+ label_classes=["O",
269
+ "B-PER",
270
+ "I-PER",
271
+ "B-LOC",
272
+ "I-LOC",
273
+ "B-ORG",
274
+ "I-ORG",
275
+ "B-MISC",
276
+ "I-MISC"],
277
+ citation=textwrap.dedent(_CITATION)
278
+ ),
279
+ BasqueGLUEConfig(
280
+ name='qnli',
281
+ description=textwrap.dedent(
282
+ """\
283
+ This task includes the QA dataset ElkarHizketak (Otegi et al. 2020),
284
+ a low resource conversational Question Answering (QA) dataset for Basque created
285
+ by native speaker volunteers. The dataset is built on top of Wikipedia sections
286
+ about popular people and organizations, and it contains around 400 dialogues and
287
+ 1600 question and answer pairs. The task was adapted into a sentence-pair binary
288
+ classification task, following the design of QNLI for English (Wang et al.
289
+ 2019). Each question and answer pair are given a label indicating whether the
290
+ answer is entailed by the question.
291
+ """
292
+ ),
293
+ text_features=["question", "sentence"],
294
+ label_column="label",
295
+ label_classes=["entailment", "not_entailment"],
296
+ citation=textwrap.dedent(_CITATION)
297
+ ),
298
+ BasqueGLUEConfig(
299
+ name='slot',
300
+ description=textwrap.dedent(
301
+ """\
302
+ This dataset contains utterance texts and sequence intent argument
303
+ annotations designed for slot filling tasks, drawn from the manually-annotated
304
+ Facebook Multilingual Task Oriented Dataset (FMTOD) (Schuster et al. 2019).
305
+ Basque translated data was drawn from the datasets created for Building a
306
+ Task-oriented Dialog System for languages with no training data: the Case for
307
+ Basque (de Lacalle et al. 2020). The task is a sequence labelling task similar
308
+ to NERC, following BIO annotation scheme over 11 categories.
309
+ """
310
+ ),
311
+ is_tokens=True,
312
+ text_features=["tokens"],
313
+ label_column="tags",
314
+ label_classes=["O",
315
+ "B-datetime",
316
+ "B-location",
317
+ "B-negation",
318
+ "B-alarm/alarm_modifier",
319
+ "B-alarm/recurring_period",
320
+ "B-reminder/noun",
321
+ "B-reminder/todo",
322
+ "B-reminder/reference",
323
+ "B-reminder/recurring_period",
324
+ "B-weather/attribute",
325
+ "B-weather/noun",
326
+ "I-datetime",
327
+ "I-location",
328
+ "I-negation",
329
+ "I-alarm/alarm_modifier",
330
+ "I-alarm/recurring_period",
331
+ "I-reminder/noun",
332
+ "I-reminder/todo",
333
+ "I-reminder/reference",
334
+ "I-reminder/recurring_period",
335
+ "I-weather/attribute",
336
+ "I-weather/noun"],
337
+ citation=textwrap.dedent(
338
+ """\
339
+ @inproceedings{lopez-de-lacalle-etal-2020-building,
340
+ title = "Building a Task-oriented Dialog System for Languages with no Training Data: the Case for {B}asque",
341
+ author = "L{\'o}pez de Lacalle, Maddalen and
342
+ Saralegi, Xabier and
343
+ San Vicente, I{\~n}aki",
344
+ booktitle = "Proceedings of the Twelfth Language Resources and Evaluation Conference",
345
+ month = may,
346
+ year = "2020",
347
+ address = "Marseille, France",
348
+ publisher = "European Language Resources Association",
349
+ url = "https://aclanthology.org/2020.lrec-1.340",
350
+ pages = "2796--2802",
351
+ abstract = "This paper presents an approach for developing a task-oriented dialog system for less-resourced languages in scenarios where training data is not available. Both intent classification and slot filling are tackled. We project the existing annotations in rich-resource languages by means of Neural Machine Translation (NMT) and posterior word alignments. We then compare training on the projected monolingual data with direct model transfer alternatives. Intent Classifiers and slot filling sequence taggers are implemented using a BiLSTM architecture or by fine-tuning BERT transformer models. Models learnt exclusively from Basque projected data provide better accuracies for slot filling. Combining Basque projected train data with rich-resource languages data outperforms consistently models trained solely on projected data for intent classification. At any rate, we achieve competitive performance in both tasks, with accuracies of 81{\%} for intent classification and 77{\%} for slot filling.",
352
+ language = "English",
353
+ ISBN = "979-10-95546-34-4",
354
+ }
355
+ """
356
+ )
357
+ ),
358
+ BasqueGLUEConfig(
359
+ name='vaxx',
360
+ description=textwrap.dedent(
361
+ """\
362
+ The VaxxStance (Agerri et al., 2021) dataset originally provides texts and
363
+ stance annotations for social media texts around the anti-vaccine movement.
364
+ Texts are given a label indicating whether they express an AGAINST, FAVOR or
365
+ NEUTRAL stance towards the topic.
366
+ """
367
+ ),
368
+ text_features=['text'],
369
+ label_column="label",
370
+ label_classes=['AGAINST', 'NONE', 'FAVOR'],
371
+ citation=textwrap.dedent(
372
+ """\
373
+ @article{agerriVaxxStanceIberLEF20212021,
374
+ title = {{VaxxStance@IberLEF 2021: Overview of the Task on Going Beyond Text in Cross-Lingual Stance Detection}},
375
+ shorttitle = {{VaxxStance@IberLEF 2021}},
376
+ author = {Agerri, Rodrigo and Centeno, Roberto and Espinosa, Mar{\'i}a and de Landa, Joseba Fern{\'a}ndez and Rodrigo, {\'A}lvaro},
377
+ year = {2021},
378
+ month = sep,
379
+ journal = {Procesamiento del Lenguaje Natural},
380
+ volume = {67},
381
+ number = {0},
382
+ pages = {173--181},
383
+ issn = {1989-7553},
384
+ abstract = {This paper describes the VaxxStance task at IberLEF 2021. The task proposes to detect stance in Tweets referring to vaccines, a relevant and controversial topic in the current pandemia. The task is proposed in a multilingual setting, providing data for Basque and Spanish languages. The objective is to explore crosslingual approaches which also complement textual information with contextual features obtained from the social network. The results demonstrate that contextual information is crucial to obtain competitive results, especially across languages.},
385
+ copyright = {Copyright (c) 2021 Procesamiento del Lenguaje Natural},
386
+ langid = {spanish},
387
+ }
388
+ """
389
+ )
390
+ ),
391
+ BasqueGLUEConfig(
392
+ name='wic',
393
+ description=textwrap.dedent(
394
+ """\
395
+ Word in Context or WiC (Pilehvar and Camacho-Collados 2019) is a word sense
396
+ disambiguation (WSD) task, designed as a particular form of sentence pair binary
397
+ classification. Given two text snippets and a polyse mous word that appears in
398
+ both of them (the span of the word is marked in both snippets), the task is to
399
+ determine whether the word has the same sense in both sentences. This dataset is
400
+ based on the EPEC-EuSemcor (Pociello et al. 2011) sense-tagged corpus.
401
+ """
402
+ ),
403
+ text_features=['sentence1', 'sentence2', 'word'],
404
+ int_features=['start1', 'start2', 'end1', 'end2'],
405
+ label_column="label",
406
+ label_classes=['false', 'true'],
407
+ citation=textwrap.dedent(
408
+ """\
409
+ @article{agerriVaxxStanceIberLEF20212021,
410
+ title = {{VaxxStance@IberLEF 2021: Overview of the Task on Going Beyond Text in Cross-Lingual Stance Detection}},
411
+ shorttitle = {{VaxxStance@IberLEF 2021}},
412
+ author = {Agerri, Rodrigo and Centeno, Roberto and Espinosa, Mar{\'i}a and de Landa, Joseba Fern{\'a}ndez and Rodrigo, {\'A}lvaro},
413
+ year = {2021},
414
+ month = sep,
415
+ journal = {Procesamiento del Lenguaje Natural},
416
+ volume = {67},
417
+ number = {0},
418
+ pages = {173--181},
419
+ issn = {1989-7553},
420
+ abstract = {This paper describes the VaxxStance task at IberLEF 2021. The task proposes to detect stance in Tweets referring to vaccines, a relevant and controversial topic in the current pandemia. The task is proposed in a multilingual setting, providing data for Basque and Spanish languages. The objective is to explore crosslingual approaches which also complement textual information with contextual features obtained from the social network. The results demonstrate that contextual information is crucial to obtain competitive results, especially across languages.},
421
+ copyright = {Copyright (c) 2021 Procesamiento del Lenguaje Natural},
422
+ langid = {spanish},
423
+ }
424
+ """
425
+ )
426
+ ),
427
+ ]
428
+
429
+ def _info(self):
430
+ if self.config.is_tokens:
431
+ features = {
432
+ text_feature: datasets.Sequence(datasets.Value("string")) for text_feature in
433
+ self.config.text_features
434
+ }
435
+ features[self.config.label_column] = datasets.Sequence(
436
+ datasets.features.ClassLabel(names=self.config.label_classes)
437
+ )
438
+ else:
439
+ features = {
440
+ text_feature: datasets.Value("string") for text_feature in
441
+ self.config.text_features
442
+ }
443
+ features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
444
+ if self.config.int_features:
445
+ for int_feature in self.config.int_features:
446
+ features[int_feature] = datasets.Value("int32")
447
+ features["idx"] = datasets.Value("int32")
448
+ return datasets.DatasetInfo(
449
+ description=_DESCRIPTION,
450
+ features=datasets.Features(features),
451
+ citation=self.config.citation,
452
+ )
453
+
454
+ def _split_generators(self, dl_manager: DownloadManager):
455
+ """
456
+ Return SplitGenerators.
457
+ """
458
+ data_urls = _URLS[self.config.name]
459
+ splits = []
460
+ for split, sp_type in SPLITS.items():
461
+ data_url = data_urls[split]
462
+ if 'jsonl' in data_url:
463
+ data_file = dl_manager.download(data_url)
464
+ else:
465
+ data_dir = dl_manager.download_and_extract(data_url)
466
+ json_file = [f for f in os.listdir(data_dir) if f.endswith('jsonl')][0]
467
+ data_file = os.path.join(data_dir, json_file)
468
+
469
+ splits.append(
470
+ datasets.SplitGenerator(
471
+ name=sp_type,
472
+ gen_kwargs={
473
+ "data_file": data_file
474
+ }
475
+ )
476
+ )
477
+ return splits
478
+
479
+ def _generate_examples(self, data_file):
480
+ """
481
+ Yield examples.
482
+ """
483
+ with open(data_file, encoding="utf8", mode="r") as f:
484
+ id_ = 0
485
+ for line in f:
486
+ data = json.loads(line)
487
+
488
+ if self.config.name == 'coref':
489
+ example = {
490
+ 'text': data['text'],
491
+ 'span1_text': data['target']['span1_text'],
492
+ 'span2_text': data['target']['span1_text'],
493
+ 'span1_index': int(data['target']['span1_index']),
494
+ 'span2_index': int(data['target']['span2_index'])
495
+ }
496
+ else:
497
+ example = {
498
+ feat: data[feat] for feat in self.config.text_features
499
+ }
500
+
501
+ if self.config.int_features:
502
+ for feat in self.config.int_features:
503
+ example[feat] = int(data[feat])
504
+
505
+ example['idx'] = data['idx']
506
+
507
+ label_data = data[self.config.label_column]
508
+ if type(label_data) == bool:
509
+ label_data = str(label_data).lower()
510
+ if self.config.is_tokens:
511
+ label = [self.config.label_map[tag] for tag in label_data]
512
+ else:
513
+ label = self.config.label_map[label_data]
514
+ example[self.config.label_column] = label
515
+
516
+ # Filter out corrupted rows.
517
+ for value in example.values():
518
+ if value is None:
519
+ break
520
+ else:
521
+ yield id_, example
522
+
523
+ id_ += 1