parquet-converter committed
Commit 4c96415 · 1 Parent(s): 5967cd7

Update parquet files

README.md DELETED
@@ -1,651 +0,0 @@
- ---
- annotations_creators:
- - no-annotation
- language_creators:
- - found
- language:
- - ace
- - af
- - als
- - am
- - an
- - ang
- - ar
- - arz
- - as
- - ast
- - av
- - ay
- - az
- - azb
- - ba
- - bar
- - bcl
- - be
- - bg
- - bho
- - bjn
- - bn
- - bo
- - bpy
- - br
- - bs
- - bxr
- - ca
- - cbk
- - cdo
- - ce
- - ceb
- - chr
- - ckb
- - co
- - crh
- - cs
- - csb
- - cv
- - cy
- - da
- - de
- - diq
- - dsb
- - dty
- - dv
- - egl
- - el
- - en
- - eo
- - es
- - et
- - eu
- - ext
- - fa
- - fi
- - fo
- - fr
- - frp
- - fur
- - fy
- - ga
- - gag
- - gd
- - gl
- - glk
- - gn
- - gu
- - gv
- - ha
- - hak
- - he
- - hi
- - hif
- - hr
- - hsb
- - ht
- - hu
- - hy
- - ia
- - id
- - ie
- - ig
- - ilo
- - io
- - is
- - it
- - ja
- - jam
- - jbo
- - jv
- - ka
- - kaa
- - kab
- - kbd
- - kk
- - km
- - kn
- - ko
- - koi
- - kok
- - krc
- - ksh
- - ku
- - kv
- - kw
- - ky
- - la
- - lad
- - lb
- - lez
- - lg
- - li
- - lij
- - lmo
- - ln
- - lo
- - lrc
- - lt
- - ltg
- - lv
- - lzh
- - mai
- - map
- - mdf
- - mg
- - mhr
- - mi
- - min
- - mk
- - ml
- - mn
- - mr
- - mrj
- - ms
- - mt
- - mwl
- - my
- - myv
- - mzn
- - nan
- - nap
- - nb
- - nci
- - nds
- - ne
- - new
- - nl
- - nn
- - nrm
- - nso
- - nv
- - oc
- - olo
- - om
- - or
- - os
- - pa
- - pag
- - pam
- - pap
- - pcd
- - pdc
- - pfl
- - pl
- - pnb
- - ps
- - pt
- - qu
- - rm
- - ro
- - roa
- - ru
- - rue
- - rup
- - rw
- - sa
- - sah
- - sc
- - scn
- - sco
- - sd
- - sgs
- - sh
- - si
- - sk
- - sl
- - sme
- - sn
- - so
- - sq
- - sr
- - srn
- - stq
- - su
- - sv
- - sw
- - szl
- - ta
- - tcy
- - te
- - tet
- - tg
- - th
- - tk
- - tl
- - tn
- - to
- - tr
- - tt
- - tyv
- - udm
- - ug
- - uk
- - ur
- - uz
- - vec
- - vep
- - vi
- - vls
- - vo
- - vro
- - wa
- - war
- - wo
- - wuu
- - xh
- - xmf
- - yi
- - yo
- - zea
- - zh
- language_bcp47:
- - be-tarask
- - map-bms
- - nds-nl
- - roa-tara
- - zh-yue
- license:
- - odbl
- multilinguality:
- - multilingual
- size_categories:
- - 100K<n<1M
- source_datasets:
- - original
- task_categories:
- - text-classification
- task_ids: []
- paperswithcode_id: wili-2018
- pretty_name: Wili2018
- tags:
- - language-identification
- dataset_info:
-   features:
-   - name: sentence
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           0: cdo
-           1: glk
-           2: jam
-           3: lug
-           4: san
-           5: rue
-           6: wol
-           7: new
-           8: mwl
-           9: bre
-           10: ara
-           11: hye
-           12: xmf
-           13: ext
-           14: cor
-           15: yor
-           16: div
-           17: asm
-           18: lat
-           19: cym
-           20: hif
-           21: ace
-           22: kbd
-           23: tgk
-           24: rus
-           25: nso
-           26: mya
-           27: msa
-           28: ava
-           29: cbk
-           30: urd
-           31: deu
-           32: swa
-           33: pus
-           34: bxr
-           35: udm
-           36: csb
-           37: yid
-           38: vro
-           39: por
-           40: pdc
-           41: eng
-           42: tha
-           43: hat
-           44: lmo
-           45: pag
-           46: jav
-           47: chv
-           48: nan
-           49: sco
-           50: kat
-           51: bho
-           52: bos
-           53: kok
-           54: oss
-           55: mri
-           56: fry
-           57: cat
-           58: azb
-           59: kin
-           60: hin
-           61: sna
-           62: dan
-           63: egl
-           64: mkd
-           65: ron
-           66: bul
-           67: hrv
-           68: som
-           69: pam
-           70: nav
-           71: ksh
-           72: nci
-           73: khm
-           74: sgs
-           75: srn
-           76: bar
-           77: cos
-           78: ckb
-           79: pfl
-           80: arz
-           81: roa-tara
-           82: fra
-           83: mai
-           84: zh-yue
-           85: guj
-           86: fin
-           87: kir
-           88: vol
-           89: hau
-           90: afr
-           91: uig
-           92: lao
-           93: swe
-           94: slv
-           95: kor
-           96: szl
-           97: srp
-           98: dty
-           99: nrm
-           100: dsb
-           101: ind
-           102: wln
-           103: pnb
-           104: ukr
-           105: bpy
-           106: vie
-           107: tur
-           108: aym
-           109: lit
-           110: zea
-           111: pol
-           112: est
-           113: scn
-           114: vls
-           115: stq
-           116: gag
-           117: grn
-           118: kaz
-           119: ben
-           120: pcd
-           121: bjn
-           122: krc
-           123: amh
-           124: diq
-           125: ltz
-           126: ita
-           127: kab
-           128: bel
-           129: ang
-           130: mhr
-           131: che
-           132: koi
-           133: glv
-           134: ido
-           135: fao
-           136: bak
-           137: isl
-           138: bcl
-           139: tet
-           140: jpn
-           141: kur
-           142: map-bms
-           143: tyv
-           144: olo
-           145: arg
-           146: ori
-           147: lim
-           148: tel
-           149: lin
-           150: roh
-           151: sqi
-           152: xho
-           153: mlg
-           154: fas
-           155: hbs
-           156: tam
-           157: aze
-           158: lad
-           159: nob
-           160: sin
-           161: gla
-           162: nap
-           163: snd
-           164: ast
-           165: mal
-           166: mdf
-           167: tsn
-           168: nds
-           169: tgl
-           170: nno
-           171: sun
-           172: lzh
-           173: jbo
-           174: crh
-           175: pap
-           176: oci
-           177: hak
-           178: uzb
-           179: zho
-           180: hsb
-           181: sme
-           182: mlt
-           183: vep
-           184: lez
-           185: nld
-           186: nds-nl
-           187: mrj
-           188: spa
-           189: ceb
-           190: ina
-           191: heb
-           192: hun
-           193: que
-           194: kaa
-           195: mar
-           196: vec
-           197: frp
-           198: ell
-           199: sah
-           200: eus
-           201: ces
-           202: slk
-           203: chr
-           204: lij
-           205: nep
-           206: srd
-           207: ilo
-           208: be-tarask
-           209: bod
-           210: orm
-           211: war
-           212: glg
-           213: mon
-           214: gle
-           215: min
-           216: ibo
-           217: ile
-           218: epo
-           219: lav
-           220: lrc
-           221: als
-           222: mzn
-           223: rup
-           224: fur
-           225: tat
-           226: myv
-           227: pan
-           228: ton
-           229: kom
-           230: wuu
-           231: tcy
-           232: tuk
-           233: kan
-           234: ltg
-   config_name: WiLI-2018 dataset
-   splits:
-   - name: train
-     num_bytes: 65408201
-     num_examples: 117500
-   - name: test
-     num_bytes: 66491260
-     num_examples: 117500
-   download_size: 130516351
-   dataset_size: 131899461
- ---
-
- # Dataset Card for wili_2018
-
- ## Table of Contents
- - [Dataset Description](#dataset-description)
-   - [Dataset Summary](#dataset-summary)
-   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
-   - [Languages](#languages)
- - [Dataset Structure](#dataset-structure)
-   - [Data Instances](#data-instances)
-   - [Data Fields](#data-fields)
-   - [Data Splits](#data-splits)
- - [Dataset Creation](#dataset-creation)
-   - [Curation Rationale](#curation-rationale)
-   - [Source Data](#source-data)
-   - [Annotations](#annotations)
-   - [Personal and Sensitive Information](#personal-and-sensitive-information)
- - [Considerations for Using the Data](#considerations-for-using-the-data)
-   - [Social Impact of Dataset](#social-impact-of-dataset)
-   - [Discussion of Biases](#discussion-of-biases)
-   - [Other Known Limitations](#other-known-limitations)
- - [Additional Information](#additional-information)
-   - [Dataset Curators](#dataset-curators)
-   - [Licensing Information](#licensing-information)
-   - [Citation Information](#citation-information)
-   - [Contributions](#contributions)
-
- ## Dataset Description
-
- - **Homepage:** https://zenodo.org/record/841984
- - **Repository:** [Needs More Information]
- - **Paper:** https://arxiv.org/pdf/1801.07779
- - **Leaderboard:** [Needs More Information]
- - **Point of Contact:** Thoma, Martin (Email: [email protected])
-
- ### Dataset Summary
-
- WiLI-2018, the Wikipedia language identification benchmark dataset, contains 235,000 paragraphs in 235 languages. The dataset is balanced, and a train-test split is provided.
-
- ### Supported Tasks and Leaderboards
-
- [Needs More Information]
-
- ### Languages
-
- 235 different languages.
-
- ## Dataset Structure
-
- ### Data Instances
-
- ```
- {
-   'label': 207,
-   'sentence': 'Ti Turkia ket maysa a demokrata, sekular, unitario, batay-linteg a republika nga addaan ti taga-ugma a tinawtawid a kultura. Ti Turkia ket umadadu a naipatipon iti Laud babaen ti panagkameng kadagiti organisasion a kas ti Konsilo iti Europa, NATO, OECD, OSCE ken ti G-20 a dagiti kangrunaan nga ekonomia. Ti Turkia ket nangrugi a nakitulag ti napno a panagkameng iti Kappon ti Europa idi 2005, nga isu ket maysa idin a kumaduaan a kameng iti Europeano a Komunidad ti Ekonomia manipud idi 1963 ken nakadanon ti maysa a tulagan ti kappon ti aduana idi 1995. Ti Turkia ket nagtaraken iti asideg a kultural, politikal, ekonomiko ken industria a panakibiang iti Tengnga a Daya, dagiti Turko nga estado iti Tengnga nga Asia ken dagiti pagilian ti Aprika babaen ti panagkameng kadagiti organisasion a kas ti Turko a Konsilo, Nagsaupan nga Administrasion iti Turko nga Arte ken Kultura, Organisasion iti Islamiko a Panagtitinnulong ken ti Organisasion ti Ekonomiko a Panagtitinnulong.'
- }
- ```
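A minimal sketch of reproducing such an instance with the `datasets` library (assuming the Hub id `wili_2018`; `int2str` decodes the integer label through the ClassLabel names listed in the YAML header above):

```python
from datasets import load_dataset

# Load the train split of WiLI-2018 from the Hub.
ds = load_dataset("wili_2018", split="train")

example = ds[0]
# The integer label indexes the ClassLabel names; e.g. 207 -> "ilo" (Iloko).
print(ds.features["label"].int2str(example["label"]))
print(example["sentence"][:100])
```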
-
- ### Data Fields
-
- [Needs More Information]
-
- ### Data Splits
-
- 117,500 paragraphs each for the train and test splits (235,000 in total).
-
- ## Dataset Creation
-
- ### Curation Rationale
-
- [Needs More Information]
-
- ### Source Data
-
- #### Initial Data Collection and Normalization
-
- [Needs More Information]
-
- #### Who are the source language producers?
-
- [Needs More Information]
-
- ### Annotations
-
- #### Annotation process
-
- [Needs More Information]
-
- #### Who are the annotators?
-
- [Needs More Information]
-
- ### Personal and Sensitive Information
-
- [Needs More Information]
-
- ## Considerations for Using the Data
-
- ### Social Impact of Dataset
-
- [Needs More Information]
-
- ### Discussion of Biases
-
- [Needs More Information]
-
- ### Other Known Limitations
-
- [Needs More Information]
-
- ## Additional Information
-
- ### Dataset Curators
-
- The dataset was initially created by Martin Thoma.
-
- ### Licensing Information
-
- ODC Open Database License v1.0
-
- ### Citation Information
-
- ```
- @dataset{thoma_martin_2018_841984,
-   author = {Thoma, Martin},
-   title = {{WiLI-2018 - Wikipedia Language Identification database}},
-   month = jan,
-   year = 2018,
-   publisher = {Zenodo},
-   version = {1.0.0},
-   doi = {10.5281/zenodo.841984},
-   url = {https://doi.org/10.5281/zenodo.841984}
- }
- ```
-
- ### Contributions
-
- Thanks to [@Shubhambindal2017](https://github.com/Shubhambindal2017) for adding this dataset.
WiLI-2018 dataset/wili_2018-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9df8d99d63caf4e12c141b2511fac6cb971ffc905f827fb6877135d59f660f5f
+ size 46000316
WiLI-2018 dataset/wili_2018-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e504c699af40388eda89ce3484bf27da5e86bf55b7754cafb935be2b16257b96
+ size 45717949
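The two entries above are Git LFS pointers to the converted Parquet splits. Once the repository is cloned with LFS, they can be read with any Parquet reader; a minimal sketch with pandas (paths exactly as committed here):

```python
import pandas as pd

# Read the converted splits directly from the committed Parquet files.
train = pd.read_parquet("WiLI-2018 dataset/wili_2018-train.parquet")
test = pd.read_parquet("WiLI-2018 dataset/wili_2018-test.parquet")

# Per the dataset metadata, each split holds 117500 rows with
# "sentence" (string) and "label" (class index) columns.
print(train.shape, test.shape)
```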
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"WiLI-2018 dataset": {"description": "It is a benchmark dataset for language identification and contains 235000 paragraphs of 235 languages\n", "citation": "@dataset{thoma_martin_2018_841984,\n author = {Thoma, Martin},\n title = {{WiLI-2018 - Wikipedia Language Identification database}},\n month = jan,\n year = 2018,\n publisher = {Zenodo},\n version = {1.0.0},\n doi = {10.5281/zenodo.841984},\n url = {https://doi.org/10.5281/zenodo.841984}\n}\n", "homepage": "https://zenodo.org/record/841984", "license": "ODC Open Database License v1.0", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 235, "names": ["cdo", "glk", "jam", "lug", "san", "rue", "wol", "new", "mwl", "bre", "ara", "hye", "xmf", "ext", "cor", "yor", "div", "asm", "lat", "cym", "hif", "ace", "kbd", "tgk", "rus", "nso", "mya", "msa", "ava", "cbk", "urd", "deu", "swa", "pus", "bxr", "udm", "csb", "yid", "vro", "por", "pdc", "eng", "tha", "hat", "lmo", "pag", "jav", "chv", "nan", "sco", "kat", "bho", "bos", "kok", "oss", "mri", "fry", "cat", "azb", "kin", "hin", "sna", "dan", "egl", "mkd", "ron", "bul", "hrv", "som", "pam", "nav", "ksh", "nci", "khm", "sgs", "srn", "bar", "cos", "ckb", "pfl", "arz", "roa-tara", "fra", "mai", "zh-yue", "guj", "fin", "kir", "vol", "hau", "afr", "uig", "lao", "swe", "slv", "kor", "szl", "srp", "dty", "nrm", "dsb", "ind", "wln", "pnb", "ukr", "bpy", "vie", "tur", "aym", "lit", "zea", "pol", "est", "scn", "vls", "stq", "gag", "grn", "kaz", "ben", "pcd", "bjn", "krc", "amh", "diq", "ltz", "ita", "kab", "bel", "ang", "mhr", "che", "koi", "glv", "ido", "fao", "bak", "isl", "bcl", "tet", "jpn", "kur", "map-bms", "tyv", "olo", "arg", "ori", "lim", "tel", "lin", "roh", "sqi", "xho", "mlg", "fas", "hbs", "tam", "aze", "lad", "nob", "sin", "gla", "nap", "snd", "ast", "mal", "mdf", "tsn", "nds", "tgl", "nno", "sun", "lzh", "jbo", "crh", "pap", "oci", "hak", "uzb", "zho", "hsb", "sme", "mlt", "vep", "lez", "nld", "nds-nl", "mrj", "spa", "ceb", "ina", "heb", "hun", "que", "kaa", "mar", "vec", "frp", "ell", "sah", "eus", "ces", "slk", "chr", "lij", "nep", "srd", "ilo", "be-tarask", "bod", "orm", "war", "glg", "mon", "gle", "min", "ibo", "ile", "epo", "lav", "lrc", "als", "mzn", "rup", "fur", "tat", "myv", "pan", "ton", "kom", "wuu", "tcy", "tuk", "kan", "ltg"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "text-classification", "text_column": "sentence", "label_column": "label", "labels": ["ace", "afr", "als", "amh", "ang", "ara", "arg", "arz", "asm", "ast", "ava", "aym", "azb", "aze", "bak", "bar", "bcl", "be-tarask", "bel", "ben", "bho", "bjn", "bod", "bos", "bpy", "bre", "bul", "bxr", "cat", "cbk", "cdo", "ceb", "ces", "che", "chr", "chv", "ckb", "cor", "cos", "crh", "csb", "cym", "dan", "deu", "diq", "div", "dsb", "dty", "egl", "ell", "eng", "epo", "est", "eus", "ext", "fao", "fas", "fin", "fra", "frp", "fry", "fur", "gag", "gla", "gle", "glg", "glk", "glv", "grn", "guj", "hak", "hat", "hau", "hbs", "heb", "hif", "hin", "hrv", "hsb", "hun", "hye", "ibo", "ido", "ile", "ilo", "ina", "ind", "isl", "ita", "jam", "jav", "jbo", "jpn", "kaa", "kab", "kan", "kat", "kaz", "kbd", "khm", "kin", "kir", "koi", "kok", "kom", "kor", "krc", "ksh", "kur", "lad", "lao", "lat", "lav", "lez", "lij", "lim", "lin", "lit", "lmo", "lrc", "ltg", "ltz", "lug", "lzh", "mai", "mal", "map-bms", "mar", "mdf", "mhr", "min", "mkd", "mlg", "mlt", "mon", "mri", "mrj", "msa", "mwl", "mya", "myv", 
"mzn", "nan", "nap", "nav", "nci", "nds", "nds-nl", "nep", "new", "nld", "nno", "nob", "nrm", "nso", "oci", "olo", "ori", "orm", "oss", "pag", "pam", "pan", "pap", "pcd", "pdc", "pfl", "pnb", "pol", "por", "pus", "que", "roa-tara", "roh", "ron", "rue", "rup", "rus", "sah", "san", "scn", "sco", "sgs", "sin", "slk", "slv", "sme", "sna", "snd", "som", "spa", "sqi", "srd", "srn", "srp", "stq", "sun", "swa", "swe", "szl", "tam", "tat", "tcy", "tel", "tet", "tgk", "tgl", "tha", "ton", "tsn", "tuk", "tur", "tyv", "udm", "uig", "ukr", "urd", "uzb", "vec", "vep", "vie", "vls", "vol", "vro", "war", "wln", "wol", "wuu", "xho", "xmf", "yid", "yor", "zea", "zh-yue", "zho"]}], "builder_name": "wili_2018", "config_name": "WiLI-2018 dataset", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 65408201, "num_examples": 117500, "dataset_name": "wili_2018"}, "test": {"name": "test", "num_bytes": 66491260, "num_examples": 117500, "dataset_name": "wili_2018"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1ZzlIQvw1KNBG97QQCfdatvVrrbeLaM1u": {"num_bytes": 64716393, "checksum": "895b3892a1edba1702b0f2117b756204ccc177a1c285420234bdb5d717ad4100"}, "https://drive.google.com/uc?export=download&id=1Xx4kFc1Xdzz8AhDasxZ0cSa-a35EQSDZ": {"num_bytes": 65799958, "checksum": "663f32b6f7d8a26b83e251803d386f29dcd558762125f4f8289f2cef067d4ce8"}}, "download_size": 130516351, "post_processing_size": null, "dataset_size": 131899461, "size_in_bytes": 262415812}}
wili_2018.py DELETED
@@ -1,334 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- """WiLI-2018, the Wikipedia language identification benchmark dataset"""
-
-
- import datasets
- from datasets.tasks import TextClassification
-
-
- _CITATION = """\
- @dataset{thoma_martin_2018_841984,
-   author = {Thoma, Martin},
-   title = {{WiLI-2018 - Wikipedia Language Identification database}},
-   month = jan,
-   year = 2018,
-   publisher = {Zenodo},
-   version = {1.0.0},
-   doi = {10.5281/zenodo.841984},
-   url = {https://doi.org/10.5281/zenodo.841984}
- }
- """
-
- _DESCRIPTION = """\
- It is a benchmark dataset for language identification and contains 235000 paragraphs of 235 languages
- """
-
- # TODO: Add a link to an official homepage for the dataset here
- _HOMEPAGE = "https://zenodo.org/record/841984"
-
- # TODO: Add the licence for the dataset here if you can find it
- _LICENSE = "ODC Open Database License v1.0"
-
-
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _TRAIN_DOWNLOAD_URL = "https://drive.google.com/uc?export=download&id=1ZzlIQvw1KNBG97QQCfdatvVrrbeLaM1u"
- _TEST_DOWNLOAD_URL = "https://drive.google.com/uc?export=download&id=1Xx4kFc1Xdzz8AhDasxZ0cSa-a35EQSDZ"
-
- _CLASSES = [
-     "cdo",
-     "glk",
-     "jam",
-     "lug",
-     "san",
-     "rue",
-     "wol",
-     "new",
-     "mwl",
-     "bre",
-     "ara",
-     "hye",
-     "xmf",
-     "ext",
-     "cor",
-     "yor",
-     "div",
-     "asm",
-     "lat",
-     "cym",
-     "hif",
-     "ace",
-     "kbd",
-     "tgk",
-     "rus",
-     "nso",
-     "mya",
-     "msa",
-     "ava",
-     "cbk",
-     "urd",
-     "deu",
-     "swa",
-     "pus",
-     "bxr",
-     "udm",
-     "csb",
-     "yid",
-     "vro",
-     "por",
-     "pdc",
-     "eng",
-     "tha",
-     "hat",
-     "lmo",
-     "pag",
-     "jav",
-     "chv",
-     "nan",
-     "sco",
-     "kat",
-     "bho",
-     "bos",
-     "kok",
-     "oss",
-     "mri",
-     "fry",
-     "cat",
-     "azb",
-     "kin",
-     "hin",
-     "sna",
-     "dan",
-     "egl",
-     "mkd",
-     "ron",
-     "bul",
-     "hrv",
-     "som",
-     "pam",
-     "nav",
-     "ksh",
-     "nci",
-     "khm",
-     "sgs",
-     "srn",
-     "bar",
-     "cos",
-     "ckb",
-     "pfl",
-     "arz",
-     "roa-tara",
-     "fra",
-     "mai",
-     "zh-yue",
-     "guj",
-     "fin",
-     "kir",
-     "vol",
-     "hau",
-     "afr",
-     "uig",
-     "lao",
-     "swe",
-     "slv",
-     "kor",
-     "szl",
-     "srp",
-     "dty",
-     "nrm",
-     "dsb",
-     "ind",
-     "wln",
-     "pnb",
-     "ukr",
-     "bpy",
-     "vie",
-     "tur",
-     "aym",
-     "lit",
-     "zea",
-     "pol",
-     "est",
-     "scn",
-     "vls",
-     "stq",
-     "gag",
-     "grn",
-     "kaz",
-     "ben",
-     "pcd",
-     "bjn",
-     "krc",
-     "amh",
-     "diq",
-     "ltz",
-     "ita",
-     "kab",
-     "bel",
-     "ang",
-     "mhr",
-     "che",
-     "koi",
-     "glv",
-     "ido",
-     "fao",
-     "bak",
-     "isl",
-     "bcl",
-     "tet",
-     "jpn",
-     "kur",
-     "map-bms",
-     "tyv",
-     "olo",
-     "arg",
-     "ori",
-     "lim",
-     "tel",
-     "lin",
-     "roh",
-     "sqi",
-     "xho",
-     "mlg",
-     "fas",
-     "hbs",
-     "tam",
-     "aze",
-     "lad",
-     "nob",
-     "sin",
-     "gla",
-     "nap",
-     "snd",
-     "ast",
-     "mal",
-     "mdf",
-     "tsn",
-     "nds",
-     "tgl",
-     "nno",
-     "sun",
-     "lzh",
-     "jbo",
-     "crh",
-     "pap",
-     "oci",
-     "hak",
-     "uzb",
-     "zho",
-     "hsb",
-     "sme",
-     "mlt",
-     "vep",
-     "lez",
-     "nld",
-     "nds-nl",
-     "mrj",
-     "spa",
-     "ceb",
-     "ina",
-     "heb",
-     "hun",
-     "que",
-     "kaa",
-     "mar",
-     "vec",
-     "frp",
-     "ell",
-     "sah",
-     "eus",
-     "ces",
-     "slk",
-     "chr",
-     "lij",
-     "nep",
-     "srd",
-     "ilo",
-     "be-tarask",
-     "bod",
-     "orm",
-     "war",
-     "glg",
-     "mon",
-     "gle",
-     "min",
-     "ibo",
-     "ile",
-     "epo",
-     "lav",
-     "lrc",
-     "als",
-     "mzn",
-     "rup",
-     "fur",
-     "tat",
-     "myv",
-     "pan",
-     "ton",
-     "kom",
-     "wuu",
-     "tcy",
-     "tuk",
-     "kan",
-     "ltg",
- ]
-
-
- class Wili_2018(datasets.GeneratorBasedBuilder):
-     """WiLI Language Identification Dataset"""
-
-     VERSION = datasets.Version("1.1.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="WiLI-2018 dataset",
-             version=VERSION,
-             description="Plain text of import of WiLI-2018",
-         )
-     ]
-
-     def _info(self):
-
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=datasets.Features(
-                 {"sentence": datasets.Value("string"), "label": datasets.features.ClassLabel(names=_CLASSES)}
-             ),
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-             task_templates=[TextClassification(text_column="sentence", label_column="label")],
-         )
-
-     def _split_generators(self, dl_manager):
-         train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
-         test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
-         ]
-
-     def _generate_examples(self, filepath):
-         # Each raw line holds one example: the paragraph text followed by a
-         # comma and a 1-based integer label. The text itself may contain
-         # commas, so split on the last comma only.
-         with open(filepath, encoding="utf-8") as f:
-             for id_, line in enumerate(f):
-                 text, label = line.rsplit(",", 1)
-                 text = text.strip('"')
-                 label = int(label.strip())
-                 # Shift to the 0-based index expected by the ClassLabel feature.
-                 yield id_, {"sentence": text, "label": label - 1}
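For context, the deleted loader parses raw files in which each line is a quoted paragraph followed by a comma and a 1-based label; a sketch of that parsing rule on a hypothetical sample line (not taken from the dataset):

```python
# Hypothetical raw line: quoted text, last comma, then the 1-based label 208.
line = '"Ti Turkia ket maysa a republika.",208\n'

text, label = line.rsplit(",", 1)  # split on the last comma only
text = text.strip('"')
label = int(label.strip()) - 1     # 0-based ClassLabel index

print(text, label)  # -> Ti Turkia ket maysa a republika. 207 ("ilo")
```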