Datasets:

Modalities:
Text
ArXiv:
Libraries:
Datasets
License:
Davlan committed on
Commit
736ccd1
·
1 Parent(s): 7a843d4

Delete masakhaner2.0.py

Browse files
Files changed (1) hide show
  1. masakhaner2.0.py +0 -186
masakhaner2.0.py DELETED
@@ -1,186 +0,0 @@
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""MasakhaNER 2.0: Named Entity Recognition for African Languages"""

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@article{Adelani2022MasakhaNER2A,
title={MasakhaNER 2.0: Africa-centric Transfer Learning for Named Entity Recognition},
author={David Ifeoluwa Adelani and Graham Neubig and Sebastian Ruder and Shruti Rijhwani and Michael Beukman and Chester Palen-Michel and Constantine Lignos and Jesujoba Oluwadara Alabi and Shamsuddeen Hassan Muhammad and Peter Nabende and Cheikh M. Bamba Dione and Andiswa Bukula and Rooweither Mabuya and Bonaventure F. P. Dossou and Blessing K. Sibanda and Happy Buzaaba and Jonathan Mukiibi and Godson Kalipe and Derguene Mbaye and Amelia Taylor and Fatoumata Kabore and Chris C. Emezue and Anuoluwapo Aremu and Perez Ogayo and Catherine W. Gitau and Edwin Munkoh-Buabeng and Victoire Memdjokam Koagne and Allahsera Auguste Tapo and Tebogo Macucwa and Vukosi Marivate and Elvis Mboning and Tajuddeen R. Gwadabe and Tosin P. Adewumi and Orevaoghene Ahia and Joyce Nakatumba-Nabende and Neo L. Mokono and Ignatius M Ezeani and Chiamaka Ijeoma Chukwuneke and Mofetoluwa Adeyemi and Gilles Hacheme and Idris Abdulmumin and Odunayo Ogundepo and Oreen Yousuf and Tatiana Moteu Ngoli and Dietrich Klakow},
journal={ArXiv},
year={2022},
volume={abs/2210.12391}
}
"""

# Dataset card text. Fixes vs. the original:
# - "Kiswahili (swą)" -> "(swa)" (mis-encoded character)
# - "Nigerian Pidgin" now carries its ISO code "(pcm)" like every other entry
# - "chShona" -> "chiShona"
# - "all the ten languages" -> "all the twenty languages" (20 languages listed)
# - the "more details" link pointed at the MasakhaNER 1.0 paper (2103.11811);
#   the 2.0 paper is arXiv:2210.12391, matching _CITATION and the homepage.
_DESCRIPTION = """\
MasakhaNER 2.0 is the largest publicly available high-quality dataset for named entity recognition (NER) in 20 African languages.

Named entities are phrases that contain the names of persons, organizations, locations, times and quantities.

Example:
[PER Wolff] , currently a journalist in [LOC Argentina] , played with [PER Del Bosque] in the final years of the seventies in [ORG Real Madrid] .
MasakhaNER is a named entity dataset consisting of PER, ORG, LOC, and DATE entities annotated by Masakhane for 20 African languages:
- Bambara (bam)
- Ghomala (bbj)
- Ewe (ewe)
- Fon (fon)
- Hausa (hau)
- Igbo (ibo)
- Kinyarwanda (kin)
- Luganda (lug)
- Dholuo (luo)
- Mossi (mos)
- Chichewa (nya)
- Nigerian Pidgin (pcm)
- chiShona (sna)
- Kiswahili (swa)
- Setswana (tsn)
- Twi (twi)
- Wolof (wol)
- isiXhosa (xho)
- Yorùbá (yor)
- isiZulu (zul)

The train/validation/test sets are available for all the twenty languages.

For more details see https://arxiv.org/abs/2210.12391
"""

# Base URL of the per-language data folders in the upstream GitHub repository;
# each language folder contains the three split files below.
_URL = "https://github.com/masakhane-io/masakhane-ner/raw/main/MasakhaNER2.0/data/"
_TRAINING_FILE = "train.txt"
_DEV_FILE = "dev.txt"
_TEST_FILE = "test.txt"

class MasakhanerConfig(datasets.BuilderConfig):
    """BuilderConfig for the MasakhaNER 2.0 dataset.

    A thin wrapper around ``datasets.BuilderConfig``: each per-language
    configuration is expressed entirely through the inherited fields
    (``name``, ``version``, ``description``).
    """

    def __init__(self, **kwargs):
        """Initialize a MasakhaNER config.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)


class Masakhaner(datasets.GeneratorBasedBuilder):
    """MasakhaNER 2.0 dataset builder — one BuilderConfig per language."""

    # ISO 639-3 code -> language label used in each config description.
    # NOTE(review): "luo" (Dholuo) is listed in _DESCRIPTION and shipped in
    # the upstream data repository but was missing from the original
    # BUILDER_CONFIGS; it is added here. The stray backtick in the original
    # "Chichewa`" description is also removed.
    _LANGUAGES = {
        "bam": "Bambara",
        "bbj": "Ghomala",
        "ewe": "Ewe",
        "fon": "Fon",
        "hau": "Hausa",
        "ibo": "Igbo",
        "kin": "Kinyarwanda",
        "lug": "Luganda",
        "luo": "Dholuo",
        "mos": "Mossi",
        "nya": "Chichewa",
        "pcm": "Nigerian-Pidgin",
        "sna": "Shona",
        "swa": "Swahili",
        "tsn": "Setswana",
        "twi": "Twi",
        "wol": "Wolof",
        "xho": "Xhosa",
        "yor": "Yoruba",
        "zul": "Zulu",
    }

    # Built from the table above instead of 20 near-identical literals.
    BUILDER_CONFIGS = [
        MasakhanerConfig(
            name=lang,
            version=datasets.Version("1.0.0"),
            description=f"Masakhaner {label} dataset",
        )
        for lang, label in _LANGUAGES.items()
    ]

    def _info(self):
        """Return the dataset metadata (features, homepage, citation).

        Each example is a sentence: a list of tokens with one BIO-style
        NER tag per token (PER/ORG/LOC/DATE entity types).
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-PER",
                                "I-PER",
                                "B-ORG",
                                "I-ORG",
                                "B-LOC",
                                "I-LOC",
                                "B-DATE",
                                "I-DATE",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://arxiv.org/abs/2210.12391",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-language split files and return SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{self.config.name}/{_TRAINING_FILE}",
            "dev": f"{_URL}{self.config.name}/{_DEV_FILE}",
            "test": f"{_URL}{self.config.name}/{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield (guid, example) pairs parsed from a CoNLL-style file.

        The file holds one "token tag" pair per line, with sentences
        separated by blank lines.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                # A blank (or whitespace-only) line ends the sentence; the
                # strip() test also tolerates "\r\n" endings, which the
                # original `line == "\n"` check missed.
                if not line.strip():
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    # Whitespace split absorbs the trailing newline and any
                    # accidental double spaces (split(" ") would have produced
                    # an empty tag there), so no rstrip() is needed.
                    splits = line.split()
                    tokens.append(splits[0])
                    ner_tags.append(splits[1])
            # Flush the last sentence if the file lacks a trailing blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }