NazaGara committed on
Commit
20b3e87
·
1 Parent(s): 577d465

Upload loading file

Browse files
Files changed (1) hide show
  1. wikiner.py +124 -0
wikiner.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import logging
17
+
18
+ import datasets
19
+
20
+
21
+ _CITATION = """/something"""
22
+
23
+ _DESCRIPTION = """Dataset used to train a NER model"""
24
+
25
+ _URL = "https://github.com/NazaGara/betoNER/tree/main/data/wikiner"
26
+ _TRAINING_FILE = "train.conllu"
27
+
28
+
29
class ConllppConfig(datasets.BuilderConfig):
    """BuilderConfig for the wikiner loading script."""

    def __init__(self, **kwargs):
        """Create a config, forwarding every keyword argument to the base class.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
38
+
39
+
40
class Conllpp(datasets.GeneratorBasedBuilder):
    """WikiNER dataset builder.

    Reads a CoNLL-like file in which each non-blank line holds a token and
    its NER tag separated by a single space, and sentences are delimited by
    blank lines or "-DOCSTART-" marker lines.
    """

    BUILDER_CONFIGS = [
        ConllppConfig(
            name="wikiner",
            version=datasets.Version("1.0.0"),
            description="wikiner dataset",
        ),
    ]

    def _info(self):
        """Declare the dataset schema: string id, token sequence, IOB NER tags."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-PER",
                                "I-PER",
                                "B-ORG",
                                "I-ORG",
                                "B-LOC",
                                "I-LOC",
                                "B-MISC",
                                "I-MISC",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators (train split only)."""
        # BUG FIX: the original concatenated _URL and _TRAINING_FILE with no
        # separator, yielding ".../wikinertrain.conllu". Join with "/".
        # NOTE(review): _URL points at a GitHub *tree* page, which serves
        # HTML rather than the raw file; a raw.githubusercontent.com URL is
        # likely required for download_and_extract to work — confirm against
        # the repository layout.
        urls_to_download = {
            "train": f"{_URL}/{_TRAINING_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from the CoNLL-like file at *filepath*.

        Each example is ``{"id": str, "tokens": list[str], "ner_tags": list[str]}``;
        the integer key and the "id" field both carry the running sentence count.
        """
        logging.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:  # CHECK ENCODING
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    # Sentence boundary: emit the accumulated example, if any.
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    # Token lines are "<token> <tag>", space separated.
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    # rstrip() drops the trailing newline from the tag field.
                    ner_tags.append(splits[1].rstrip())
            # Flush the last sentence: the file need not end with a blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }