qanastek committed on
Commit
51d3529
1 Parent(s): 00d66fb

Create MANTRAGSC.py

Files changed (1)
  1. MANTRAGSC.py +284 -0
MANTRAGSC.py ADDED
@@ -0,0 +1,284 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # pip install xmltodict
+
+ import random
+ from pathlib import Path
+ from itertools import product
+ from dataclasses import dataclass
+
+ import xmltodict
+ import numpy as np
+
+ import datasets
+
+ _CITATION = """\
+ @article{10.1093/jamia/ocv037,
+     author = {Kors, Jan A and Clematide, Simon and Akhondi,
+     Saber A and van Mulligen, Erik M and Rebholz-Schuhmann, Dietrich},
+     title = "{A multilingual gold-standard corpus for biomedical concept recognition: the Mantra GSC}",
+     journal = {Journal of the American Medical Informatics Association},
+     volume = {22},
+     number = {5},
+     pages = {948-956},
+     year = {2015},
+     month = {05},
+     abstract = "{Objective To create a multilingual gold-standard corpus for biomedical concept recognition.Materials
+     and methods We selected text units from different parallel corpora (Medline abstract titles, drug labels,
+     biomedical patent claims) in English, French, German, Spanish, and Dutch. Three annotators per language
+     independently annotated the biomedical concepts, based on a subset of the Unified Medical Language System and
+     covering a wide range of semantic groups. To reduce the annotation workload, automatically generated
+     preannotations were provided. Individual annotations were automatically harmonized and then adjudicated, and
+     cross-language consistency checks were carried out to arrive at the final annotations.Results The number of final
+     annotations was 5530. Inter-annotator agreement scores indicate good agreement (median F-score 0.79), and are
+     similar to those between individual annotators and the gold standard. The automatically generated harmonized
+     annotation set for each language performed equally well as the best annotator for that language.Discussion The use
+     of automatic preannotations, harmonized annotations, and parallel corpora helped to keep the manual annotation
+     efforts manageable. The inter-annotator agreement scores provide a reference standard for gauging the performance
+     of automatic annotation techniques.Conclusion To our knowledge, this is the first gold-standard corpus for
+     biomedical concept recognition in languages other than English. Other distinguishing features are the wide variety
+     of semantic groups that are being covered, and the diversity of text genres that were annotated.}",
+     issn = {1067-5027},
+     doi = {10.1093/jamia/ocv037},
+     url = {https://doi.org/10.1093/jamia/ocv037},
+     eprint = {https://academic.oup.com/jamia/article-pdf/22/5/948/34146393/ocv037.pdf},
+ }
+ """
+
+ _DESCRIPTION = """\
+ We selected text units from different parallel corpora (Medline abstract titles, drug labels, biomedical patent claims)
+ in English, French, German, Spanish, and Dutch. Three annotators per language independently annotated the biomedical
+ concepts, based on a subset of the Unified Medical Language System and covering a wide range of semantic groups.
+ """
+
+ _HOMEPAGE = "https://biosemantics.erasmusmc.nl/index.php/resources/mantra-gsc"
+
+ _LICENSE = "CC_BY_4p0"
+
+ _URL = "https://files.ifi.uzh.ch/cl/mantra/gsc/GSC-v1.1.zip"
+
+ _LANGUAGES_2 = {
+     "es": "Spanish",
+     "fr": "French",
+     "de": "German",
+     "nl": "Dutch",
+     "en": "English",
+ }
+
+ _DATASET_TYPES = {
+     "emea": "EMEA",
+     "medline": "Medline",
+     "patents": "Patent",
+ }
+
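+ # Configuration holder extending datasets.BuilderConfig with the extra
+ # schema/subset_id fields used across the DrBenchmark loaders.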
+ @dataclass
+ class DrBenchmarkConfig(datasets.BuilderConfig):
+     name: str = None
+     version: datasets.Version = None
+     description: str = None
+     schema: str = None
+     subset_id: str = None
+
+ class MANTRAGSC(datasets.GeneratorBasedBuilder):
+
+     SOURCE_VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = []
+
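+     # One configuration per (language, corpus) pair; the patent corpus was
+     # not produced for Dutch and Spanish, so those pairs are skipped.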
+     for language, dataset_type in product(_LANGUAGES_2, _DATASET_TYPES):
+
+         if dataset_type == "patents" and language in ["nl", "es"]:
+             continue
+
+         BUILDER_CONFIGS.append(
+             DrBenchmarkConfig(
+                 name=f"{language}_{dataset_type}",
+                 version=SOURCE_VERSION,
+                 description=f"Mantra GSC {_LANGUAGES_2[language]} {_DATASET_TYPES[dataset_type]} source schema",
+                 schema="source",
+                 subset_id=f"{language}_{_DATASET_TYPES[dataset_type]}",
+             )
+         )
+
+     DEFAULT_CONFIG_NAME = "fr_medline"
+
+     def _info(self):
+
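+         # The label inventory differs slightly between the three corpora
+         # (e.g. GEOG only occurs in the Medline titles), so pick the set
+         # matching the selected configuration.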
+         if "emea" in self.config.name:
+             names = ['B-ANAT', 'I-ANAT', 'I-PHEN', 'B-PROC', 'I-CHEM', 'I-PHYS', 'B-DEVI', 'O', 'B-PHYS', 'I-DEVI', 'B-OBJC', 'I-DISO', 'B-PHEN', 'I-LIVB', 'B-DISO', 'B-LIVB', 'B-CHEM', 'I-PROC']
+         elif "medline" in self.config.name:
+             names = ['B-ANAT', 'I-ANAT', 'B-PROC', 'I-CHEM', 'I-PHYS', 'B-GEOG', 'B-DEVI', 'O', 'B-PHYS', 'I-LIVB', 'B-OBJC', 'I-DISO', 'I-DEVI', 'B-PHEN', 'B-DISO', 'B-LIVB', 'B-CHEM', 'I-PROC']
+         elif "patents" in self.config.name:
+             names = ['B-ANAT', 'I-ANAT', 'B-PROC', 'I-CHEM', 'I-PHYS', 'B-DEVI', 'O', 'I-LIVB', 'B-OBJC', 'I-DISO', 'B-PHEN', 'I-PROC', 'B-DISO', 'I-DEVI', 'B-LIVB', 'B-CHEM', 'B-PHYS']
+
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("string"),
+                 "tokens": [datasets.Value("string")],
+                 "ner_tags": datasets.Sequence(
+                     datasets.features.ClassLabel(
+                         names=names,
+                     )
+                 ),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+
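+         # Download the full GSC archive once; each configuration then reads
+         # a single language/corpus-specific XML file from it.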
+         language, dataset_type = self.config.name.split("_")
+
+         data_dir = dl_manager.download_and_extract(_URL)
+         data_dir = Path(data_dir) / "GSC-v1.1" / f"{_DATASET_TYPES[dataset_type]}_GSC_{language}_man.xml"
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data_dir": data_dir,
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "data_dir": data_dir,
+                     "split": "validation",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "data_dir": data_dir,
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, data_dir, split):
+
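+         # Parse the whole annotated XML file into nested dicts.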
+         with open(data_dir, encoding="utf-8") as fd:
+             doc = xmltodict.parse(fd.read())
+
+         all_res = []
+
+         for d in doc["Corpus"]["document"]:
+
+             # xmltodict returns a bare dict when a document has a single unit
+             if not isinstance(d["unit"], list):
+                 d["unit"] = [d["unit"]]
+
+             for u in d["unit"]:
+
+                 text = u["text"]
+
+                 if "e" in u:
+
+                     if not isinstance(u["e"], list):
+                         u["e"] = [u["e"]]
+
+                     tags = [{
+                         "label": current["@grp"].upper(),
+                         "offset_start": int(current["@offset"]),
+                         "offset_end": int(current["@offset"]) + int(current["@len"]),
+                     } for current in u["e"]]
+
+                 else:
+                     tags = []
+
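+                 # Whitespace tokenization, keeping character offsets so the
+                 # entity spans can be aligned with tokens below.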
+                 _tokens = text.split(" ")
+                 tokens = []
+                 for i, t in enumerate(_tokens):
+
+                     concat = " ".join(_tokens[0:i+1])
+
+                     offset_start = len(concat) - len(t)
+                     offset_end = len(concat)
+
+                     tokens.append({
+                         "token": t,
+                         "offset_start": offset_start,
+                         "offset_end": offset_end,
+                     })
+
+                 ner_tags = [["O", 0] for _ in tokens]
+
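+                 # Project each entity span onto the tokens it overlaps. The
+                 # counter records a token's position inside the span (1 for
+                 # the first token), which drives the B-/I- prefixes below;
+                 # tokens already covered by an earlier entity keep their label.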
+                 for tag in tags:
+
+                     cpt = 0
+
+                     for idx, token in enumerate(tokens):
+
+                         rtok = range(token["offset_start"], token["offset_end"]+1)
+                         rtag = range(tag["offset_start"], tag["offset_end"]+1)
+
+                         # Check if the ranges are overlapping
+                         if bool(set(rtok) & set(rtag)):
+
+                             if ner_tags[idx][0] == "O":
+                                 cpt += 1
+                                 ner_tags[idx][0] = tag["label"]
+                                 ner_tags[idx][1] = cpt
+
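+                 # Convert the (label, position) pairs to BIO tags.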
+                 for i in range(len(ner_tags)):
+
+                     tag = ner_tags[i][0]
+
+                     if tag == "O":
+                         continue
+                     elif ner_tags[i][1] == 1:
+                         ner_tags[i][0] = "B-" + tag
+                     else:
+                         ner_tags[i][0] = "I-" + tag
+
+                 obj = {
+                     "id": u["@id"],
+                     "tokens": [t["token"] for t in tokens],
+                     "ner_tags": [n[0] for n in ner_tags],
+                 }
+
+                 all_res.append(obj)
+
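+         # Deterministic 70/10/20 train/validation/test split over unit ids.
+         # The fixed seed (and the repeated shuffles) are kept as-is so the
+         # split assignment stays reproducible across runs.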
+         ids = [r["id"] for r in all_res]
+
+         random.seed(4)
+         random.shuffle(ids)
+         random.shuffle(ids)
+         random.shuffle(ids)
+
+         train, validation, test = np.split(ids, [int(len(ids)*0.70), int(len(ids)*0.80)])
+
+         if split == "train":
+             allowed_ids = list(train)
+         elif split == "validation":
+             allowed_ids = list(validation)
+         elif split == "test":
+             allowed_ids = list(test)
+         else:
+             raise ValueError(f"Unknown split: {split}")
+
+         for r in all_res:
+             identifier = r["id"]
+             if identifier in allowed_ids:
+                 yield identifier, r
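With this script in place, each configuration can be loaded by name. A minimal usage sketch, assuming the dataset is hosted as qanastek/MANTRAGSC (the repository id is inferred from the committer and file name, not stated in the commit) and that your installed datasets version still accepts community loading scripts:

    from datasets import load_dataset

    # "fr_medline" is the DEFAULT_CONFIG_NAME defined above; any other
    # "<language>_<corpus>" configuration works the same way.
    dataset = load_dataset("qanastek/MANTRAGSC", "fr_medline")

    # Each example holds whitespace tokens and their aligned BIO tags.
    example = dataset["train"][0]
    print(example["tokens"])
    print(example["ner_tags"])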