smeoni committed on
Commit fd17def · 1 Parent(s): 1424af7

Upload 3 files


New version of E3C using the BigBio format.
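
As a quick orientation, here is a minimal sketch of how the reworked configuration could be loaded with the `datasets` library once this revision is published. The dataset path `"e3c"` is a placeholder for the actual Hub repository id (an assumption here); the config name `e3c_source` and the split names come from the new script.

```python
from datasets import load_dataset

# Load the English layer1 split of the new BigBio-style source config.
# "e3c" is a placeholder path; point it at this repository (or the local e3c.py).
dataset = load_dataset("e3c", name="e3c_source", split="en.layer1")

example = dataset[0]
print(example["text"][:100])     # raw document text
print(example["entities"][:2])   # CLINENTITY / EVENT / TIMEX3 / ... spans
print(example["relations"][:2])  # temporal / RML links with their attributes
```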

Files changed (3)
  1. README.md +122 -79
  2. bigbiohub.py +587 -0
  3. e3c.py +247 -375
README.md CHANGED
@@ -1,86 +1,128 @@
1
  ---
2
  dataset_info:
3
  features:
4
- - name: text
5
- dtype: string
6
- - name: tokens
7
- sequence: string
8
- - name: tokens_offsets
9
- sequence:
10
- sequence: int32
11
- - name: clinical_entity_tags
12
- sequence:
13
- class_label:
14
- names:
15
- '0': O
16
- '1': B-CLINENTITY
17
- '2': I-CLINENTITY
18
- - name: clinical_entity_cuid
19
- sequence: string
20
- - name: temporal_information_tags
21
- sequence:
22
- class_label:
23
- names:
24
- '0': O
25
- '1': B-EVENT
26
- '2': B-ACTOR
27
- '3': B-BODYPART
28
- '4': B-TIMEX3
29
- '5': B-RML
30
- '6': I-EVENT
31
- '7': I-ACTOR
32
- '8': I-BODYPART
33
- '9': I-TIMEX3
34
- '10': I-RML
35
- config_name: e3c
36
  splits:
37
- - name: en.layer1
38
- num_bytes: 1632165
39
- num_examples: 1520
40
- - name: en.layer2
41
- num_bytes: 3263885
42
- num_examples: 2873
43
- - name: en.layer2.validation
44
- num_bytes: 371196
45
- num_examples: 334
46
- - name: es.layer1
47
- num_bytes: 1599169
48
- num_examples: 1134
49
- - name: es.layer2
50
- num_bytes: 3192361
51
- num_examples: 2347
52
- - name: es.layer2.validation
53
- num_bytes: 352193
54
- num_examples: 261
55
- - name: eu.layer1
56
- num_bytes: 1931109
57
- num_examples: 3126
58
- - name: eu.layer2
59
- num_bytes: 1066405
60
- num_examples: 1594
61
- - name: eu.layer2.validation
62
- num_bytes: 279306
63
- num_examples: 468
64
- - name: fr.layer1
65
- num_bytes: 1610663
66
- num_examples: 1109
67
- - name: fr.layer2
68
- num_bytes: 3358033
69
- num_examples: 2389
70
- - name: fr.layer2.validation
71
- num_bytes: 361816
72
- num_examples: 293
73
- - name: it.layer1
74
- num_bytes: 1633613
75
- num_examples: 1146
76
- - name: it.layer2
77
- num_bytes: 3373977
78
- num_examples: 2436
79
- - name: it.layer2.validation
80
- num_bytes: 366932
81
- num_examples: 275
82
  download_size: 230213492
83
- dataset_size: 24392823
84
  ---
85
 
86
  # Dataset Card for E3C
@@ -88,6 +130,7 @@ dataset_info:
88
  ## Dataset Description
89
 
90
  - **Homepage:** https://github.com/hltfbk/E3C-Corpus
 
91
  - **Public:** True
92
  - **Tasks:** NER,RE
93
 
@@ -109,4 +152,4 @@ information about clinical entities based on medical taxonomies, to be used for
109
  url = {https://uts.nlm.nih.gov/uts/umls/home},
110
  year = {2021},
111
  }
112
- ```
 
1
  ---
2
  dataset_info:
3
  features:
4
+ - name: id
5
+ dtype: string
6
+ - name: document_id
7
+ dtype: int32
8
+ - name: text
9
+ dtype: string
10
+ - name: passages
11
+ list:
12
+ - name: id
13
+ dtype: string
14
+ - name: text
15
+ dtype: string
16
+ - name: offsets
17
+ list: int32
18
+ - name: entities
19
+ list:
20
+ - name: id
21
+ dtype: string
22
+ - name: type
23
+ dtype: string
24
+ - name: text
25
+ dtype: string
26
+ - name: offsets
27
+ list: int32
28
+ - name: semantic_type_id
29
+ dtype: string
30
+ - name: role
31
+ dtype: string
32
+ - name: relations
33
+ list:
34
+ - name: id
35
+ dtype: string
36
+ - name: type
37
+ dtype: string
38
+ - name: contextualAspect
39
+ dtype: string
40
+ - name: contextualModality
41
+ dtype: string
42
+ - name: degree
43
+ dtype: string
44
+ - name: docTimeRel
45
+ dtype: string
46
+ - name: eventType
47
+ dtype: string
48
+ - name: permanence
49
+ dtype: string
50
+ - name: polarity
51
+ dtype: string
52
+ - name: functionInDocument
53
+ dtype: string
54
+ - name: timex3Class
55
+ dtype: string
56
+ - name: value
57
+ dtype: string
58
+ - name: concept_1
59
+ dtype: string
60
+ - name: concept_2
61
+ dtype: string
62
+ config_name: e3c_source
63
  splits:
64
+ - name: en.layer1
65
+ num_bytes: 1645819
66
+ num_examples: 84
67
+ - name: en.layer2
68
+ num_bytes: 881290
69
+ num_examples: 171
70
+ - name: en.layer2.validation
71
+ num_bytes: 101379
72
+ num_examples: 19
73
+ - name: en.layer3
74
+ num_bytes: 7672589
75
+ num_examples: 9779
76
+ - name: es.layer1
77
+ num_bytes: 1398186
78
+ num_examples: 81
79
+ - name: es.layer2
80
+ num_bytes: 907515
81
+ num_examples: 162
82
+ - name: es.layer2.validation
83
+ num_bytes: 103936
84
+ num_examples: 18
85
+ - name: es.layer3
86
+ num_bytes: 6656630
87
+ num_examples: 1876
88
+ - name: eu.layer1
89
+ num_bytes: 2217479
90
+ num_examples: 90
91
+ - name: eu.layer2
92
+ num_bytes: 306291
93
+ num_examples: 111
94
+ - name: eu.layer2.validation
95
+ num_bytes: 95276
96
+ num_examples: 10
97
+ - name: eu.layer3
98
+ num_bytes: 4656179
99
+ num_examples: 1232
100
+ - name: fr.layer1
101
+ num_bytes: 1474138
102
+ num_examples: 81
103
+ - name: fr.layer2
104
+ num_bytes: 905084
105
+ num_examples: 168
106
+ - name: fr.layer2.validation
107
+ num_bytes: 101701
108
+ num_examples: 18
109
+ - name: fr.layer3
110
+ num_bytes: 457927491
111
+ num_examples: 25740
112
+ - name: it.layer1
113
+ num_bytes: 1036560
114
+ num_examples: 86
115
+ - name: it.layer2
116
+ num_bytes: 888138
117
+ num_examples: 174
118
+ - name: it.layer2.validation
119
+ num_bytes: 99549
120
+ num_examples: 18
121
+ - name: it.layer3
122
+ num_bytes: 86243680
123
+ num_examples: 10213
124
  download_size: 230213492
125
+ dataset_size: 575318910
126
  ---
127
 
128
  # Dataset Card for E3C
 
130
  ## Dataset Description
131
 
132
  - **Homepage:** https://github.com/hltfbk/E3C-Corpus
133
+ - **PubMed** False
134
  - **Public:** True
135
  - **Tasks:** NER,RE
136
 
 
152
  url = {https://uts.nlm.nih.gov/uts/umls/home},
153
  year = {2021},
154
  }
155
+ ```
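
To complement the YAML schema above, this is roughly the shape of a single record under the new `e3c_source` config. Field names follow the updated script; all values below are illustrative placeholders, not real corpus data.

```python
# Illustrative record shape for the e3c_source schema (placeholder values only).
example = {
    "id": "e3c",
    "document_id": 0,
    "text": "<full document text>",
    "passages": [  # sentence segmentation
        {"id": "<xmi id>", "text": "<sentence text>", "offsets": [0, 42]},
    ],
    "entities": [  # CLINENTITY, EVENT, ACTOR, BODYPART, TIMEX3, RML spans
        {
            "id": "<xmi id>",
            "type": "CLINENTITY",
            "text": "<entity span>",
            "offsets": [10, 24],
            "semantic_type_id": "<UMLS CUI or empty>",
            "role": "",
        },
    ],
    "relations": [],  # EVENT/TIMEX3/RML attributes plus concept_1 -> concept_2 links
}
```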
bigbiohub.py ADDED
@@ -0,0 +1,587 @@
1
+ # mypy: ignore-errors
2
+ # flake8: noqa
3
+ import logging
4
+ from collections import defaultdict
5
+ from dataclasses import dataclass
6
+ from enum import Enum
7
+ from pathlib import Path
8
+ from types import SimpleNamespace
9
+ from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple
10
+
11
+ import datasets
12
+
13
+ if TYPE_CHECKING:
14
+ import bioc
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
+ BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")
20
+
21
+
22
+ @dataclass
23
+ class BigBioConfig(datasets.BuilderConfig):
24
+ """BuilderConfig for BigBio."""
25
+
26
+ name: str = None
27
+ version: datasets.Version = None
28
+ description: str = None
29
+ schema: str = None
30
+ subset_id: str = None
31
+
32
+
33
+ class Tasks(Enum):
34
+ NAMED_ENTITY_RECOGNITION = "NER"
35
+ NAMED_ENTITY_DISAMBIGUATION = "NED"
36
+ EVENT_EXTRACTION = "EE"
37
+ RELATION_EXTRACTION = "RE"
38
+ COREFERENCE_RESOLUTION = "COREF"
39
+ QUESTION_ANSWERING = "QA"
40
+ TEXTUAL_ENTAILMENT = "TE"
41
+ SEMANTIC_SIMILARITY = "STS"
42
+ TEXT_PAIRS_CLASSIFICATION = "TXT2CLASS"
43
+ PARAPHRASING = "PARA"
44
+ TRANSLATION = "TRANSL"
45
+ SUMMARIZATION = "SUM"
46
+ TEXT_CLASSIFICATION = "TXTCLASS"
47
+
48
+
49
+ entailment_features = datasets.Features(
50
+ {
51
+ "id": datasets.Value("string"),
52
+ "premise": datasets.Value("string"),
53
+ "hypothesis": datasets.Value("string"),
54
+ "label": datasets.Value("string"),
55
+ }
56
+ )
57
+
58
+ pairs_features = datasets.Features(
59
+ {
60
+ "id": datasets.Value("string"),
61
+ "document_id": datasets.Value("string"),
62
+ "text_1": datasets.Value("string"),
63
+ "text_2": datasets.Value("string"),
64
+ "label": datasets.Value("string"),
65
+ }
66
+ )
67
+
68
+ qa_features = datasets.Features(
69
+ {
70
+ "id": datasets.Value("string"),
71
+ "question_id": datasets.Value("string"),
72
+ "document_id": datasets.Value("string"),
73
+ "question": datasets.Value("string"),
74
+ "type": datasets.Value("string"),
75
+ "choices": [datasets.Value("string")],
76
+ "context": datasets.Value("string"),
77
+ "answer": datasets.Sequence(datasets.Value("string")),
78
+ }
79
+ )
80
+
81
+ text_features = datasets.Features(
82
+ {
83
+ "id": datasets.Value("string"),
84
+ "document_id": datasets.Value("string"),
85
+ "text": datasets.Value("string"),
86
+ "labels": [datasets.Value("string")],
87
+ }
88
+ )
89
+
90
+ text2text_features = datasets.Features(
91
+ {
92
+ "id": datasets.Value("string"),
93
+ "document_id": datasets.Value("string"),
94
+ "text_1": datasets.Value("string"),
95
+ "text_2": datasets.Value("string"),
96
+ "text_1_name": datasets.Value("string"),
97
+ "text_2_name": datasets.Value("string"),
98
+ }
99
+ )
100
+
101
+ kb_features = datasets.Features(
102
+ {
103
+ "id": datasets.Value("string"),
104
+ "document_id": datasets.Value("string"),
105
+ "passages": [
106
+ {
107
+ "id": datasets.Value("string"),
108
+ "type": datasets.Value("string"),
109
+ "text": datasets.Sequence(datasets.Value("string")),
110
+ "offsets": datasets.Sequence([datasets.Value("int32")]),
111
+ }
112
+ ],
113
+ "entities": [
114
+ {
115
+ "id": datasets.Value("string"),
116
+ "type": datasets.Value("string"),
117
+ "text": datasets.Sequence(datasets.Value("string")),
118
+ "offsets": datasets.Sequence([datasets.Value("int32")]),
119
+ "normalized": [
120
+ {
121
+ "db_name": datasets.Value("string"),
122
+ "db_id": datasets.Value("string"),
123
+ }
124
+ ],
125
+ }
126
+ ],
127
+ "events": [
128
+ {
129
+ "id": datasets.Value("string"),
130
+ "type": datasets.Value("string"),
131
+ # refers to the text_bound_annotation of the trigger
132
+ "trigger": {
133
+ "text": datasets.Sequence(datasets.Value("string")),
134
+ "offsets": datasets.Sequence([datasets.Value("int32")]),
135
+ },
136
+ "arguments": [
137
+ {
138
+ "role": datasets.Value("string"),
139
+ "ref_id": datasets.Value("string"),
140
+ }
141
+ ],
142
+ }
143
+ ],
144
+ "coreferences": [
145
+ {
146
+ "id": datasets.Value("string"),
147
+ "entity_ids": datasets.Sequence(datasets.Value("string")),
148
+ }
149
+ ],
150
+ "relations": [
151
+ {
152
+ "id": datasets.Value("string"),
153
+ "type": datasets.Value("string"),
154
+ "arg1_id": datasets.Value("string"),
155
+ "arg2_id": datasets.Value("string"),
156
+ "normalized": [
157
+ {
158
+ "db_name": datasets.Value("string"),
159
+ "db_id": datasets.Value("string"),
160
+ }
161
+ ],
162
+ }
163
+ ],
164
+ }
165
+ )
166
+
167
+
168
+ TASK_TO_SCHEMA = {
169
+ Tasks.NAMED_ENTITY_RECOGNITION.name: "KB",
170
+ Tasks.NAMED_ENTITY_DISAMBIGUATION.name: "KB",
171
+ Tasks.EVENT_EXTRACTION.name: "KB",
172
+ Tasks.RELATION_EXTRACTION.name: "KB",
173
+ Tasks.COREFERENCE_RESOLUTION.name: "KB",
174
+ Tasks.QUESTION_ANSWERING.name: "QA",
175
+ Tasks.TEXTUAL_ENTAILMENT.name: "TE",
176
+ Tasks.SEMANTIC_SIMILARITY.name: "PAIRS",
177
+ Tasks.TEXT_PAIRS_CLASSIFICATION.name: "PAIRS",
178
+ Tasks.PARAPHRASING.name: "T2T",
179
+ Tasks.TRANSLATION.name: "T2T",
180
+ Tasks.SUMMARIZATION.name: "T2T",
181
+ Tasks.TEXT_CLASSIFICATION.name: "TEXT",
182
+ }
183
+
184
+ SCHEMA_TO_TASKS = defaultdict(set)
185
+ for task, schema in TASK_TO_SCHEMA.items():
186
+ SCHEMA_TO_TASKS[schema].add(task)
187
+ SCHEMA_TO_TASKS = dict(SCHEMA_TO_TASKS)
188
+
189
+ VALID_TASKS = set(TASK_TO_SCHEMA.keys())
190
+ VALID_SCHEMAS = set(TASK_TO_SCHEMA.values())
191
+
192
+ SCHEMA_TO_FEATURES = {
193
+ "KB": kb_features,
194
+ "QA": qa_features,
195
+ "TE": entailment_features,
196
+ "T2T": text2text_features,
197
+ "TEXT": text_features,
198
+ "PAIRS": pairs_features,
199
+ }
200
+
201
+
202
+ def get_texts_and_offsets_from_bioc_ann(ann: "bioc.BioCAnnotation") -> Tuple:
203
+
204
+ offsets = [(loc.offset, loc.offset + loc.length) for loc in ann.locations]
205
+
206
+ text = ann.text
207
+
208
+ if len(offsets) > 1:
209
+ i = 0
210
+ texts = []
211
+ for start, end in offsets:
212
+ chunk_len = end - start
213
+ texts.append(text[i : chunk_len + i])
214
+ i += chunk_len
215
+ while i < len(text) and text[i] == " ":
216
+ i += 1
217
+ else:
218
+ texts = [text]
219
+
220
+ return offsets, texts
221
+
222
+
223
+ def remove_prefix(a: str, prefix: str) -> str:
224
+ if a.startswith(prefix):
225
+ a = a[len(prefix) :]
226
+ return a
227
+
228
+
229
+ def parse_brat_file(
230
+ txt_file: Path,
231
+ annotation_file_suffixes: List[str] = None,
232
+ parse_notes: bool = False,
233
+ ) -> Dict:
234
+ """
235
+ Parse a brat file into the schema defined below.
236
+ `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
237
+ Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
238
+ e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.
239
+ Will include annotator notes, when `parse_notes == True`.
240
+ brat_features = datasets.Features(
241
+ {
242
+ "id": datasets.Value("string"),
243
+ "document_id": datasets.Value("string"),
244
+ "text": datasets.Value("string"),
245
+ "text_bound_annotations": [ # T line in brat, e.g. type or event trigger
246
+ {
247
+ "offsets": datasets.Sequence([datasets.Value("int32")]),
248
+ "text": datasets.Sequence(datasets.Value("string")),
249
+ "type": datasets.Value("string"),
250
+ "id": datasets.Value("string"),
251
+ }
252
+ ],
253
+ "events": [ # E line in brat
254
+ {
255
+ "trigger": datasets.Value(
256
+ "string"
257
+ ), # refers to the text_bound_annotation of the trigger,
258
+ "id": datasets.Value("string"),
259
+ "type": datasets.Value("string"),
260
+ "arguments": datasets.Sequence(
261
+ {
262
+ "role": datasets.Value("string"),
263
+ "ref_id": datasets.Value("string"),
264
+ }
265
+ ),
266
+ }
267
+ ],
268
+ "relations": [ # R line in brat
269
+ {
270
+ "id": datasets.Value("string"),
271
+ "head": {
272
+ "ref_id": datasets.Value("string"),
273
+ "role": datasets.Value("string"),
274
+ },
275
+ "tail": {
276
+ "ref_id": datasets.Value("string"),
277
+ "role": datasets.Value("string"),
278
+ },
279
+ "type": datasets.Value("string"),
280
+ }
281
+ ],
282
+ "equivalences": [ # Equiv line in brat
283
+ {
284
+ "id": datasets.Value("string"),
285
+ "ref_ids": datasets.Sequence(datasets.Value("string")),
286
+ }
287
+ ],
288
+ "attributes": [ # M or A lines in brat
289
+ {
290
+ "id": datasets.Value("string"),
291
+ "type": datasets.Value("string"),
292
+ "ref_id": datasets.Value("string"),
293
+ "value": datasets.Value("string"),
294
+ }
295
+ ],
296
+ "normalizations": [ # N lines in brat
297
+ {
298
+ "id": datasets.Value("string"),
299
+ "type": datasets.Value("string"),
300
+ "ref_id": datasets.Value("string"),
301
+ "resource_name": datasets.Value(
302
+ "string"
303
+ ), # Name of the resource, e.g. "Wikipedia"
304
+ "cuid": datasets.Value(
305
+ "string"
306
+ ), # ID in the resource, e.g. 534366
307
+ "text": datasets.Value(
308
+ "string"
309
+ ), # Human readable description/name of the entity, e.g. "Barack Obama"
310
+ }
311
+ ],
312
+ ### OPTIONAL: Only included when `parse_notes == True`
313
+ "notes": [ # # lines in brat
314
+ {
315
+ "id": datasets.Value("string"),
316
+ "type": datasets.Value("string"),
317
+ "ref_id": datasets.Value("string"),
318
+ "text": datasets.Value("string"),
319
+ }
320
+ ],
321
+ },
322
+ )
323
+ """
324
+
325
+ example = {}
326
+ example["document_id"] = txt_file.with_suffix("").name
327
+ with txt_file.open() as f:
328
+ example["text"] = f.read()
329
+
330
+ # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
331
+ # for event extraction
332
+ if annotation_file_suffixes is None:
333
+ annotation_file_suffixes = [".a1", ".a2", ".ann"]
334
+
335
+ if len(annotation_file_suffixes) == 0:
336
+ raise AssertionError(
337
+ "At least one suffix for the to-be-read annotation files should be given!"
338
+ )
339
+
340
+ ann_lines = []
341
+ for suffix in annotation_file_suffixes:
342
+ annotation_file = txt_file.with_suffix(suffix)
343
+ if annotation_file.exists():
344
+ with annotation_file.open() as f:
345
+ ann_lines.extend(f.readlines())
346
+
347
+ example["text_bound_annotations"] = []
348
+ example["events"] = []
349
+ example["relations"] = []
350
+ example["equivalences"] = []
351
+ example["attributes"] = []
352
+ example["normalizations"] = []
353
+
354
+ if parse_notes:
355
+ example["notes"] = []
356
+
357
+ for line in ann_lines:
358
+ line = line.strip()
359
+ if not line:
360
+ continue
361
+
362
+ if line.startswith("T"): # Text bound
363
+ ann = {}
364
+ fields = line.split("\t")
365
+
366
+ ann["id"] = fields[0]
367
+ ann["type"] = fields[1].split()[0]
368
+ ann["offsets"] = []
369
+ span_str = remove_prefix(fields[1], (ann["type"] + " "))
370
+ text = fields[2]
371
+ for span in span_str.split(";"):
372
+ start, end = span.split()
373
+ ann["offsets"].append([int(start), int(end)])
374
+
375
+ # Heuristically split text of discontiguous entities into chunks
376
+ ann["text"] = []
377
+ if len(ann["offsets"]) > 1:
378
+ i = 0
379
+ for start, end in ann["offsets"]:
380
+ chunk_len = end - start
381
+ ann["text"].append(text[i : chunk_len + i])
382
+ i += chunk_len
383
+ while i < len(text) and text[i] == " ":
384
+ i += 1
385
+ else:
386
+ ann["text"] = [text]
387
+
388
+ example["text_bound_annotations"].append(ann)
389
+
390
+ elif line.startswith("E"):
391
+ ann = {}
392
+ fields = line.split("\t")
393
+
394
+ ann["id"] = fields[0]
395
+
396
+ ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
397
+
398
+ ann["arguments"] = []
399
+ for role_ref_id in fields[1].split()[1:]:
400
+ argument = {
401
+ "role": (role_ref_id.split(":"))[0],
402
+ "ref_id": (role_ref_id.split(":"))[1],
403
+ }
404
+ ann["arguments"].append(argument)
405
+
406
+ example["events"].append(ann)
407
+
408
+ elif line.startswith("R"):
409
+ ann = {}
410
+ fields = line.split("\t")
411
+
412
+ ann["id"] = fields[0]
413
+ ann["type"] = fields[1].split()[0]
414
+
415
+ ann["head"] = {
416
+ "role": fields[1].split()[1].split(":")[0],
417
+ "ref_id": fields[1].split()[1].split(":")[1],
418
+ }
419
+ ann["tail"] = {
420
+ "role": fields[1].split()[2].split(":")[0],
421
+ "ref_id": fields[1].split()[2].split(":")[1],
422
+ }
423
+
424
+ example["relations"].append(ann)
425
+
426
+ # '*' seems to be the legacy way to mark equivalences,
427
+ # but I couldn't find any info on the current way
428
+ # this might have to be adapted dependent on the brat version
429
+ # of the annotation
430
+ elif line.startswith("*"):
431
+ ann = {}
432
+ fields = line.split("\t")
433
+
434
+ ann["id"] = fields[0]
435
+ ann["ref_ids"] = fields[1].split()[1:]
436
+
437
+ example["equivalences"].append(ann)
438
+
439
+ elif line.startswith("A") or line.startswith("M"):
440
+ ann = {}
441
+ fields = line.split("\t")
442
+
443
+ ann["id"] = fields[0]
444
+
445
+ info = fields[1].split()
446
+ ann["type"] = info[0]
447
+ ann["ref_id"] = info[1]
448
+
449
+ if len(info) > 2:
450
+ ann["value"] = info[2]
451
+ else:
452
+ ann["value"] = ""
453
+
454
+ example["attributes"].append(ann)
455
+
456
+ elif line.startswith("N"):
457
+ ann = {}
458
+ fields = line.split("\t")
459
+
460
+ ann["id"] = fields[0]
461
+ ann["text"] = fields[2]
462
+
463
+ info = fields[1].split()
464
+
465
+ ann["type"] = info[0]
466
+ ann["ref_id"] = info[1]
467
+ ann["resource_name"] = info[2].split(":")[0]
468
+ ann["cuid"] = info[2].split(":")[1]
469
+ example["normalizations"].append(ann)
470
+
471
+ elif parse_notes and line.startswith("#"):
472
+ ann = {}
473
+ fields = line.split("\t")
474
+
475
+ ann["id"] = fields[0]
476
+ ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL
477
+
478
+ info = fields[1].split()
479
+
480
+ ann["type"] = info[0]
481
+ ann["ref_id"] = info[1]
482
+ example["notes"].append(ann)
483
+
484
+ return example
485
+
486
+
487
+ def brat_parse_to_bigbio_kb(brat_parse: Dict) -> Dict:
488
+ """
489
+ Transform a brat parse (conforming to the standard brat schema) obtained with
490
+ `parse_brat_file` into a dictionary conforming to the `bigbio-kb` schema (as defined in ../schemas/kb.py)
491
+ :param brat_parse:
492
+ """
493
+
494
+ unified_example = {}
495
+
496
+ # Prefix all ids with document id to ensure global uniqueness,
497
+ # because brat ids are only unique within their document
498
+ id_prefix = brat_parse["document_id"] + "_"
499
+
500
+ # identical
501
+ unified_example["document_id"] = brat_parse["document_id"]
502
+ unified_example["passages"] = [
503
+ {
504
+ "id": id_prefix + "_text",
505
+ "type": "abstract",
506
+ "text": [brat_parse["text"]],
507
+ "offsets": [[0, len(brat_parse["text"])]],
508
+ }
509
+ ]
510
+
511
+ # get normalizations
512
+ ref_id_to_normalizations = defaultdict(list)
513
+ for normalization in brat_parse["normalizations"]:
514
+ ref_id_to_normalizations[normalization["ref_id"]].append(
515
+ {
516
+ "db_name": normalization["resource_name"],
517
+ "db_id": normalization["cuid"],
518
+ }
519
+ )
520
+
521
+ # separate entities and event triggers
522
+ unified_example["events"] = []
523
+ non_event_ann = brat_parse["text_bound_annotations"].copy()
524
+ for event in brat_parse["events"]:
525
+ event = event.copy()
526
+ event["id"] = id_prefix + event["id"]
527
+ trigger = next(
528
+ tr for tr in brat_parse["text_bound_annotations"] if tr["id"] == event["trigger"]
529
+ )
530
+ if trigger in non_event_ann:
531
+ non_event_ann.remove(trigger)
532
+ event["trigger"] = {
533
+ "text": trigger["text"].copy(),
534
+ "offsets": trigger["offsets"].copy(),
535
+ }
536
+ for argument in event["arguments"]:
537
+ argument["ref_id"] = id_prefix + argument["ref_id"]
538
+
539
+ unified_example["events"].append(event)
540
+
541
+ unified_example["entities"] = []
542
+ anno_ids = [ref_id["id"] for ref_id in non_event_ann]
543
+ for ann in non_event_ann:
544
+ entity_ann = ann.copy()
545
+ entity_ann["id"] = id_prefix + entity_ann["id"]
546
+ entity_ann["normalized"] = ref_id_to_normalizations[ann["id"]]
547
+ unified_example["entities"].append(entity_ann)
548
+
549
+ # massage relations
550
+ unified_example["relations"] = []
551
+ skipped_relations = set()
552
+ for ann in brat_parse["relations"]:
553
+ if ann["head"]["ref_id"] not in anno_ids or ann["tail"]["ref_id"] not in anno_ids:
554
+ skipped_relations.add(ann["id"])
555
+ continue
556
+ unified_example["relations"].append(
557
+ {
558
+ "arg1_id": id_prefix + ann["head"]["ref_id"],
559
+ "arg2_id": id_prefix + ann["tail"]["ref_id"],
560
+ "id": id_prefix + ann["id"],
561
+ "type": ann["type"],
562
+ "normalized": [],
563
+ }
564
+ )
565
+ if len(skipped_relations) > 0:
566
+ example_id = brat_parse["document_id"]
567
+ logger.info(
568
+ f"Example:{example_id}: The `bigbio_kb` schema allows `relations` only between entities."
569
+ f" Skip (for now): "
570
+ f"{list(skipped_relations)}"
571
+ )
572
+
573
+ # get coreferences
574
+ unified_example["coreferences"] = []
575
+ for i, ann in enumerate(brat_parse["equivalences"], start=1):
576
+ is_entity_cluster = True
577
+ for ref_id in ann["ref_ids"]:
578
+ if not ref_id.startswith("T"): # not textbound -> no entity
579
+ is_entity_cluster = False
580
+ elif ref_id not in anno_ids: # event trigger -> no entity
581
+ is_entity_cluster = False
582
+ if is_entity_cluster:
583
+ entity_ids = [id_prefix + i for i in ann["ref_ids"]]
584
+ unified_example["coreferences"].append(
585
+ {"id": id_prefix + str(i), "entity_ids": entity_ids}
586
+ )
587
+ return unified_example
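
The brat helpers above are generic BigBio utilities (the E3C script itself only imports `BigBioConfig` and `Tasks` from this module). A minimal usage sketch, assuming a standalone brat document at a hypothetical path `data/1234.txt` with a matching `.ann` file and `bigbiohub.py` on the import path:

```python
from pathlib import Path

from bigbiohub import brat_parse_to_bigbio_kb, parse_brat_file

# Parse the raw brat annotations (T/E/R/N/... lines) into a flat dict ...
brat_example = parse_brat_file(Path("data/1234.txt"), parse_notes=True)

# ... then massage it into the bigbio_kb schema (passages, entities, events,
# relations, coreferences) with document-prefixed ids.
kb_example = brat_parse_to_bigbio_kb(brat_example)

print(kb_example["passages"][0]["offsets"])  # [[0, <document length>]]
print(kb_example["entities"][:1])
```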
e3c.py CHANGED
@@ -1,13 +1,35 @@
1
  import os
2
- from typing import Iterator
 
3
 
4
  import datasets
5
- from bs4 import BeautifulSoup, ResultSet
6
- from datasets import DownloadManager
7
- from syntok.tokenizer import Tokenizer
8
 
9
- tok = Tokenizer()
10
 
 
11
 
12
  _CITATION = """\
13
  @report{Magnini2021,
@@ -19,9 +41,10 @@ European Clinical Case Corpus El proyecto E3C: European Clinical Case Corpus},
19
  url = {https://uts.nlm.nih.gov/uts/umls/home},
20
  year = {2021},
21
  }
22
-
23
  """
24
 
 
 
25
  _DESCRIPTION = """\
26
  The European Clinical Case Corpus (E3C) project aims at collecting and \
27
  annotating a large corpus of clinical documents in five European languages (Spanish, \
@@ -30,402 +53,251 @@ include temporal information, to allow temporal reasoning on chronologies, and \
30
  information about clinical entities based on medical taxonomies, to be used for semantic reasoning.
31
  """
32
 
33
- _URL = "https://github.com/hltfbk/E3C-Corpus/archive/refs/tags/v2.0.0.zip"
34
 
 
35
 
36
- class E3CConfig(datasets.BuilderConfig):
37
- """BuilderConfig for E3C."""
38
 
39
- def __init__(self, **kwargs):
40
- """BuilderConfig for E3C.
41
- Args:
42
- **kwargs: keyword arguments forwarded to super.
43
- """
44
- super(E3CConfig, self).__init__(**kwargs)
45
 
 
 
46
 
47
- class E3C(datasets.GeneratorBasedBuilder):
48
- VERSION = datasets.Version("1.1.0")
49
  BUILDER_CONFIGS = [
50
- E3CConfig(
51
- name="e3c",
52
- version=VERSION,
53
- description="this is an implementation of the E3C dataset",
 
 
54
  ),
55
  ]
56
 
57
- def _info(self):
58
- """This method specifies the DatasetInfo which contains information and typings."""
 
 
 
 
59
  features = datasets.Features(
60
  {
 
 
61
  "text": datasets.Value("string"),
62
- "tokens": datasets.Sequence(datasets.Value("string")),
63
- "tokens_offsets": datasets.Sequence(datasets.Sequence(datasets.Value("int32"))),
64
- "clinical_entity_tags": datasets.Sequence(
65
- datasets.features.ClassLabel(
66
- names=[
67
- "O",
68
- "B-CLINENTITY",
69
- "I-CLINENTITY",
70
- ],
71
- ),
72
- ),
73
- "clinical_entity_cuid": datasets.Sequence(
74
- datasets.Value("string"),
75
- ),
76
- "temporal_information_tags": datasets.Sequence(
77
- datasets.features.ClassLabel(
78
- names=[
79
- "O",
80
- "B-EVENT",
81
- "B-ACTOR",
82
- "B-BODYPART",
83
- "B-TIMEX3",
84
- "B-RML",
85
- "I-EVENT",
86
- "I-ACTOR",
87
- "I-BODYPART",
88
- "I-TIMEX3",
89
- "I-RML",
90
- ],
91
- ),
92
- ),
 
 
 
 
93
  }
94
  )
95
  return datasets.DatasetInfo(
96
  description=_DESCRIPTION,
97
  features=features,
 
 
98
  citation=_CITATION,
99
- supervised_keys=None,
100
  )
101
 
102
- def _split_generators(self, dl_manager: DownloadManager) -> list[datasets.SplitGenerator]:
103
- """Returns SplitGenerators who contains all the difference splits of the dataset.
104
- Each language has its own split and each split has 3 different layers (sub-split):
105
- - layer 1: full manual annotation of clinical entities, temporal information and
106
- factuality, for benchmarking and linguistic analysis.
107
- - layer 2: semi-automatic annotation of clinical entities
108
- - layer 3: non-annotated documents
109
- Args:
110
- dl_manager: A `datasets.utils.DownloadManager` that can be used to download and
111
- extract URLs.
112
- Returns:
113
- A list of `datasets.SplitGenerator`. Contains all subsets of the dataset depending on
114
- the language and the layer.
115
- """
116
- url = _URL
117
- data_dir = dl_manager.download_and_extract(url)
118
 
 
 
119
  return [
120
  datasets.SplitGenerator(
121
- name="en.layer1",
 
122
  gen_kwargs={
123
- "filepath": os.path.join(
124
- data_dir,
125
- "E3C-Corpus-2.0.0/data_annotation",
126
- "English",
127
- "layer1",
128
- ),
129
  },
130
- ),
131
- datasets.SplitGenerator(
132
- name="en.layer2",
133
- gen_kwargs={
134
- "filepath": os.path.join(
135
- data_dir,
136
- "E3C-Corpus-2.0.0/data_annotation",
137
- "English",
138
- "layer2",
139
- ),
140
- },
141
- ),
142
- datasets.SplitGenerator(
143
- name="en.layer2.validation",
144
- gen_kwargs={
145
- "filepath": os.path.join(
146
- data_dir,
147
- "E3C-Corpus-2.0.0/data_validation",
148
- "English",
149
- "layer2",
150
- ),
151
- },
152
- ),
153
- datasets.SplitGenerator(
154
- name="es.layer1",
155
- gen_kwargs={
156
- "filepath": os.path.join(
157
- data_dir,
158
- "E3C-Corpus-2.0.0/data_annotation",
159
- "Spanish",
160
- "layer1",
161
- ),
162
- },
163
- ),
164
- datasets.SplitGenerator(
165
- name="es.layer2",
166
- gen_kwargs={
167
- "filepath": os.path.join(
168
- data_dir,
169
- "E3C-Corpus-2.0.0/data_annotation",
170
- "Spanish",
171
- "layer2",
172
- ),
173
- },
174
- ),
175
- datasets.SplitGenerator(
176
- name="es.layer2.validation",
177
- gen_kwargs={
178
- "filepath": os.path.join(
179
- data_dir,
180
- "E3C-Corpus-2.0.0/data_validation",
181
- "Spanish",
182
- "layer2",
183
- ),
184
- },
185
- ),
186
- datasets.SplitGenerator(
187
- name="eu.layer1",
188
- gen_kwargs={
189
- "filepath": os.path.join(
190
- data_dir,
191
- "E3C-Corpus-2.0.0/data_annotation",
192
- "Basque",
193
- "layer1",
194
- ),
195
- },
196
- ),
197
- datasets.SplitGenerator(
198
- name="eu.layer2",
199
- gen_kwargs={
200
- "filepath": os.path.join(
201
- data_dir,
202
- "E3C-Corpus-2.0.0/data_annotation",
203
- "Basque",
204
- "layer2",
205
- ),
206
- },
207
- ),
208
- datasets.SplitGenerator(
209
- name="eu.layer2.validation",
210
- gen_kwargs={
211
- "filepath": os.path.join(
212
- data_dir,
213
- "E3C-Corpus-2.0.0/data_validation",
214
- "Basque",
215
- "layer2",
216
- ),
217
- },
218
- ),
219
- datasets.SplitGenerator(
220
- name="fr.layer1",
221
- gen_kwargs={
222
- "filepath": os.path.join(
223
- data_dir,
224
- "E3C-Corpus-2.0.0/data_annotation",
225
- "French",
226
- "layer1",
227
- ),
228
- },
229
- ),
230
- datasets.SplitGenerator(
231
- name="fr.layer2",
232
- gen_kwargs={
233
- "filepath": os.path.join(
234
- data_dir,
235
- "E3C-Corpus-2.0.0/data_annotation",
236
- "French",
237
- "layer2",
238
- ),
239
- },
240
- ),
241
- datasets.SplitGenerator(
242
- name="fr.layer2.validation",
243
- gen_kwargs={
244
- "filepath": os.path.join(
245
- data_dir,
246
- "E3C-Corpus-2.0.0/data_validation",
247
- "French",
248
- "layer2",
249
- ),
250
- },
251
- ),
252
- datasets.SplitGenerator(
253
- name="it.layer1",
254
- gen_kwargs={
255
- "filepath": os.path.join(
256
- data_dir,
257
- "E3C-Corpus-2.0.0/data_annotation",
258
- "Italian",
259
- "layer1",
260
- ),
261
- },
262
- ),
263
- datasets.SplitGenerator(
264
- name="it.layer2",
265
- gen_kwargs={
266
- "filepath": os.path.join(
267
- data_dir,
268
- "E3C-Corpus-2.0.0/data_annotation",
269
- "Italian",
270
- "layer2",
271
- ),
272
- },
273
- ),
274
- datasets.SplitGenerator(
275
- name="it.layer2.validation",
276
- gen_kwargs={
277
- "filepath": os.path.join(
278
- data_dir,
279
- "E3C-Corpus-2.0.0/data_validation",
280
- "Italian",
281
- "layer2",
282
- ),
283
- },
284
- ),
285
- ]
286
-
287
- @staticmethod
288
- def get_annotations(entities: ResultSet, text: str) -> list:
289
- """Extract the offset, the text and the type of the entity.
290
-
291
- Args:
292
- entities: The entities to extract.
293
- text: The text of the document.
294
- Returns:
295
- A list of list containing the offset, the text and the type of the entity.
296
- """
297
- return [
298
-
299
- [
300
- int(entity.get("begin")),
301
- int(entity.get("end")),
302
- text[int(entity.get("begin")) : int(entity.get("end"))],
303
- ]
304
- for entity in entities
305
  ]
306
 
307
- def get_clinical_annotations(self, entities: ResultSet, text: str) -> list:
308
- """Extract the offset, the text and the type of the entity.
309
-
310
- Args:
311
- entities: The entities to extract.
312
- text: The text of the document.
313
- Returns:
314
- A list of list containing the offset, the text and the type of the entity.
315
- """
316
- return [
317
- [
318
- int(entity.get("begin")),
319
- int(entity.get("end")),
320
- text[int(entity.get("begin")) : int(entity.get("end"))],
321
- entity.get("entityID"),
322
- ]
323
- for entity in entities
324
- ]
325
-
326
- def get_parsed_data(self, filepath: str):
327
- """Parse the data from the E3C dataset and store it in a dictionary.
328
- Iterate over the files in the dataset and parse for each file the following entities:
329
- - CLINENTITY
330
- - EVENT
331
- - ACTOR
332
- - BODYPART
333
- - TIMEX3
334
- - RML
335
- for each entity, we extract the offset, the text and the type of the entity.
336
-
337
- Args:
338
- filepath: The path to the folder containing the files to parse.
339
- """
340
- for root, _, files in os.walk(filepath):
341
- for file in files:
342
- with open(f"{root}/{file}") as soup_file:
343
- soup = BeautifulSoup(soup_file, "xml")
344
- text = soup.find("cas:Sofa").get("sofaString")
345
- yield {
346
- "CLINENTITY": self.get_clinical_annotations(
347
- soup.find_all("custom:CLINENTITY"), text
348
- ),
349
- "EVENT": self.get_annotations(soup.find_all("custom:EVENT"), text),
350
- "ACTOR": self.get_annotations(soup.find_all("custom:ACTOR"), text),
351
- "BODYPART": self.get_annotations(soup.find_all("custom:BODYPART"), text),
352
- "TIMEX3": self.get_annotations(soup.find_all("custom:TIMEX3"), text),
353
- "RML": self.get_annotations(soup.find_all("custom:RML"), text),
354
- "SENTENCE": self.get_annotations(soup.find_all("type4:Sentence"), text),
355
- "TOKENS": self.get_annotations(soup.find_all("type4:Token"), text),
356
- }
357
-
358
- def _generate_examples(self, filepath) -> Iterator:
359
- """Yields examples as (key, example) tuples.
360
- Args:
361
- filepath: The path to the folder containing the files to parse.
362
- Yields:
363
- an example containing four fields: the text, the annotations, the tokens offsets and
364
- the sentences.
365
- """
366
  guid = 0
367
- for content in self.get_parsed_data(filepath):
368
- for sentence in content["SENTENCE"]:
369
- tokens = [
370
- (
371
- token.offset + sentence[0],
372
- token.offset + sentence[0] + len(token.value),
373
- token.value,
374
- )
375
- for token in list(tok.tokenize(sentence[-1]))
376
- ]
377
 
378
- filtered_tokens = list(
379
- filter(
380
- lambda token: token[0] >= sentence[0] and token[1] <= sentence[1],
381
- tokens,
382
- )
383
- )
384
- tokens_offsets = [
385
- [token[0] - sentence[0], token[1] - sentence[0]] for token in filtered_tokens
386
- ]
387
- clinical_labels = ["O"] * len(filtered_tokens)
388
- clinical_cuid = ["CUI_LESS"] * len(filtered_tokens)
389
- temporal_information_labels = ["O"] * len(filtered_tokens)
390
- for entity_type in [
391
- "CLINENTITY",
392
- "EVENT",
393
- "ACTOR",
394
- "BODYPART",
395
- "TIMEX3",
396
- "RML",
397
- ]:
398
- if len(content[entity_type]) != 0:
399
- for entities in list(
400
- content[entity_type],
401
- ):
402
- annotated_tokens = [
403
- idx_token
404
- for idx_token, token in enumerate(filtered_tokens)
405
- if token[0] >= entities[0] and token[1] <= entities[1]
406
  ]
407
- for idx_token in annotated_tokens:
408
- if entity_type == "CLINENTITY":
409
- if idx_token == annotated_tokens[0]:
410
- clinical_labels[idx_token] = f"B-{entity_type}"
411
- else:
412
- clinical_labels[idx_token] = f"I-{entity_type}"
413
- clinical_cuid[idx_token] = entities[-1]
414
- else:
415
- if idx_token == annotated_tokens[0]:
416
- temporal_information_labels[idx_token] = f"B-{entity_type}"
417
- else:
418
- temporal_information_labels[idx_token] = f"I-{entity_type}"
419
- yield guid, {
420
- "text": sentence[-1],
421
- "tokens": list(map(lambda token: token[2], filtered_tokens)),
422
- "clinical_entity_tags": clinical_labels,
423
- "clinical_entity_cuid": clinical_cuid,
424
- "temporal_information_tags": temporal_information_labels,
425
- "tokens_offsets": tokens_offsets,
426
- }
427
- guid += 1
428
-
429
- if __name__ == "__main__":
430
- builder = E3C()
431
- builder.download_and_prepare()
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ The European Clinical Case Corpus (E3C) project aims at collecting and \
18
+ annotating a large corpus of clinical documents in five European languages (Spanish, \
19
+ Basque, English, French and Italian), which will be freely distributed. Annotations \
20
+ include temporal information, to allow temporal reasoning on chronologies, and \
21
+ information about clinical entities based on medical taxonomies, to be used for semantic reasoning.
22
+ """
23
+ import json
24
  import os
25
+ import xml.etree.ElementTree as et
26
+ from typing import Dict, Iterator, List, Tuple
27
 
28
  import datasets
 
 
 
29
 
30
+ from .bigbiohub import BigBioConfig, Tasks
31
 
32
+ _LOCAL = True
33
 
34
  _CITATION = """\
35
  @report{Magnini2021,
 
41
  url = {https://uts.nlm.nih.gov/uts/umls/home},
42
  year = {2021},
43
  }
 
44
  """
45
 
46
+ _DATASETNAME = "e3c"
47
+
48
  _DESCRIPTION = """\
49
  The European Clinical Case Corpus (E3C) project aims at collecting and \
50
  annotating a large corpus of clinical documents in five European languages (Spanish, \
 
53
  information about clinical entities based on medical taxonomies, to be used for semantic reasoning.
54
  """
55
 
56
+ _HOMEPAGE = "https://github.com/hltfbk/E3C-Corpus"
57
 
58
+ _LICENSE = ""
59
+
60
+ _URLS = {
61
+ _DATASETNAME: "https://github.com/hltfbk/E3C-Corpus/archive/refs/tags/v2.0.0.zip",
62
+ }
63
+
64
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]
65
+
66
+ _SOURCE_VERSION = "2.0.0"
67
+
68
+ _BIGBIO_VERSION = "1.0.0"
69
 
 
 
70
 
71
+ class E3cDataset(datasets.GeneratorBasedBuilder):
72
+ """The European Clinical Case Corpus (E3C) is a multilingual corpus of clinical documents.
73
+ The corpus is annotated with clinical entities and temporal information.
74
+ The corpus is available in five languages: Spanish, Basque, English, French and Italian.
75
+ """
 
76
 
77
+ SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
78
+ BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
79
 
 
 
80
  BUILDER_CONFIGS = [
81
+ BigBioConfig(
82
+ name=f"{_DATASETNAME}_source",
83
+ version=SOURCE_VERSION,
84
+ description=f"{_DATASETNAME} source schema",
85
+ schema="source",
86
+ subset_id=_DATASETNAME,
87
  ),
88
  ]
89
 
90
+ DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
91
+
92
+ def _info(self) -> datasets.DatasetInfo:
93
+ # You can arbitrarily nest lists and dictionaries.
94
+ # For iterables, use lists over tuples or `datasets.Sequence`
95
+
96
  features = datasets.Features(
97
  {
98
+ "id": datasets.Value("string"),
99
+ "document_id": datasets.Value("int32"),
100
  "text": datasets.Value("string"),
101
+ "passages": [
102
+ {
103
+ "id": datasets.Value("string"),
104
+ "text": datasets.Value("string"),
105
+ "offsets": [datasets.Value("int32")],
106
+ }
107
+ ],
108
+ "entities": [
109
+ {
110
+ "id": datasets.Value("string"),
111
+ "type": datasets.Value("string"),
112
+ "text": datasets.Value("string"),
113
+ "offsets": [datasets.Value("int32")],
114
+ "semantic_type_id": datasets.Value("string"),
115
+ "role": datasets.Value("string"),
116
+ }
117
+ ],
118
+ "relations": [
119
+ {
120
+ "id": datasets.Value("string"),
121
+ "type": datasets.Value("string"),
122
+ "contextualAspect": datasets.Value("string"),
123
+ "contextualModality": datasets.Value("string"),
124
+ "degree": datasets.Value("string"),
125
+ "docTimeRel": datasets.Value("string"),
126
+ "eventType": datasets.Value("string"),
127
+ "permanence": datasets.Value("string"),
128
+ "polarity": datasets.Value("string"),
129
+ "functionInDocument": datasets.Value("string"),
130
+ "timex3Class": datasets.Value("string"),
131
+ "value": datasets.Value("string"),
132
+ "concept_1": datasets.Value("string"),
133
+ "concept_2": datasets.Value("string"),
134
+ }
135
+ ],
136
  }
137
  )
138
  return datasets.DatasetInfo(
139
  description=_DESCRIPTION,
140
  features=features,
141
+ homepage=_HOMEPAGE,
142
+ license=_LICENSE,
143
  citation=_CITATION,
 
144
  )
145
 
146
+ def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
147
+ """Returns SplitGenerators."""
148
+ urls = _URLS[_DATASETNAME]
149
+ data_dir = dl_manager.download_and_extract(urls)
150
 
151
+ paths = {
152
+ "en.layer1": "data_annotation/English/layer1",
153
+ "en.layer2": "data_annotation/English/layer2",
154
+ "en.layer2.validation": "data_validation/English/layer2",
155
+ "en.layer3": "data_collection/English/layer3",
156
+ "es.layer1": "data_annotation/Spanish/layer1",
157
+ "es.layer2": "data_annotation/Spanish/layer2",
158
+ "es.layer2.validation": "data_validation/Spanish/layer2",
159
+ "es.layer3": "data_collection/Spanish/layer3",
160
+ "eu.layer1": "data_annotation/Basque/layer1",
161
+ "eu.layer2": "data_annotation/Basque/layer2",
162
+ "eu.layer2.validation": "data_validation/Basque/layer2",
163
+ "eu.layer3": "data_collection/Basque/layer3",
164
+ "fr.layer1": "data_annotation/French/layer1",
165
+ "fr.layer2": "data_annotation/French/layer2",
166
+ "fr.layer2.validation": "data_validation/French/layer2",
167
+ "fr.layer3": "data_collection/French/layer3",
168
+ "it.layer1": "data_annotation/Italian/layer1",
169
+ "it.layer2": "data_annotation/Italian/layer2",
170
+ "it.layer2.validation": "data_validation/Italian/layer2",
171
+ "it.layer3": "data_collection/Italian/layer3",
172
+ }
173
  return [
174
  datasets.SplitGenerator(
175
+ name=split,
176
+ # Whatever you put in gen_kwargs will be passed to _generate_examples
177
  gen_kwargs={
178
+ "filepath": os.path.join(data_dir, "E3C-Corpus-2.0.0", path),
179
+ "split": "train",
 
 
 
 
180
  },
181
+ )
182
+ for split, path in paths.items()
 
 
183
  ]
184
 
185
+ def _generate_examples(self, filepath, split: str) -> Iterator[Tuple[int, Dict]]:
186
+ """Yields examples as (key, example) tuples."""
 
 
187
  guid = 0
188
+ for folder, _, files in os.walk(filepath):
189
+ for file in files:
190
+ with open(f"{folder}/{file}") as document:
191
+ if "layer3" not in folder:
192
+ root = et.fromstring(document.read())
193
+ annotations: dict = {}
194
+ for child in root:
195
+ annotations.setdefault(child.tag, []).append(
196
+ child.attrib | {"type": child.tag.split("}")[1]}
197
+ )
198
 
199
+ text = annotations["{http:///uima/cas.ecore}Sofa"][0]["sofaString"]
200
+ links = {
201
+ link["{http://www.omg.org/XMI}id"]: link
202
+ for link in [
203
+ *annotations.get(
204
+ "{http:///webanno/custom.ecore}EVENTTLINKLink", []
205
+ ),
206
+ *annotations.get(
207
+ "{http:///webanno/custom.ecore}RMLPERTAINSTOLink", []
208
+ ),
209
+ *annotations.get(
210
+ "{http:///webanno/custom.ecore}TIMEX3TimexLinkLink", []
211
+ ),
 
 
212
  ]
213
+ }
214
+ joined_relations = []
215
+ for relation in [
216
+ *annotations.get("{http:///webanno/custom.ecore}EVENT", []),
217
+ *annotations.get("{http:///webanno/custom.ecore}TIMEX3", []),
218
+ *annotations.get("{http:///webanno/custom.ecore}RML", []),
219
+ ]:
220
+ link_ids = []
221
+ if "TLINK" in relation.keys():
222
+ link_ids = relation["TLINK"].split(" ")
223
+ elif "PERTAINSTO" in relation.keys():
224
+ link_ids = relation["PERTAINSTO"].split(" ")
225
+ elif "timexLink" in relation.keys():
226
+ link_ids = relation["timexLink"].split(" ")
227
+ elif not link_ids:
228
+ joined_relations.append(
229
+ relation | {"source": relation["{http://www.omg.org/XMI}id"]}
230
+ )
231
+ if link_ids != [""]:
232
+ for link_id in link_ids:
233
+ joined_relations.append(
234
+ relation
235
+ | links[link_id]
236
+ | {"source": relation["{http://www.omg.org/XMI}id"]}
237
+ )
238
+ yield guid, {
239
+ "id": "e3c",
240
+ "document_id": guid,
241
+ "text": text,
242
+ "passages": [
243
+ {
244
+ "text": text[int(sentence["begin"]) : int(sentence["end"])],
245
+ "id": sentence["{http://www.omg.org/XMI}id"],
246
+ "offsets": [int(sentence["begin"]), int(sentence["end"])],
247
+ }
248
+ for sentence in annotations[
249
+ "{http:///de/tudarmstadt/ukp/dkpro/core"
250
+ "/api/segmentation/type.ecore}Sentence"
251
+ ]
252
+ ],
253
+ "entities": [
254
+ {
255
+ "text": text[int(annotation["begin"]) : int(annotation["end"])],
256
+ "offsets": [int(annotation["begin"]), int(annotation["end"])],
257
+ "id": annotation["{http://www.omg.org/XMI}id"],
258
+ "semantic_type_id": annotation.get("entityID", ""),
259
+ "role": annotation.get("role", ""),
260
+ "type": annotation.get("type"),
261
+ }
262
+ for annotation in [
263
+ *annotations.get("{http:///webanno/custom.ecore}EVENT", []),
264
+ *annotations.get(
265
+ "{http:///webanno/custom.ecore}CLINENTITY", []
266
+ ),
267
+ *annotations.get("{http:///webanno/custom.ecore}BODYPART", []),
268
+ *annotations.get("{http:///webanno/custom.ecore}ACTOR", []),
269
+ *annotations.get("{http:///webanno/custom.ecore}RML", []),
270
+ *annotations.get("{http:///webanno/custom.ecore}TIMEX3", []),
271
+ ]
272
+ ],
273
+ "relations": [
274
+ {
275
+ "id": relation["{http://www.omg.org/XMI}id"],
276
+ "type": relation.get("type"),
277
+ "contextualAspect": relation.get("contextualAspect", ""),
278
+ "contextualModality": relation.get("contextualModality", ""),
279
+ "degree": relation.get("degree", ""),
280
+ "docTimeRel": relation.get("docTimeRel", ""),
281
+ "eventType": relation.get("eventType", ""),
282
+ "permanence": relation.get("permanence", ""),
283
+ "polarity": relation.get("polarity", ""),
284
+ "functionInDocument": relation.get("functionInDocument", ""),
285
+ "timex3Class": relation.get("timex3Class", ""),
286
+ "value": relation.get("value", ""),
287
+ "concept_1": relation.get("source"),
288
+ "concept_2": relation.get("target", ""),
289
+ }
290
+ for relation in joined_relations
291
+ ],
292
+ }
293
+ else:
294
+ unannotated_text = json.load(document)
295
+ yield guid, {
296
+ "id": "e3c",
297
+ "document_id": guid,
298
+ "text": unannotated_text["text"],
299
+ "passages": [],
300
+ "entities": [],
301
+ "relations": [],
302
+ }
303
+ guid += 1
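
For readers unfamiliar with the UIMA/XMI layout parsed by `_generate_examples`, here is a self-contained illustration of the same tag-grouping idea on a tiny hand-written XMI-like snippet. The document text and the `entityID` value are made up; only the namespace URIs match the ones used in the script.

```python
import xml.etree.ElementTree as et

# A minimal stand-in for an E3C XMI document: one Sofa (the text) and one entity.
XMI = """<doc xmlns:cas="http:///uima/cas.ecore"
             xmlns:custom="http:///webanno/custom.ecore"
             xmlns:xmi="http://www.omg.org/XMI">
  <cas:Sofa xmi:id="1" sofaString="Patient reports fever."/>
  <custom:CLINENTITY xmi:id="2" begin="16" end="21" entityID="C0000000"/>
</doc>"""

root = et.fromstring(XMI)

# Group every annotation by its fully qualified tag, as the script does.
annotations: dict = {}
for child in root:
    annotations.setdefault(child.tag, []).append(
        child.attrib | {"type": child.tag.split("}")[1]}
    )

text = annotations["{http:///uima/cas.ecore}Sofa"][0]["sofaString"]
entity = annotations["{http:///webanno/custom.ecore}CLINENTITY"][0]
print(text[int(entity["begin"]) : int(entity["end"])])  # -> "fever"
```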