Commit 0ac3643 (verified), committed by ArneBinder · 1 parent: 73c1aa7

Upload 4 files

Original code from https://huggingface.co/datasets/bigbio/chemprot, but with local data.

Files changed (4)
  1. ChemProt_Corpus.zip +3 -0
  2. README.md +49 -0
  3. bigbiohub.py +574 -0
  4. chemprot.py +434 -0
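
For context, a minimal sketch of how a local copy of this loading script can be consumed; the checkout path below is hypothetical, and `trust_remote_code` is the standard flag for script-based datasets:

```python
from datasets import load_dataset

# Config names come from chemprot.py below; "chemprot_full_source" is the default.
ds = load_dataset(
    "path/to/this/repo",     # hypothetical local checkout containing chemprot.py
    name="chemprot_bigbio_kb",
    trust_remote_code=True,  # required for script-based datasets in recent `datasets`
)
print(ds["train"][0]["document_id"])
```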
ChemProt_Corpus.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:492e3d607f38e2727b799e9d60263b776ebd2a5e61cf0fb59bea2b3eb68e1c28
+ size 4977337
README.md ADDED
@@ -0,0 +1,49 @@
+ ______________________________________________________________________
+ 
+ language:
+ 
+ - en
+ bigbio_language:
+ - English
+ license: other
+ multilinguality: monolingual
+ bigbio_license_shortname: PUBLIC_DOMAIN_MARK_1p0
+ pretty_name: ChemProt
+ homepage: https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vi/track-5/
+ bigbio_pubmed: True
+ bigbio_public: True
+ bigbio_tasks:
+ - RELATION_EXTRACTION
+ - NAMED_ENTITY_RECOGNITION
+ 
+ ______________________________________________________________________
+ 
+ # Dataset Card for ChemProt
+ 
+ ## Dataset Description
+ 
+ - **Homepage:** https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vi/track-5/
+ - **Pubmed:** True
+ - **Public:** True
+ - **Tasks:** RE, NER
+ 
+ The BioCreative VI Chemical-Protein interaction dataset identifies entities of
+ chemicals and proteins and their likely relation to one another. Compounds are
+ generally agonists (activators) or antagonists (inhibitors) of proteins.
+ 
+ ## Citation Information
+ 
+ ```
+ @article{DBLP:journals/biodb/LiSJSWLDMWL16,
+   author    = {Krallinger, M., Rabal, O., Lourenço, A.},
+   title     = {Overview of the BioCreative VI chemical-protein interaction Track},
+   journal   = {Proceedings of the BioCreative VI Workshop},
+   volume    = {141-146},
+   year      = {2017},
+   url       = {https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vi/track-5/},
+   doi       = {},
+   biburl    = {},
+   bibsource = {}
+ }
+ ```
bigbiohub.py ADDED
@@ -0,0 +1,574 @@
+ import logging
+ from collections import defaultdict
+ from dataclasses import dataclass
+ from enum import Enum
+ from pathlib import Path
+ from types import SimpleNamespace
+ from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple
+ 
+ import datasets
+ 
+ if TYPE_CHECKING:
+     import bioc
+ 
+ logger = logging.getLogger(__name__)
+ 
+ 
+ BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")
+ 
+ 
+ @dataclass
+ class BigBioConfig(datasets.BuilderConfig):
+     """BuilderConfig for BigBio."""
+ 
+     name: str = None
+     version: datasets.Version = None
+     description: str = None
+     schema: str = None
+     subset_id: str = None
+ 
+ 
+ class Tasks(Enum):
+     NAMED_ENTITY_RECOGNITION = "NER"
+     NAMED_ENTITY_DISAMBIGUATION = "NED"
+     EVENT_EXTRACTION = "EE"
+     RELATION_EXTRACTION = "RE"
+     COREFERENCE_RESOLUTION = "COREF"
+     QUESTION_ANSWERING = "QA"
+     TEXTUAL_ENTAILMENT = "TE"
+     SEMANTIC_SIMILARITY = "STS"
+     TEXT_PAIRS_CLASSIFICATION = "TXT2CLASS"
+     PARAPHRASING = "PARA"
+     TRANSLATION = "TRANSL"
+     SUMMARIZATION = "SUM"
+     TEXT_CLASSIFICATION = "TXTCLASS"
+ 
+ 
+ entailment_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "premise": datasets.Value("string"),
+         "hypothesis": datasets.Value("string"),
+         "label": datasets.Value("string"),
+     }
+ )
+ 
+ pairs_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "text_1": datasets.Value("string"),
+         "text_2": datasets.Value("string"),
+         "label": datasets.Value("string"),
+     }
+ )
+ 
+ qa_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "question_id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "question": datasets.Value("string"),
+         "type": datasets.Value("string"),
+         "choices": [datasets.Value("string")],
+         "context": datasets.Value("string"),
+         "answer": datasets.Sequence(datasets.Value("string")),
+     }
+ )
+ 
+ text_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "text": datasets.Value("string"),
+         "labels": [datasets.Value("string")],
+     }
+ )
+ 
+ text2text_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "text_1": datasets.Value("string"),
+         "text_2": datasets.Value("string"),
+         "text_1_name": datasets.Value("string"),
+         "text_2_name": datasets.Value("string"),
+     }
+ )
+ 
+ kb_features = datasets.Features(
+     {
+         "id": datasets.Value("string"),
+         "document_id": datasets.Value("string"),
+         "passages": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 "text": datasets.Sequence(datasets.Value("string")),
+                 "offsets": datasets.Sequence([datasets.Value("int32")]),
+             }
+         ],
+         "entities": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 "text": datasets.Sequence(datasets.Value("string")),
+                 "offsets": datasets.Sequence([datasets.Value("int32")]),
+                 "normalized": [
+                     {
+                         "db_name": datasets.Value("string"),
+                         "db_id": datasets.Value("string"),
+                     }
+                 ],
+             }
+         ],
+         "events": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 # refers to the text_bound_annotation of the trigger
+                 "trigger": {
+                     "text": datasets.Sequence(datasets.Value("string")),
+                     "offsets": datasets.Sequence([datasets.Value("int32")]),
+                 },
+                 "arguments": [
+                     {
+                         "role": datasets.Value("string"),
+                         "ref_id": datasets.Value("string"),
+                     }
+                 ],
+             }
+         ],
+         "coreferences": [
+             {
+                 "id": datasets.Value("string"),
+                 "entity_ids": datasets.Sequence(datasets.Value("string")),
+             }
+         ],
+         "relations": [
+             {
+                 "id": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 "arg1_id": datasets.Value("string"),
+                 "arg2_id": datasets.Value("string"),
+                 "normalized": [
+                     {
+                         "db_name": datasets.Value("string"),
+                         "db_id": datasets.Value("string"),
+                     }
+                 ],
+             }
+         ],
+     }
+ )
+ 
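
For orientation, a minimal record that conforms to `kb_features` above; the PMID, text, offsets, and labels are made up for illustration:

```python
# A hedged sketch of one bigbio_kb example (all values hypothetical):
example = {
    "id": "0",
    "document_id": "12345678",
    "passages": [{"id": "1", "type": "title and abstract",
                  "text": ["Aspirin inhibits COX-1."], "offsets": [[0, 23]]}],
    "entities": [
        {"id": "2", "type": "CHEMICAL", "text": ["Aspirin"], "offsets": [[0, 7]], "normalized": []},
        {"id": "3", "type": "GENE-N", "text": ["COX-1"], "offsets": [[17, 22]], "normalized": []},
    ],
    "events": [],
    "coreferences": [],
    "relations": [{"id": "4", "type": "Downregulator",
                   "arg1_id": "2", "arg2_id": "3", "normalized": []}],
}
```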
+ TASK_TO_SCHEMA = {
+     Tasks.NAMED_ENTITY_RECOGNITION.name: "KB",
+     Tasks.NAMED_ENTITY_DISAMBIGUATION.name: "KB",
+     Tasks.EVENT_EXTRACTION.name: "KB",
+     Tasks.RELATION_EXTRACTION.name: "KB",
+     Tasks.COREFERENCE_RESOLUTION.name: "KB",
+     Tasks.QUESTION_ANSWERING.name: "QA",
+     Tasks.TEXTUAL_ENTAILMENT.name: "TE",
+     Tasks.SEMANTIC_SIMILARITY.name: "PAIRS",
+     Tasks.TEXT_PAIRS_CLASSIFICATION.name: "PAIRS",
+     Tasks.PARAPHRASING.name: "T2T",
+     Tasks.TRANSLATION.name: "T2T",
+     Tasks.SUMMARIZATION.name: "T2T",
+     Tasks.TEXT_CLASSIFICATION.name: "TEXT",
+ }
+ 
+ SCHEMA_TO_TASKS = defaultdict(set)
+ for task, schema in TASK_TO_SCHEMA.items():
+     SCHEMA_TO_TASKS[schema].add(task)
+ SCHEMA_TO_TASKS = dict(SCHEMA_TO_TASKS)
+ 
+ VALID_TASKS = set(TASK_TO_SCHEMA.keys())
+ VALID_SCHEMAS = set(TASK_TO_SCHEMA.values())
+ 
+ SCHEMA_TO_FEATURES = {
+     "KB": kb_features,
+     "QA": qa_features,
+     "TE": entailment_features,
+     "T2T": text2text_features,
+     "TEXT": text_features,
+     "PAIRS": pairs_features,
+ }
+ 
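
A quick illustration of how these lookup tables compose (a sketch; assumes `bigbiohub.py` is importable from the working directory):

```python
from bigbiohub import SCHEMA_TO_FEATURES, TASK_TO_SCHEMA, Tasks

# Relation extraction is served by the KB schema, hence kb_features.
schema = TASK_TO_SCHEMA[Tasks.RELATION_EXTRACTION.name]  # "KB"
features = SCHEMA_TO_FEATURES[schema]                    # kb_features
```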
+ def get_texts_and_offsets_from_bioc_ann(ann: "bioc.BioCAnnotation") -> Tuple:
+     offsets = [(loc.offset, loc.offset + loc.length) for loc in ann.locations]
+ 
+     text = ann.text
+ 
+     if len(offsets) > 1:
+         i = 0
+         texts = []
+         for start, end in offsets:
+             chunk_len = end - start
+             texts.append(text[i : chunk_len + i])
+             i += chunk_len
+             while i < len(text) and text[i] == " ":
+                 i += 1
+     else:
+         texts = [text]
+ 
+     return offsets, texts
+ 
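
To see the discontiguous-span heuristic in action, a small sketch; it duck-types the `bioc` objects with `SimpleNamespace`, so no BioC file is needed:

```python
from types import SimpleNamespace

from bigbiohub import get_texts_and_offsets_from_bioc_ann  # assumes a local checkout

# Hypothetical annotation covering "breast" at [0, 6) and "cancer" at [11, 17),
# whose surface text joins the two chunks with a single space.
ann = SimpleNamespace(
    locations=[SimpleNamespace(offset=0, length=6), SimpleNamespace(offset=11, length=6)],
    text="breast cancer",
)
offsets, texts = get_texts_and_offsets_from_bioc_ann(ann)
print(offsets)  # [(0, 6), (11, 17)]
print(texts)    # ['breast', 'cancer']
```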
+ def remove_prefix(a: str, prefix: str) -> str:
+     if a.startswith(prefix):
+         a = a[len(prefix) :]
+     return a
+ 
+ 
+ def parse_brat_file(
+     txt_file: Path,
+     annotation_file_suffixes: List[str] = None,
+     parse_notes: bool = False,
+ ) -> Dict:
+     """Parse a brat file into the schema defined below.
+ 
+     `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'.
+     Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2'
+     or '.ann' files, e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'. Will include
+     annotator notes when `parse_notes == True`.
+ 
+     brat_features = datasets.Features(
+         {
+             "id": datasets.Value("string"),
+             "document_id": datasets.Value("string"),
+             "text": datasets.Value("string"),
+             "text_bound_annotations": [  # T lines in brat, e.g. type or event trigger
+                 {
+                     "offsets": datasets.Sequence([datasets.Value("int32")]),
+                     "text": datasets.Sequence(datasets.Value("string")),
+                     "type": datasets.Value("string"),
+                     "id": datasets.Value("string"),
+                 }
+             ],
+             "events": [  # E lines in brat
+                 {
+                     "trigger": datasets.Value(
+                         "string"
+                     ),  # refers to the text_bound_annotation of the trigger
+                     "id": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "arguments": datasets.Sequence(
+                         {
+                             "role": datasets.Value("string"),
+                             "ref_id": datasets.Value("string"),
+                         }
+                     ),
+                 }
+             ],
+             "relations": [  # R lines in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "head": {
+                         "ref_id": datasets.Value("string"),
+                         "role": datasets.Value("string"),
+                     },
+                     "tail": {
+                         "ref_id": datasets.Value("string"),
+                         "role": datasets.Value("string"),
+                     },
+                     "type": datasets.Value("string"),
+                 }
+             ],
+             "equivalences": [  # Equiv lines in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "ref_ids": datasets.Sequence(datasets.Value("string")),
+                 }
+             ],
+             "attributes": [  # M or A lines in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "ref_id": datasets.Value("string"),
+                     "value": datasets.Value("string"),
+                 }
+             ],
+             "normalizations": [  # N lines in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "ref_id": datasets.Value("string"),
+                     "resource_name": datasets.Value(
+                         "string"
+                     ),  # Name of the resource, e.g. "Wikipedia"
+                     "cuid": datasets.Value(
+                         "string"
+                     ),  # ID in the resource, e.g. 534366
+                     "text": datasets.Value(
+                         "string"
+                     ),  # Human-readable description/name of the entity, e.g. "Barack Obama"
+                 }
+             ],
+             ### OPTIONAL: Only included when `parse_notes == True`
+             "notes": [  # '#' lines in brat
+                 {
+                     "id": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "ref_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                 }
+             ],
+         },
+     )
+     """
+ 
+     example = {}
+     example["document_id"] = txt_file.with_suffix("").name
+     with txt_file.open() as f:
+         example["text"] = f.read()
+ 
+     # If no specific suffixes of the to-be-read annotation files are given,
+     # take the standard suffixes for event extraction
+     if annotation_file_suffixes is None:
+         annotation_file_suffixes = [".a1", ".a2", ".ann"]
+ 
+     if len(annotation_file_suffixes) == 0:
+         raise AssertionError(
+             "At least one suffix for the to-be-read annotation files should be given!"
+         )
+ 
+     ann_lines = []
+     for suffix in annotation_file_suffixes:
+         annotation_file = txt_file.with_suffix(suffix)
+         try:
+             with annotation_file.open() as f:
+                 ann_lines.extend(f.readlines())
+         except Exception:
+             continue
+ 
+     example["text_bound_annotations"] = []
+     example["events"] = []
+     example["relations"] = []
+     example["equivalences"] = []
+     example["attributes"] = []
+     example["normalizations"] = []
+ 
+     if parse_notes:
+         example["notes"] = []
+ 
+     for line in ann_lines:
+         line = line.strip()
+         if not line:
+             continue
+ 
+         if line.startswith("T"):  # Text bound
+             ann = {}
+             fields = line.split("\t")
+ 
+             ann["id"] = fields[0]
+             ann["type"] = fields[1].split()[0]
+             ann["offsets"] = []
+             span_str = remove_prefix(fields[1], (ann["type"] + " "))
+             text = fields[2]
+             for span in span_str.split(";"):
+                 start, end = span.split()
+                 ann["offsets"].append([int(start), int(end)])
+ 
+             # Heuristically split text of discontiguous entities into chunks
+             ann["text"] = []
+             if len(ann["offsets"]) > 1:
+                 i = 0
+                 for start, end in ann["offsets"]:
+                     chunk_len = end - start
+                     ann["text"].append(text[i : chunk_len + i])
+                     i += chunk_len
+                     while i < len(text) and text[i] == " ":
+                         i += 1
+             else:
+                 ann["text"] = [text]
+ 
+             example["text_bound_annotations"].append(ann)
+ 
+         elif line.startswith("E"):
+             ann = {}
+             fields = line.split("\t")
+ 
+             ann["id"] = fields[0]
+ 
+             ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
+ 
+             ann["arguments"] = []
+             for role_ref_id in fields[1].split()[1:]:
+                 argument = {
+                     "role": (role_ref_id.split(":"))[0],
+                     "ref_id": (role_ref_id.split(":"))[1],
+                 }
+                 ann["arguments"].append(argument)
+ 
+             example["events"].append(ann)
+ 
+         elif line.startswith("R"):
+             ann = {}
+             fields = line.split("\t")
+ 
+             ann["id"] = fields[0]
+             ann["type"] = fields[1].split()[0]
+ 
+             ann["head"] = {
+                 "role": fields[1].split()[1].split(":")[0],
+                 "ref_id": fields[1].split()[1].split(":")[1],
+             }
+             ann["tail"] = {
+                 "role": fields[1].split()[2].split(":")[0],
+                 "ref_id": fields[1].split()[2].split(":")[1],
+             }
+ 
+             example["relations"].append(ann)
+ 
+         # '*' seems to be the legacy way to mark equivalences,
+         # but I couldn't find any info on the current way;
+         # this might have to be adapted depending on the brat version
+         # of the annotation
+         elif line.startswith("*"):
+             ann = {}
+             fields = line.split("\t")
+ 
+             ann["id"] = fields[0]
+             ann["ref_ids"] = fields[1].split()[1:]
+ 
+             example["equivalences"].append(ann)
+ 
+         elif line.startswith("A") or line.startswith("M"):
+             ann = {}
+             fields = line.split("\t")
+ 
+             ann["id"] = fields[0]
+ 
+             info = fields[1].split()
+             ann["type"] = info[0]
+             ann["ref_id"] = info[1]
+ 
+             if len(info) > 2:
+                 ann["value"] = info[2]
+             else:
+                 ann["value"] = ""
+ 
+             example["attributes"].append(ann)
+ 
+         elif line.startswith("N"):
+             ann = {}
+             fields = line.split("\t")
+ 
+             ann["id"] = fields[0]
+             ann["text"] = fields[2]
+ 
+             info = fields[1].split()
+ 
+             ann["type"] = info[0]
+             ann["ref_id"] = info[1]
+             ann["resource_name"] = info[2].split(":")[0]
+             ann["cuid"] = info[2].split(":")[1]
+             example["normalizations"].append(ann)
+ 
+         elif parse_notes and line.startswith("#"):
+             ann = {}
+             fields = line.split("\t")
+ 
+             ann["id"] = fields[0]
+             ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL
+ 
+             info = fields[1].split()
+ 
+             ann["type"] = info[0]
+             ann["ref_id"] = info[1]
+             example["notes"].append(ann)
+ 
+     return example
+ 
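
An end-to-end sketch of `parse_brat_file` on a toy brat document (file contents and paths are made up):

```python
import tempfile
from pathlib import Path

from bigbiohub import parse_brat_file  # assumes a local checkout

tmp = Path(tempfile.mkdtemp())
(tmp / "doc1.txt").write_text("Aspirin inhibits COX-1.")
(tmp / "doc1.ann").write_text(
    "T1\tCHEMICAL 0 7\tAspirin\n"
    "T2\tGENE-N 17 22\tCOX-1\n"
    "R1\tCPR:4 Arg1:T1 Arg2:T2\n"
)
parsed = parse_brat_file(tmp / "doc1.txt")
print(parsed["text_bound_annotations"][0])
# {'id': 'T1', 'type': 'CHEMICAL', 'offsets': [[0, 7]], 'text': ['Aspirin']}
print(parsed["relations"][0]["head"])  # {'role': 'Arg1', 'ref_id': 'T1'}
```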
+ def brat_parse_to_bigbio_kb(brat_parse: Dict) -> Dict:
+     """Transform a brat parse (conforming to the standard brat schema) obtained with
+     `parse_brat_file` into a dictionary conforming to the `bigbio-kb` schema (as defined in
+     ../schemas/kb.py)
+ 
+     :param brat_parse:
+     """
+ 
+     unified_example = {}
+ 
+     # Prefix all ids with document id to ensure global uniqueness,
+     # because brat ids are only unique within their document
+     id_prefix = brat_parse["document_id"] + "_"
+ 
+     # identical
+     unified_example["document_id"] = brat_parse["document_id"]
+     unified_example["passages"] = [
+         {
+             "id": id_prefix + "_text",
+             "type": "abstract",
+             "text": [brat_parse["text"]],
+             "offsets": [[0, len(brat_parse["text"])]],
+         }
+     ]
+ 
+     # get normalizations
+     ref_id_to_normalizations = defaultdict(list)
+     for normalization in brat_parse["normalizations"]:
+         ref_id_to_normalizations[normalization["ref_id"]].append(
+             {
+                 "db_name": normalization["resource_name"],
+                 "db_id": normalization["cuid"],
+             }
+         )
+ 
+     # separate entities and event triggers
+     unified_example["events"] = []
+     non_event_ann = brat_parse["text_bound_annotations"].copy()
+     for event in brat_parse["events"]:
+         event = event.copy()
+         event["id"] = id_prefix + event["id"]
+         trigger = next(
+             tr for tr in brat_parse["text_bound_annotations"] if tr["id"] == event["trigger"]
+         )
+         if trigger in non_event_ann:
+             non_event_ann.remove(trigger)
+         event["trigger"] = {
+             "text": trigger["text"].copy(),
+             "offsets": trigger["offsets"].copy(),
+         }
+         for argument in event["arguments"]:
+             argument["ref_id"] = id_prefix + argument["ref_id"]
+ 
+         unified_example["events"].append(event)
+ 
+     unified_example["entities"] = []
+     anno_ids = [ref_id["id"] for ref_id in non_event_ann]
+     for ann in non_event_ann:
+         entity_ann = ann.copy()
+         entity_ann["id"] = id_prefix + entity_ann["id"]
+         entity_ann["normalized"] = ref_id_to_normalizations[ann["id"]]
+         unified_example["entities"].append(entity_ann)
+ 
+     # massage relations
+     unified_example["relations"] = []
+     skipped_relations = set()
+     for ann in brat_parse["relations"]:
+         if ann["head"]["ref_id"] not in anno_ids or ann["tail"]["ref_id"] not in anno_ids:
+             skipped_relations.add(ann["id"])
+             continue
+         unified_example["relations"].append(
+             {
+                 "arg1_id": id_prefix + ann["head"]["ref_id"],
+                 "arg2_id": id_prefix + ann["tail"]["ref_id"],
+                 "id": id_prefix + ann["id"],
+                 "type": ann["type"],
+                 "normalized": [],
+             }
+         )
+     if len(skipped_relations) > 0:
+         example_id = brat_parse["document_id"]
+         logger.info(
+             f"Example:{example_id}: The `bigbio_kb` schema allows `relations` only between entities."
+             f" Skip (for now): "
+             f"{list(skipped_relations)}"
+         )
+ 
+     # get coreferences
+     unified_example["coreferences"] = []
+     for i, ann in enumerate(brat_parse["equivalences"], start=1):
+         is_entity_cluster = True
+         for ref_id in ann["ref_ids"]:
+             if not ref_id.startswith("T"):  # not textbound -> no entity
+                 is_entity_cluster = False
+             elif ref_id not in anno_ids:  # event trigger -> no entity
+                 is_entity_cluster = False
+         if is_entity_cluster:
+             entity_ids = [id_prefix + i for i in ann["ref_ids"]]
+             unified_example["coreferences"].append(
+                 {"id": id_prefix + str(i), "entity_ids": entity_ids}
+             )
+     return unified_example
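
Continuing the toy document from the `parse_brat_file` sketch above, the two functions chain directly:

```python
from bigbiohub import brat_parse_to_bigbio_kb, parse_brat_file  # assumes a local checkout

kb = brat_parse_to_bigbio_kb(parsed)  # `parsed` from the sketch above
print(kb["entities"][0]["id"])        # 'doc1_T1' (ids are prefixed with the document id)
print(kb["relations"][0]["type"])     # 'CPR:4'
```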
chemprot.py ADDED
@@ -0,0 +1,434 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """The BioCreative VI Chemical-Protein interaction dataset identifies entities of chemicals and
+ proteins and their likely relation to one another.
+ 
+ Compounds are generally agonists (activators) or antagonists (inhibitors) of proteins. The script
+ loads the dataset in the bigbio schema (using the knowledgebase schema: schemas/kb) and/or the
+ source (default) schema.
+ """
+ import os
+ from typing import Dict, List, Tuple
+ 
+ import datasets
+ 
+ from .bigbiohub import BigBioConfig, Tasks, kb_features
+ 
+ _LANGUAGES = ["English"]
+ _PUBMED = True
+ _LOCAL = True
+ _CITATION = """\
+ @article{DBLP:journals/biodb/LiSJSWLDMWL16,
+   author    = {Krallinger, M., Rabal, O., Lourenço, A.},
+   title     = {Overview of the BioCreative VI chemical-protein interaction Track},
+   journal   = {Proceedings of the BioCreative VI Workshop},
+   volume    = {141-146},
+   year      = {2017},
+   url       = {https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vi/track-5/},
+   doi       = {},
+   biburl    = {},
+   bibsource = {}
+ }
+ """
+ _DESCRIPTION = """\
+ The BioCreative VI Chemical-Protein interaction dataset identifies entities of
+ chemicals and proteins and their likely relation to one another. Compounds are
+ generally agonists (activators) or antagonists (inhibitors) of proteins.
+ """
+ 
+ _DATASETNAME = "chemprot"
+ _DISPLAYNAME = "ChemProt"
+ 
+ _HOMEPAGE = "https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vi/track-5/"
+ 
+ _LICENSE = "Public Domain Mark 1.0"
+ 
+ _URLs = {
+     "source": "./ChemProt_Corpus.zip",
+     "bigbio_kb": "./ChemProt_Corpus.zip",
+ }
+ 
+ _SUPPORTED_TASKS = [Tasks.RELATION_EXTRACTION, Tasks.NAMED_ENTITY_RECOGNITION]
+ _SOURCE_VERSION = "1.0.0"
+ _BIGBIO_VERSION = "1.0.0"
+ 
+ 
+ # ChemProt-specific variables
+ # NOTE: There are 3 examples (2 in dev, 1 in training) with CPR:0
+ _GROUP_LABELS = {
+     "CPR:0": "Undefined",
+     "CPR:1": "Part_of",
+     "CPR:2": "Regulator",
+     "CPR:3": "Upregulator",
+     "CPR:4": "Downregulator",
+     "CPR:5": "Agonist",
+     "CPR:6": "Antagonist",
+     "CPR:7": "Modulator",
+     "CPR:8": "Cofactor",
+     "CPR:9": "Substrate",
+     "CPR:10": "Not",
+ }
+ 
+ class ChemprotDataset(datasets.GeneratorBasedBuilder):
85
+ """BioCreative VI Chemical-Protein Interaction Task."""
86
+
87
+ SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
88
+ BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
89
+
90
+ BUILDER_CONFIGS = [
91
+ BigBioConfig(
92
+ name="chemprot_full_source",
93
+ version=SOURCE_VERSION,
94
+ description="chemprot source schema",
95
+ schema="source",
96
+ subset_id="chemprot_full",
97
+ ),
98
+ BigBioConfig(
99
+ name="chemprot_shared_task_eval_source",
100
+ version=SOURCE_VERSION,
101
+ description="chemprot source schema with only the relation types that were used in the shared task evaluation",
102
+ schema="source",
103
+ subset_id="chemprot_shared_task_eval",
104
+ ),
105
+ BigBioConfig(
106
+ name="chemprot_bigbio_kb",
107
+ version=BIGBIO_VERSION,
108
+ description="chemprot BigBio schema",
109
+ schema="bigbio_kb",
110
+ subset_id="chemprot",
111
+ ),
112
+ ]
113
+
114
+ DEFAULT_CONFIG_NAME = "chemprot_full_source"
115
+
116
+ def _info(self):
117
+
118
+ if self.config.schema == "source":
119
+ features = datasets.Features(
120
+ {
121
+ "pmid": datasets.Value("string"),
122
+ "text": datasets.Value("string"),
123
+ "entities": datasets.Sequence(
124
+ {
125
+ "id": datasets.Value("string"),
126
+ "type": datasets.Value("string"),
127
+ "text": datasets.Value("string"),
128
+ "offsets": datasets.Sequence(datasets.Value("int64")),
129
+ }
130
+ ),
131
+ "relations": datasets.Sequence(
132
+ {
133
+ "type": datasets.Value("string"),
134
+ "arg1": datasets.Value("string"),
135
+ "arg2": datasets.Value("string"),
136
+ }
137
+ ),
138
+ }
139
+ )
140
+
141
+ elif self.config.schema == "bigbio_kb":
142
+ features = kb_features
143
+
144
+ return datasets.DatasetInfo(
145
+ description=_DESCRIPTION,
146
+ features=features,
147
+ homepage=_HOMEPAGE,
148
+ license=str(_LICENSE),
149
+ citation=_CITATION,
150
+ )
151
+
152
+ def _split_generators(self, dl_manager):
153
+ """Returns SplitGenerators."""
154
+ my_urls = _URLs[self.config.schema]
155
+ data_dir = dl_manager.download_and_extract(my_urls)
156
+
157
+ # Extract each of the individual folders
158
+ # NOTE: omitting "extract" call cause it uses a new folder
159
+ train_path = dl_manager.extract(
160
+ os.path.join(data_dir, "ChemProt_Corpus/chemprot_training.zip")
161
+ )
162
+ test_path = dl_manager.extract(
163
+ os.path.join(data_dir, "ChemProt_Corpus/chemprot_test_gs.zip")
164
+ )
165
+ dev_path = dl_manager.extract(
166
+ os.path.join(data_dir, "ChemProt_Corpus/chemprot_development.zip")
167
+ )
168
+ sample_path = dl_manager.extract(
169
+ os.path.join(data_dir, "ChemProt_Corpus/chemprot_sample.zip")
170
+ )
171
+
172
+ return [
173
+ datasets.SplitGenerator(
174
+ name="sample", # should be a named split : /
175
+ gen_kwargs={
176
+ "filepath": os.path.join(sample_path, "chemprot_sample"),
177
+ "abstract_file": "chemprot_sample_abstracts.tsv",
178
+ "entity_file": "chemprot_sample_entities.tsv",
179
+ "relation_file": "chemprot_sample_relations.tsv",
180
+ "gold_standard_file": "chemprot_sample_gold_standard.tsv",
181
+ "split": "sample",
182
+ },
183
+ ),
184
+ datasets.SplitGenerator(
185
+ name=datasets.Split.TRAIN,
186
+ gen_kwargs={
187
+ "filepath": os.path.join(train_path, "chemprot_training"),
188
+ "abstract_file": "chemprot_training_abstracts.tsv",
189
+ "entity_file": "chemprot_training_entities.tsv",
190
+ "relation_file": "chemprot_training_relations.tsv",
191
+ "gold_standard_file": "chemprot_training_gold_standard.tsv",
192
+ "split": "train",
193
+ },
194
+ ),
195
+ datasets.SplitGenerator(
196
+ name=datasets.Split.TEST,
197
+ gen_kwargs={
198
+ "filepath": os.path.join(test_path, "chemprot_test_gs"),
199
+ "abstract_file": "chemprot_test_abstracts_gs.tsv",
200
+ "entity_file": "chemprot_test_entities_gs.tsv",
201
+ "relation_file": "chemprot_test_relations_gs.tsv",
202
+ "gold_standard_file": "chemprot_test_gold_standard.tsv",
203
+ "split": "test",
204
+ },
205
+ ),
206
+ datasets.SplitGenerator(
207
+ name=datasets.Split.VALIDATION,
208
+ gen_kwargs={
209
+ "filepath": os.path.join(dev_path, "chemprot_development"),
210
+ "abstract_file": "chemprot_development_abstracts.tsv",
211
+ "entity_file": "chemprot_development_entities.tsv",
212
+ "relation_file": "chemprot_development_relations.tsv",
213
+ "gold_standard_file": "chemprot_development_gold_standard.tsv",
214
+ "split": "dev",
215
+ },
216
+ ),
217
+ ]
218
+
219
+ def _generate_examples(
220
+ self,
221
+ filepath,
222
+ abstract_file,
223
+ entity_file,
224
+ relation_file,
225
+ gold_standard_file,
226
+ split,
227
+ ):
228
+ """Yields examples as (key, example) tuples."""
229
+ if self.config.schema == "source":
230
+ abstracts = self._get_abstract(os.path.join(filepath, abstract_file))
231
+
232
+ entities, entity_id = self._get_entities(os.path.join(filepath, entity_file))
233
+
234
+ if self.config.subset_id == "chemprot_full":
235
+ relations = self._get_relations(os.path.join(filepath, relation_file))
236
+ elif self.config.subset_id == "chemprot_shared_task_eval":
237
+ relations = self._get_relations_gs(os.path.join(filepath, gold_standard_file))
238
+ else:
239
+ raise ValueError(self.config)
240
+
241
+ for id_, pmid in enumerate(abstracts.keys()):
242
+ yield id_, {
243
+ "pmid": pmid,
244
+ "text": abstracts[pmid],
245
+ "entities": entities[pmid],
246
+ "relations": relations.get(pmid, []),
247
+ }
248
+
249
+ elif self.config.schema == "bigbio_kb":
250
+
251
+ abstracts = self._get_abstract(os.path.join(filepath, abstract_file))
252
+ entities, entity_id = self._get_entities(os.path.join(filepath, entity_file))
253
+ relations = self._get_relations(os.path.join(filepath, relation_file), is_mapped=True)
254
+
255
+ uid = 0
256
+ for id_, pmid in enumerate(abstracts.keys()):
257
+ data = {
258
+ "id": str(uid),
259
+ "document_id": str(pmid),
260
+ "passages": [],
261
+ "entities": [],
262
+ "relations": [],
263
+ "events": [],
264
+ "coreferences": [],
265
+ }
266
+ uid += 1
267
+
268
+ data["passages"] = [
269
+ {
270
+ "id": str(uid),
271
+ "type": "title and abstract",
272
+ "text": [abstracts[pmid]],
273
+ "offsets": [[0, len(abstracts[pmid])]],
274
+ }
275
+ ]
276
+ uid += 1
277
+
278
+ entity_to_id = {}
279
+ for entity in entities[pmid]:
280
+ _text = entity["text"]
281
+ entity.update({"text": [_text]})
282
+ entity_to_id[entity["id"]] = str(uid)
283
+ entity.update({"id": str(uid)})
284
+ _offsets = entity["offsets"]
285
+ entity.update({"offsets": [_offsets]})
286
+ entity["normalized"] = []
287
+ data["entities"].append(entity)
288
+ uid += 1
289
+
290
+ for relation in relations.get(pmid, []):
291
+ relation["arg1_id"] = entity_to_id[relation.pop("arg1")]
292
+ relation["arg2_id"] = entity_to_id[relation.pop("arg2")]
293
+ relation.update({"id": str(uid)})
294
+ relation["normalized"] = []
295
+ data["relations"].append(relation)
296
+ uid += 1
297
+
298
+ yield id_, data
299
+
300
+ @staticmethod
301
+ def _get_abstract(abs_filename: str) -> Dict[str, str]:
302
+ """For each document in PubMed ID (PMID) in the ChemProt abstract data file, return the
303
+ abstract. Data is tab-separated.
304
+
305
+ :param filename:`*_abstracts.tsv from ChemProt :returns Dictionary with PMID keys and
306
+ abstract text as values.
307
+ """
308
+ with open(abs_filename) as f:
309
+ contents = [i.strip() for i in f.readlines()]
310
+
311
+ # PMID is the first column, Abstract is last
312
+ return {
313
+ doc.split("\t")[0]: "\n".join(doc.split("\t")[1:]) for doc in contents
314
+ } # Includes title as line 1
315
+
316
+ @staticmethod
317
+ def _get_entities(ents_filename: str) -> Tuple[Dict[str, str]]:
318
+ """
319
+ For each document in the corpus, return entity annotations per PMID.
320
+ Each column in the entity file is as follows:
321
+ (1) PMID
322
+ (2) Entity Number
323
+ (3) Entity Type (Chemical, Gene-Y, Gene-N)
324
+ (4) Start index
325
+ (5) End index
326
+ (6) Actual text of entity
327
+
328
+ :param ents_filename: `_*entities.tsv` file from ChemProt
329
+
330
+ :returns: Dictionary with PMID keys and entity annotations.
331
+ """
332
+ with open(ents_filename) as f:
333
+ contents = [i.strip() for i in f.readlines()]
334
+
335
+ entities = {}
336
+ entity_id = {}
337
+
338
+ for line in contents:
339
+
340
+ pmid, idx, label, start_offset, end_offset, name = line.split("\t")
341
+
342
+ # Populate entity dictionary
343
+ if pmid not in entities:
344
+ entities[pmid] = []
345
+
346
+ ann = {
347
+ "offsets": [int(start_offset), int(end_offset)],
348
+ "text": name,
349
+ "type": label,
350
+ "id": idx,
351
+ }
352
+
353
+ entities[pmid].append(ann)
354
+
355
+ # Populate entity mapping
356
+ entity_id.update({idx: name})
357
+
358
+ return entities, entity_id
359
+
360
+ @staticmethod
361
+ def _get_relations(rel_filename: str, is_mapped: bool = False) -> Dict[str, str]:
362
+ """For each document in the ChemProt corpus, create an annotation for all relationships.
363
+
364
+ :param is_mapped: Whether to convert into NL the relation tags. Default is OFF
365
+ """
366
+ with open(rel_filename) as f:
367
+ contents = [i.strip() for i in f.readlines()]
368
+
369
+ relations = {}
370
+
371
+ for line in contents:
372
+ pmid, label, _, _, arg1, arg2 = line.split("\t")
373
+ arg1 = arg1.split("Arg1:")[-1]
374
+ arg2 = arg2.split("Arg2:")[-1]
375
+
376
+ if pmid not in relations:
377
+ relations[pmid] = []
378
+
379
+ if is_mapped:
380
+ label = _GROUP_LABELS[label]
381
+
382
+ ann = {
383
+ "type": label,
384
+ "arg1": arg1,
385
+ "arg2": arg2,
386
+ }
387
+
388
+ relations[pmid].append(ann)
389
+
390
+ return relations
391
+
392
+ @staticmethod
393
+ def _get_relations_gs(rel_filename: str, is_mapped: bool = False) -> Dict[str, str]:
394
+ """For each document in the ChemProt corpus, create an annotation for the gold-standard
395
+ relationships.
396
+
397
+ The columns include:
398
+ (1) PMID
399
+ (2) Relationship Label (CPR)
400
+ (3) Used in shared task
401
+ (3) Interactor Argument 1 Entity Identifier
402
+ (4) Interactor Argument 2 Entity Identifier
403
+
404
+ Gold standard includes CPRs 3-9. Relationships are always Gene + Protein.
405
+ Unlike entities, there is no counter, hence once must be made
406
+
407
+ :param rel_filename: Gold standard file name
408
+ :param ent_dict: Entity Identifier to text
409
+ """
410
+ with open(rel_filename) as f:
411
+ contents = [i.strip() for i in f.readlines()]
412
+
413
+ relations = {}
414
+
415
+ for line in contents:
416
+ pmid, label, arg1, arg2 = line.split("\t")
417
+ arg1 = arg1.split("Arg1:")[-1]
418
+ arg2 = arg2.split("Arg2:")[-1]
419
+
420
+ if pmid not in relations:
421
+ relations[pmid] = []
422
+
423
+ if is_mapped:
424
+ label = _GROUP_LABELS[label]
425
+
426
+ ann = {
427
+ "type": label,
428
+ "arg1": arg1,
429
+ "arg2": arg2,
430
+ }
431
+
432
+ relations[pmid].append(ann)
433
+
434
+ return relations
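
And the corresponding gold-standard line format, which drops the extra columns (values made up):

```python
line = "12345678\tCPR:4\tArg1:T1\tArg2:T30"
pmid, label, arg1, arg2 = line.split("\t")
arg1 = arg1.split("Arg1:")[-1]  # 'T1'
arg2 = arg2.split("Arg2:")[-1]  # 'T30'
```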