Datasets: PlanTL-GOB-ES/pharmaconer
Tasks: Token Classification
Modalities: Text
Sub-tasks: named-entity-recognition
Languages: Spanish
Size: 10K - 100K
License: cc-by-4.0
Update pharmaconer.py
pharmaconer.py  +40 -18
pharmaconer.py CHANGED
@@ -1,4 +1,12 @@
-
+"""
+A dataset loading script for the PharmaCoNER corpus.
+
+The PharmaCoNER dataset is a manually annotated collection of clinical case
+studies derived from the Spanish Clinical Case Corpus (SPACCC). It was designed
+for the Pharmacological Substances, Compounds and Proteins NER track, the first
+shared task on detecting drug and chemical entities in Spanish medical documents.
+"""
+
 import datasets
 
 
@@ -6,8 +14,8 @@ logger = datasets.logging.get_logger(__name__)
 
 
 _CITATION = """\
-@inproceedings{
-    title = "
+@inproceedings{,
+    title = "PharmaCoNER: Pharmacological Substances, Compounds and proteins Named Entity Recognition track",
     author = "Gonzalez-Agirre, Aitor and
       Marimon, Montserrat and
       Intxaurrondo, Ander and
@@ -22,13 +30,33 @@ _CITATION = """\
     url = "https://aclanthology.org/D19-5701",
     doi = "10.18653/v1/D19-5701",
     pages = "1--10",
-    abstract = "
+    abstract = "",
 }
 """
 
 _DESCRIPTION = """\
-
+PharmaCoNER: Pharmacological Substances, Compounds and Proteins Named Entity Recognition track
+
+This dataset is designed for the PharmaCoNER task, sponsored by Plan de Impulso de las Tecnologías del Lenguaje (Plan TL).
+
+It is a manually classified collection of clinical case studies derived from the Spanish Clinical Case Corpus (SPACCC), an
+open access electronic library that gathers Spanish medical publications from SciELO (Scientific Electronic Library Online).
+
+The annotation of the entire set of entity mentions was carried out by medicinal chemistry experts
+and it includes the following 4 entity types: NORMALIZABLES, NO_NORMALIZABLES, PROTEINAS and UNCLEAR.
+
+The PharmaCoNER corpus contains a total of 396,988 words and 1,000 clinical cases that have been randomly sampled into 3 subsets.
+The training set contains 500 clinical cases, while the development and test sets contain 250 clinical cases each.
+In terms of training examples, this translates to a total of 8074, 3764 and 3931 annotated sentences in each set.
+The original dataset was distributed in Brat format (https://brat.nlplab.org/standoff.html).
+
+For further information, please visit https://temu.bsc.es/pharmaconer/ or send an email to [email protected]
 """
+_HOMEPAGE = "https://temu.bsc.es/pharmaconer/index.php/datasets/"
+
+_LICENSE = "Creative Commons Attribution 4.0 International"
+
+_VERSION = "1.1.0"
 
 _URL = "https://huggingface.co/datasets/PlanTL-GOB-ES/pharmaconer/resolve/main/"
 _TRAINING_FILE = "train.conll"
@@ -36,14 +64,9 @@ _DEV_FILE = "dev.conll"
 _TEST_FILE = "test.conll"
 
 class PharmaCoNERConfig(datasets.BuilderConfig):
-    """BuilderConfig for PharmaCoNER dataset"""
+    """BuilderConfig for PharmaCoNER dataset."""
 
     def __init__(self, **kwargs):
-        """BuilderConfig for PharmaCoNER.
-
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
         super(PharmaCoNERConfig, self).__init__(**kwargs)
 
 
@@ -53,7 +76,7 @@ class PharmaCoNER(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
         PharmaCoNERConfig(
             name="PharmaCoNER",
-            version=datasets.Version(
+            version=datasets.Version(_VERSION),
             description="PharmaCoNER dataset"),
     ]
 
@@ -82,7 +105,7 @@ class PharmaCoNER(datasets.GeneratorBasedBuilder):
                 }
             ),
             supervised_keys=None,
-            homepage=
+            homepage=_HOMEPAGE,
            citation=_CITATION,
         )
 
@@ -90,8 +113,8 @@ class PharmaCoNER(datasets.GeneratorBasedBuilder):
         """Returns SplitGenerators."""
         urls_to_download = {
             "train": f"{_URL}{_TRAINING_FILE}",
-            "dev":
-            "test":
+            "dev": f"{_URL}{_DEV_FILE}",
+            "test": f"{_URL}{_TEST_FILE}",
         }
         downloaded_files = dl_manager.download_and_extract(urls_to_download)
 
@@ -109,7 +132,7 @@ class PharmaCoNER(datasets.GeneratorBasedBuilder):
             pos_tags = []
             ner_tags = []
             for line in f:
-                if line
+                if line == "\n":
                     if tokens:
                         yield guid, {
                             "id": str(guid),
@@ -120,7 +143,6 @@ class PharmaCoNER(datasets.GeneratorBasedBuilder):
                         tokens = []
                         ner_tags = []
                 else:
-                    # PharmaCoNER tokens are tab separated
                     splits = line.split("\t")
                     tokens.append(splits[0])
                     ner_tags.append(splits[-1].rstrip())
@@ -129,4 +151,4 @@ class PharmaCoNER(datasets.GeneratorBasedBuilder):
                 "id": str(guid),
                 "tokens": tokens,
                 "ner_tags": ner_tags,
-            }
+            }
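For reference, a minimal sketch of how the updated loading script is consumed through the datasets library. The repository id is inferred from the _URL constant above; the "train" split name is an assumption based on the usual split generators, and recent datasets releases may additionally require trust_remote_code=True to run script-based datasets.

from datasets import load_dataset

# Download train.conll / dev.conll / test.conll and run the loading script above.
# The repository id is taken from the _URL constant in the script; on newer
# versions of the datasets library, pass trust_remote_code=True if needed.
pharmaconer = load_dataset("PlanTL-GOB-ES/pharmaconer")

# Each example carries the fields yielded by _generate_examples:
# "id", "tokens" and "ner_tags", with one tag per token of a sentence.
first = pharmaconer["train"][0]
print(first["tokens"][:5])
print(first["ner_tags"][:5])

If the features block (not shown in this diff) declares ner_tags as a ClassLabel sequence, the integer tags printed above can be mapped back to their label strings through pharmaconer["train"].features["ner_tags"].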