Datasets:

Modalities:
Text
Languages:
Spanish
Libraries:
Datasets
License:
mapama247 committed on
Commit
25dc84b
·
1 Parent(s): 7dda3d7

Update pharmaconer.py

Browse files
Files changed (1) hide show
  1. pharmaconer.py +40 -18
pharmaconer.py CHANGED
@@ -1,4 +1,12 @@
1
- # Loading script for the PharmaCoNER dataset.
 
 
 
 
 
 
 
 
2
  import datasets
3
 
4
 
@@ -6,8 +14,8 @@ logger = datasets.logging.get_logger(__name__)
6
 
7
 
8
  _CITATION = """\
9
- @inproceedings{gonzalez-agirre-etal-2019-pharmaconer,
10
- title = "{P}harma{C}o{NER}: Pharmacological Substances, Compounds and proteins Named Entity Recognition track",
11
  author = "Gonzalez-Agirre, Aitor and
12
  Marimon, Montserrat and
13
  Intxaurrondo, Ander and
@@ -22,13 +30,33 @@ _CITATION = """\
22
  url = "https://aclanthology.org/D19-5701",
23
  doi = "10.18653/v1/D19-5701",
24
  pages = "1--10",
25
- abstract = "One of the biomedical entity types of relevance for medicine or biosciences are chemical compounds and drugs. The correct detection these entities is critical for other text mining applications building on them, such as adverse drug-reaction detection, medication-related fake news or drug-target extraction. Although a significant effort was made to detect mentions of drugs/chemicals in English texts, so far only very limited attempts were made to recognize them in medical documents in other languages. Taking into account the growing amount of medical publications and clinical records written in Spanish, we have organized the first shared task on detecting drug and chemical entities in Spanish medical documents. Additionally, we included a clinical concept-indexing sub-track asking teams to return SNOMED-CT identifiers related to drugs/chemicals for a collection of documents. For this task, named PharmaCoNER, we generated annotation guidelines together with a corpus of 1,000 manually annotated clinical case studies. A total of 22 teams participated in the sub-track 1, (77 system runs), and 7 teams in the sub-track 2 (19 system runs). Top scoring teams used sophisticated deep learning approaches yielding very competitive results with F-measures above 0.91. These results indicate that there is a real interest in promoting biomedical text mining efforts beyond English. We foresee that the PharmaCoNER annotation guidelines, corpus and participant systems will foster the development of new resources for clinical and biomedical text mining systems of Spanish medical data.",
26
  }
27
  """
28
 
29
  _DESCRIPTION = """\
30
- https://temu.bsc.es/pharmaconer/
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  """
 
 
 
 
 
32
 
33
  _URL = "https://huggingface.co/datasets/PlanTL-GOB-ES/pharmaconer/resolve/main/"
34
  _TRAINING_FILE = "train.conll"
@@ -36,14 +64,9 @@ _DEV_FILE = "dev.conll"
36
  _TEST_FILE = "test.conll"
37
 
38
  class PharmaCoNERConfig(datasets.BuilderConfig):
39
- """BuilderConfig for PharmaCoNER dataset"""
40
 
41
  def __init__(self, **kwargs):
42
- """BuilderConfig for PharmaCoNER.
43
-
44
- Args:
45
- **kwargs: keyword arguments forwarded to super.
46
- """
47
  super(PharmaCoNERConfig, self).__init__(**kwargs)
48
 
49
 
@@ -53,7 +76,7 @@ class PharmaCoNER(datasets.GeneratorBasedBuilder):
53
  BUILDER_CONFIGS = [
54
  PharmaCoNERConfig(
55
  name="PharmaCoNER",
56
- version=datasets.Version("1.0.0"),
57
  description="PharmaCoNER dataset"),
58
  ]
59
 
@@ -82,7 +105,7 @@ class PharmaCoNER(datasets.GeneratorBasedBuilder):
82
  }
83
  ),
84
  supervised_keys=None,
85
- homepage="https://temu.bsc.es/pharmaconer/",
86
  citation=_CITATION,
87
  )
88
 
@@ -90,8 +113,8 @@ class PharmaCoNER(datasets.GeneratorBasedBuilder):
90
  """Returns SplitGenerators."""
91
  urls_to_download = {
92
  "train": f"{_URL}{_TRAINING_FILE}",
93
- "dev": f"{_URL}{_DEV_FILE}",
94
- "test": f"{_URL}{_TEST_FILE}",
95
  }
96
  downloaded_files = dl_manager.download_and_extract(urls_to_download)
97
 
@@ -109,7 +132,7 @@ class PharmaCoNER(datasets.GeneratorBasedBuilder):
109
  pos_tags = []
110
  ner_tags = []
111
  for line in f:
112
- if line.startswith("-DOCSTART-") or line == "" or line == "\n":
113
  if tokens:
114
  yield guid, {
115
  "id": str(guid),
@@ -120,7 +143,6 @@ class PharmaCoNER(datasets.GeneratorBasedBuilder):
120
  tokens = []
121
  ner_tags = []
122
  else:
123
- # PharmaCoNER tokens are tab separated
124
  splits = line.split("\t")
125
  tokens.append(splits[0])
126
  ner_tags.append(splits[-1].rstrip())
@@ -129,4 +151,4 @@ class PharmaCoNER(datasets.GeneratorBasedBuilder):
129
  "id": str(guid),
130
  "tokens": tokens,
131
  "ner_tags": ner_tags,
132
- }
 
1
+ """
2
+ A dataset loading script for the PharmaCoNER corpus.
3
+
4
+ The PharmaCoNER dataset is a manually annotated collection of clinical case
5
+ studies derived from the Spanish Clinical Case Corpus (SPACCC). It was designed
6
+ for the Pharmacological Substances, Compounds and Proteins NER track, the first
7
+ shared task on detecting drug and chemical entities in Spanish medical documents.
8
+ """
9
+
10
  import datasets
11
 
12
 
 
14
 
15
 
16
  _CITATION = """\
17
+ @inproceedings{gonzalez-agirre-etal-2019-pharmaconer,
18
+ title = "PharmaCoNER: Pharmacological Substances, Compounds and proteins Named Entity Recognition track",
19
  author = "Gonzalez-Agirre, Aitor and
20
  Marimon, Montserrat and
21
  Intxaurrondo, Ander and
 
30
  url = "https://aclanthology.org/D19-5701",
31
  doi = "10.18653/v1/D19-5701",
32
  pages = "1--10",
33
+ abstract = "",
34
  }
35
  """
36
 
37
  _DESCRIPTION = """\
38
+ PharmaCoNER: Pharmacological Substances, Compounds and Proteins Named Entity Recognition track
39
+
40
+ This dataset is designed for the PharmaCoNER task, sponsored by Plan de Impulso de las Tecnologías del Lenguaje (Plan TL).
41
+
42
+ It is a manually classified collection of clinical case studies derived from the Spanish Clinical Case Corpus (SPACCC), an
43
+ open access electronic library that gathers Spanish medical publications from SciELO (Scientific Electronic Library Online).
44
+
45
+ The annotation of the entire set of entity mentions was carried out by medicinal chemistry experts
46
+ and it includes the following 4 entity types: NORMALIZABLES, NO_NORMALIZABLES, PROTEINAS and UNCLEAR.
47
+
48
+ The PharmaCoNER corpus contains a total of 396,988 words and 1,000 clinical cases that have been randomly sampled into 3 subsets.
49
+ The training set contains 500 clinical cases, while the development and test sets contain 250 clinical cases each.
50
+ In terms of training examples, this translates to a total of 8074, 3764 and 3931 annotated sentences in each set.
51
+ The original dataset was distributed in Brat format (https://brat.nlplab.org/standoff.html).
52
+
53
+ For further information, please visit https://temu.bsc.es/pharmaconer/ or send an email to [email protected]
54
  """
55
+ _HOMEPAGE = "https://temu.bsc.es/pharmaconer/index.php/datasets/"
56
+
57
+ _LICENSE = "Creative Commons Attribution 4.0 International"
58
+
59
+ _VERSION = "1.1.0"
60
 
61
  _URL = "https://huggingface.co/datasets/PlanTL-GOB-ES/pharmaconer/resolve/main/"
62
  _TRAINING_FILE = "train.conll"
 
64
  _TEST_FILE = "test.conll"
65
 
66
  class PharmaCoNERConfig(datasets.BuilderConfig):
67
+ """BuilderConfig for PharmaCoNER dataset."""
68
 
69
  def __init__(self, **kwargs):
 
 
 
 
 
70
  super(PharmaCoNERConfig, self).__init__(**kwargs)
71
 
72
 
 
76
  BUILDER_CONFIGS = [
77
  PharmaCoNERConfig(
78
  name="PharmaCoNER",
79
+ version=datasets.Version(_VERSION),
80
  description="PharmaCoNER dataset"),
81
  ]
82
 
 
105
  }
106
  ),
107
  supervised_keys=None,
108
+ homepage=_HOMEPAGE,
109
  citation=_CITATION,
110
  )
111
 
 
113
  """Returns SplitGenerators."""
114
  urls_to_download = {
115
  "train": f"{_URL}{_TRAINING_FILE}",
116
+ "dev": f"{_URL}{_DEV_FILE}",
117
+ "test": f"{_URL}{_TEST_FILE}",
118
  }
119
  downloaded_files = dl_manager.download_and_extract(urls_to_download)
120
 
 
132
  pos_tags = []
133
  ner_tags = []
134
  for line in f:
135
+ if line == "\n":
136
  if tokens:
137
  yield guid, {
138
  "id": str(guid),
 
143
  tokens = []
144
  ner_tags = []
145
  else:
 
146
  splits = line.split("\t")
147
  tokens.append(splits[0])
148
  ner_tags.append(splits[-1].rstrip())
 
151
  "id": str(guid),
152
  "tokens": tokens,
153
  "ner_tags": ner_tags,
154
+ }