qanastek committed
Commit e25c52f
1 Parent(s): 543c2fe

Create PxCorpus.py

Files changed (1)
  1. PxCorpus.py +179 -0
PxCorpus.py ADDED
@@ -0,0 +1,179 @@
+ # pip install datasets numpy
+
+ import os
+ import random
+
+ import datasets
+
+ import numpy as np
+
+ # Raw string: the BibTeX entry contains backslash escapes such as {\c c}.
+ _CITATION = r"""
+ @InProceedings{Kocabiyikoglu2022,
+ author = "Alican Kocabiyikoglu and Fran{\c c}ois Portet and Prudence Gibert and Hervé Blanchon and Jean-Marc Babouchkine and Gaëtan Gavazzi",
+ title = "A Spoken Drug Prescription Dataset in French for Spoken Language Understanding",
+ booktitle = "13th Language Resources and Evaluation Conference (LREC 2022)",
+ year = "2022",
+ location = "Marseille, France"
+ }
+ """
+
+ _DESCRIPTION = """\
+ PxSLU is, to the best of our knowledge, the first spoken medical drug prescription corpus to be distributed. It contains 4 hours of transcribed
+ and annotated dialogues of drug prescriptions in French, acquired through an experiment with 55 participants, experts and non-experts in drug prescriptions.
+
+ The automatic transcriptions were verified by human annotators and aligned with semantic labels to allow the training of NLP models. The data acquisition
+ protocol was reviewed by medical experts and permits free distribution without breach of privacy or regulation.
+
+ Overview of the corpus:
+
+ The experiment was performed in wild conditions with naive participants and medical experts. In total, the dataset includes 1981 recordings
+ of 55 participants (38% non-experts, 25% doctors, 36% medical practitioners), manually transcribed and semantically annotated.
+ """
+
+ _URL = "https://zenodo.org/record/6524162/files/pxslu.zip?download=1"
+
+
+ class PxCorpus(datasets.GeneratorBasedBuilder):
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="default", version="1.0.0", description="PxCorpus data"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "default"
+
+     def _info(self):
+
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("string"),
+                 "text": datasets.Value("string"),
+                 "label": datasets.features.ClassLabel(
+                     names=["medical_prescription", "negate", "none", "replace"],
+                 ),
+                 "tokens": datasets.Sequence(datasets.Value("string")),
+                 "ner_tags": datasets.Sequence(
+                     datasets.features.ClassLabel(
+                         names=['O', 'B-A', 'B-cma_event', 'B-d_dos_form', 'B-d_dos_form_ext', 'B-d_dos_up', 'B-d_dos_val', 'B-dos_cond', 'B-dos_uf', 'B-dos_val', 'B-drug', 'B-dur_ut', 'B-dur_val', 'B-fasting', 'B-freq_days', 'B-freq_int_v1', 'B-freq_int_v1_ut', 'B-freq_int_v2', 'B-freq_int_v2_ut', 'B-freq_startday', 'B-freq_ut', 'B-freq_val', 'B-inn', 'B-max_unit_uf', 'B-max_unit_ut', 'B-max_unit_val', 'B-min_gap_ut', 'B-min_gap_val', 'B-qsp_ut', 'B-qsp_val', 'B-re_ut', 'B-re_val', 'B-rhythm_hour', 'B-rhythm_perday', 'B-rhythm_rec_ut', 'B-rhythm_rec_val', 'B-rhythm_tdte', 'B-roa', 'I-cma_event', 'I-d_dos_form', 'I-d_dos_form_ext', 'I-d_dos_up', 'I-d_dos_val', 'I-dos_cond', 'I-dos_uf', 'I-dos_val', 'I-drug', 'I-fasting', 'I-freq_startday', 'I-inn', 'I-rhythm_tdte', 'I-roa'],
+                     ),
+                 ),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             citation=_CITATION,
+             supervised_keys=None,
+         )
+
+     def _split_generators(self, dl_manager):
+
+         data_dir = dl_manager.download_and_extract(_URL)
+
+         # All three splits read the same files; examples are routed to a
+         # split by the deterministic shuffle in _generate_examples.
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath_1": os.path.join(data_dir, "seq.in"),
+                     "filepath_2": os.path.join(data_dir, "seq.label"),
+                     "filepath_3": os.path.join(data_dir, "PxSLU_conll.txt"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath_1": os.path.join(data_dir, "seq.in"),
+                     "filepath_2": os.path.join(data_dir, "seq.label"),
+                     "filepath_3": os.path.join(data_dir, "PxSLU_conll.txt"),
+                     "split": "validation",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath_1": os.path.join(data_dir, "seq.in"),
+                     "filepath_2": os.path.join(data_dir, "seq.label"),
+                     "filepath_3": os.path.join(data_dir, "PxSLU_conll.txt"),
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     def getTokenTags(self, document):
+         """Split a CoNLL-style document into parallel token / NER-tag lists."""
+
+         tokens = []
+         ner_tags = []
+
+         for pair in document.split("\n"):
+
+             if not pair:
+                 continue
+
+             text, label = pair.split("\t")
+             tokens.append(text)
+             ner_tags.append(label)
+
+         return tokens, ner_tags
+
+     def _generate_examples(self, filepath_1, filepath_2, filepath_3, split):
+
+         key = 0
+         all_res = []
+
+         with open(filepath_1, encoding="utf-8") as f_seq_in:
+             seq_in = f_seq_in.read().split("\n")
+
+         with open(filepath_2, encoding="utf-8") as f_seq_label:
+             seq_label = f_seq_label.read().split("\n")
+
+         # CoNLL documents are separated by blank lines.
+         with open(filepath_3, encoding="utf-8") as f_in_ner:
+             docs = f_in_ner.read().split("\n\n")
+
+         for idx, doc in enumerate(docs):
+
+             text = seq_in[idx]
+             label = seq_label[idx]
+
+             if len(text) <= 0 or len(label) <= 0:
+                 continue
+
+             tokens, ner_tags = self.getTokenTags(doc)
+
+             all_res.append({
+                 "id": str(key),  # the "id" feature is declared as a string
+                 "text": text,
+                 "label": label,
+                 "tokens": tokens,
+                 "ner_tags": ner_tags,
+             })
+
+             key += 1
+
+         ids = [r["id"] for r in all_res]
+
+         # Fixed seed: every split generator rebuilds the same assignment.
+         random.seed(4)
+         random.shuffle(ids)
+         random.shuffle(ids)
+         random.shuffle(ids)
+
+         # 70% train / 10% validation / 20% test.
+         train, validation, test = np.split(ids, [int(len(ids) * 0.70), int(len(ids) * 0.80)])
+
+         if split == "train":
+             allowed_ids = set(train)
+         elif split == "validation":
+             allowed_ids = set(validation)
+         elif split == "test":
+             allowed_ids = set(test)
+         else:
+             raise ValueError(f"Unknown split: {split}")
+
+         for r in all_res:
+             if r["id"] in allowed_ids:
+                 yield r["id"], r
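
For quick verification, the loader can be exercised end to end. A minimal usage sketch, assuming the script is saved locally as PxCorpus.py and a `datasets` release that still supports loading dataset scripts (script loading was removed in datasets 3.0):

    from datasets import load_dataset

    dataset = load_dataset("PxCorpus.py")  # path to this local script
    print(dataset)                         # DatasetDict with train/validation/test splits
    print(dataset["train"][0])             # id, text, label, tokens, ner_tags

Because the shuffle in _generate_examples uses a fixed seed, every call reproduces the same 70/10/20 train/validation/test partition.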