import json
import random
import datasets
import numpy as np
import pandas as pd
_CITATION = """\
@inproceedings{hiebel:cea-03740484,
TITLE = {{CLISTER: A corpus for semantic textual similarity in French clinical narratives}},
AUTHOR = {Hiebel, Nicolas and Ferret, Olivier and Fort, Kar{\"e}n and N{\'e}v{\'e}ol, Aur{\'e}lie},
URL = {https://hal-cea.archives-ouvertes.fr/cea-03740484},
BOOKTITLE = {{LREC 2022 - 13th Language Resources and Evaluation Conference}},
ADDRESS = {Marseille, France},
PUBLISHER = {{European Language Resources Association}},
SERIES = {LREC 2022 - Proceedings of the 13th Conference on Language Resources and Evaluation},
VOLUME = {2022},
PAGES = {4306-4315},
YEAR = {2022},
MONTH = Jun,
KEYWORDS = {Semantic Similarity ; Corpus Development ; Clinical Text ; French ; Semantic Similarity},
PDF = {https://hal-cea.archives-ouvertes.fr/cea-03740484/file/2022.lrec-1.459.pdf},
HAL_ID = {cea-03740484},
HAL_VERSION = {v1},
}
"""
_DESCRIPTION = """\
Modern Natural Language Processing relies on the availability of annotated corpora for training and \
evaluating models. Such resources are scarce, especially for specialized domains in languages other \
than English. In particular, there are very few resources for semantic similarity in the clinical domain \
in French. This can be useful for many biomedical natural language processing applications, including \
text generation. We introduce a definition of similarity that is guided by clinical facts and apply it \
to the development of a new French corpus of 1,000 sentence pairs manually annotated according to \
similarity scores. This new sentence similarity corpus is made freely available to the community. We \
further evaluate the corpus through experiments of automatic similarity measurement. We show that a \
model of sentence embeddings can capture similarity with state-of-the-art performance on the DEFT STS \
shared task evaluation data set (Spearman=0.8343). We also show that the CLISTER corpus is complementary \
to DEFT STS. \
"""
_HOMEPAGE = "https://gitlab.inria.fr/codeine/clister"
_LICENSE = "unknown"
_URL = "data.zip"

class CLISTER(datasets.GeneratorBasedBuilder):

    DEFAULT_CONFIG_NAME = "source"

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="source", version="1.0.0", description="The CLISTER corpora"),
    ]

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "document_1_id": datasets.Value("string"),
                "document_2_id": datasets.Value("string"),
                "text_1": datasets.Value("string"),
                "text_2": datasets.Value("string"),
"label": datasets.Value("float"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URL).rstrip("/")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "csv_file": data_dir + "/train.csv",
                    "json_file": data_dir + "/id_to_sentence_train.json",
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "csv_file": data_dir + "/train.csv",
                    "json_file": data_dir + "/id_to_sentence_train.json",
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "csv_file": data_dir + "/test.csv",
                    "json_file": data_dir + "/id_to_sentence_test.json",
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, csv_file, json_file, split):
        all_res = []
        key = 0

        # Mapping from sentence identifiers to sentence text
        with open(json_file) as f_json:
            data_map = json.load(f_json)

        # Tab-separated annotations: one sentence pair and its similarity score per row
        df = pd.read_csv(csv_file, sep="\t")

        for _, e in df.iterrows():
            all_res.append({
                "id": str(key),
                "document_1_id": e["id_1"],
                "document_2_id": e["id_2"],
                "text_1": data_map["_".join(e["id_1"].split("_")[0:2])],
                "text_2": data_map["_".join(e["id_2"].split("_")[0:2])],
                "label": float(e["sim"]),
            })
            key += 1

        if split != "test":
            # Carve a validation set out of the official training file with a
            # deterministic shuffle (fixed seed) and an ~83/17 split.
            ids = [r["id"] for r in all_res]

            random.seed(4)
            random.shuffle(ids)
            random.shuffle(ids)
            random.shuffle(ids)

            train, validation = np.split(ids, [int(len(ids) * 0.8333)])

            if split == "train":
                allowed_ids = list(train)
            elif split == "validation":
                allowed_ids = list(validation)

            for r in all_res:
                if r["id"] in allowed_ids:
                    yield r["id"], r
        else:
            for r in all_res:
                yield r["id"], r