# pico-breast-cancer / pico-breast-cancer.py
# Hugging Face dataset loading script (Hub page metadata removed: author
# lhoestq, commit "fix parsing" f5d9f91, 6.84 kB).
import datasets

# Module-level logger, namespaced to this dataset script.
logger = datasets.logging.get_logger(__name__)

# BibTeX entry for the paper that introduced the corpus (Mutinda et al., 2022).
_CITATION = """\
@InProceedings{mutinda2022pico,
title = {PICO Corpus: A Publicly Available Corpus to Support Automatic Data Extraction from Biomedical Literature},
author = {Mutinda, Faith and Liew, Kongmeng and Yada, Shuntaro and Wakamiya, Shoko and Aramaki, Eiji},
booktitle = {Proceedings of the first Workshop on Information Extraction from Scientific Publications},
pages = {26--31},
year = {2022}
}
"""

# Human-readable dataset summary shown on the Hub dataset card.
_DESCRIPTION = """\
The corpus consists of about 1,011 PubMed abstracts which are RCTs related
to breast cancer. For each abstract, text snippets that identify the
Participants, Intervention, Control, and Outcome (PICO elements) are annotated.
The abstracts were annotated using BRAT (https://brat.nlplab.org/) and later
converted to CoNLL-2003.
"""

# Base URL of the raw CoNLL-formatted split files on GitHub.
_URL = "https://raw.githubusercontent.com/Martin-Masson/pico-breast-cancer/main/pico_conll/"
# File names of the three dataset splits, resolved relative to _URL.
_TRAINING_FILE = "train.txt"
_DEV_FILE = "dev.txt"
_TEST_FILE = "test.txt"
class PicoBreastCancerConfig(datasets.BuilderConfig):
    """BuilderConfig for the PICO breast-cancer corpus.

    Args:
        **kwargs: Keyword arguments forwarded unchanged to
            ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        # No configuration of its own — everything is delegated to the base class.
        super().__init__(**kwargs)
class PicoBreastCancer(datasets.GeneratorBasedBuilder):
    """A corpus of about 1,011 PubMed abstracts from RCTs related to breast cancer."""

    BUILDER_CONFIGS = [
        PicoBreastCancerConfig(
            name="pico-breast-cancer",
            version=datasets.Version("1.0.0"),
            description="A corpus of about 1,011 PubMed abstracts from RCTs related to breast cancer.",
        ),
    ]

    def _info(self):
        """Return the dataset schema: token sequences with BIO-tagged PICO entities."""
        # Entity types in the canonical order of the label set; each type yields
        # a B- tag immediately followed by its I- tag. NOTE: "ethinicity" is a
        # typo carried over from the upstream annotation files and must be kept.
        entity_types = [
            "total-participants",
            "intervention-participants",
            "control-participants",
            "age",
            "eligibility",
            "ethinicity",
            "condition",
            "location",
            "intervention",
            "control",
            "outcome",
            "outcome-measure",
            "iv-bin-abs",
            "cv-bin-abs",
            "iv-bin-percent",
            "cv-bin-percent",
            "iv-cont-mean",
            "cv-cont-mean",
            "iv-cont-median",
            "cv-cont-median",
            "iv-cont-sd",
            "cv-cont-sd",
            "iv-cont-q1",
            "cv-cont-q1",
            "iv-cont-q3",
            "cv-cont-q3",
        ]
        # "O" first, then B-/I- pairs in entity_types order — identical to the
        # original hand-written label list.
        label_names = ["O"] + [
            f"{prefix}-{entity}" for entity in entity_types for prefix in ("B", "I")
        ]
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(names=label_names)
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/Martin-Masson/pico-corpus",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three CoNLL split files and declare one generator per split."""
        split_to_file = {
            "train": _TRAINING_FILE,
            "dev": _DEV_FILE,
            "test": _TEST_FILE,
        }
        downloaded = dl_manager.download_and_extract(
            {split: f"{_URL}{filename}" for split, filename in split_to_file.items()}
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded["train"]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded["dev"]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded["test"]}
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from a CoNLL-2003-formatted file.

        Examples are separated by blank lines or ``-DOCSTART-`` markers; every
        other line carries one token and its tag, split on the last space.
        """
        logger.info("⏳ Generating examples from = %s", filepath)

        def as_example(key, words, tags):
            # Package one accumulated abstract in the schema declared by _info.
            return key, {"id": str(key), "tokens": words, "ner_tags": tags}

        with open(filepath, encoding="utf-8") as reader:
            key = 0
            words, tags = [], []
            for raw_line in reader:
                if raw_line.startswith("-DOCSTART-") or raw_line in ("", "\n"):
                    # Boundary line: emit the pending example, if any.
                    if words:
                        yield as_example(key, words, tags)
                        key += 1
                        words, tags = [], []
                else:
                    # Token line: the tag is everything after the last space.
                    parts = raw_line.rstrip().rsplit(" ", 1)
                    words.append(parts[0])
                    tags.append(parts[1])
            # Flush the final example when the file does not end on a boundary.
            if words:
                yield as_example(key, words, tags)