fact2020 / fact2020.py
# coding=utf-8
#
# Lint as: python3
"""Overview of FACT at IberLEF 2020: Events Detection and Classification."""

import json

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{fact2020,
    title = "Overview of FACT at IberLEF 2020: Events Detection and Classification",
    author = "Rosá, Aiala and Chiruzzo, Luis and Wonsever, Dina and Malcuori, Marisa and Curell, Hortènsia and Castellón, Irene and Vázquez, Gloria and Fernández-Montraveta, Ana and Góngora, Santiago and Alonso, Laura",
    booktitle = "Proceedings of the Iberian Languages Evaluation Forum (IberLEF 2020)",
    year = "2020",
    url = "https://ceur-ws.org/Vol-2664/fact_overview.pdf",
}
"""
_DESCRIPTION = """\
In this paper we present the second edition of the FACT shared task (Factuality Annotation and Classification
Task), included in IberLEF 2020. The main objective of this task is to advance the study of the factuality of
the events mentioned in texts. This year, the FACT task includes a subtask on event identification in addition
to the factuality classification subtask. We describe the submitted systems as well as the corpus used, which is
the same as in FACT 2019, extended with annotations for nominal events.
"""
_URL = "https://huggingface.co/datasets/filevich/fact2020/raw/main/"
_URLS = {
    "train": _URL + "train.json",
    "validation": _URL + "validation.json",
    "test": _URL + "test-task1.json",
}


class Fact2020Config(datasets.BuilderConfig):
    """BuilderConfig for Fact2020."""

    def __init__(self, **kwargs):
        """BuilderConfig for Fact2020.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class Fact2020(datasets.GeneratorBasedBuilder):
    """Fact2020 dataset."""

    BUILDER_CONFIGS = [
        Fact2020Config(name="fact2020", version=datasets.Version("1.0.0"), description="Fact2020 dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # Per-token factuality labels: O (not an event), F (fact),
                    # CF (counterfact), U (undefined).
                    "fact_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "F",
                                "CF",
                                "U",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://ceur-ws.org/Vol-2664/fact_overview.pdf",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls_to_download = _URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        # Yield (key, example) pairs, as expected from a GeneratorBasedBuilder.
        for i, d in enumerate(data):
            yield i, {"id": str(i), **d}
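

# A minimal smoke test, assuming this script is run from a local checkout of the
# dataset repo: `datasets.load_dataset` accepts a path to a loading script
# (older `datasets` versions; newer ones may require trust_remote_code=True).
if __name__ == "__main__":
    ds = datasets.load_dataset("fact2020.py")
    print(ds)              # expected splits: train, validation, test
    print(ds["train"][0])  # one example: id, tokens, fact_tags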