Datasets: yuanchuan/annotated_reference_strings
Tasks: Token Classification
Modalities: Text
Sub-tasks: parsing
Languages: English
Size: 10M - 100M
License: cc-by-4.0

Dataset loading script:
"""\ | |
Annotated Reference Strings dataset synthesized using CSL processor on citations obtained from CrossRef, JSTOR and | |
PubMed | |
""" | |
import gzip
import json

import datasets
_CITATION = """\ | |
@techreport{kee2021, | |
author = {Yuan Chuan Kee}, | |
title = {Synthesis of a large dataset of annotated reference strings for developing citation parsers}, | |
institution = {National University of Singapore}, | |
year = {2021} | |
} | |
""" | |
_DESCRIPTION = """\ | |
A repository of reference strings annotated using CSL processor using citations obtained from various sources. | |
""" | |
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = "https://www.github.com/kylase"

_LICENSE = "cc-by-4.0"

_BASE_URL = "https://huggingface.co/datasets/yuanchuan/annotated_reference_strings/resolve/main/data"

# Each source is split into gzipped JSON Lines parts, numbered from 1 and zero-padded to 5 digits.
_URL_FORMAT = "{base_url}/{source}-part-{part:05}.jsonl.gz"
# Number of data file parts available for each source.
_SOURCES_PARTS = {
    "crossref": 16,
    "pubmed": 32,
    "jstor": 1,
}

_URLs = {
    "default": [
        _URL_FORMAT.format(base_url=_BASE_URL, source=source, part=i)
        for source, total_parts in _SOURCES_PARTS.items()
        for i in range(1, total_parts + 1)
    ]
}
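
# For illustration, the comprehension above expands to 49 URLs (16 + 32 + 1) of the form:
#   https://huggingface.co/datasets/yuanchuan/annotated_reference_strings/resolve/main/data/crossref-part-00001.jsonl.gz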
class AnnotatedReferenceStringsDataset(datasets.GeneratorBasedBuilder):
    """Annotated Reference Strings dataset"""

    VERSION = datasets.Version("0.2.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="default",
            version=VERSION,
            description="This dataset is the raw representation without tokenization.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "default"
    def _info(self):
        # All fields are plain strings; `content` carries the annotated reference string itself.
        features = datasets.Features(
            {
                "source": datasets.Value("string"),
                "lang": datasets.Value("string"),
                "entry_type": datasets.Value("string"),
                "doi_prefix": datasets.Value("string"),
                "csl_style": datasets.Value("string"),
                "content": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
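
    # A record in the gzipped JSONL parts therefore has this shape (values hypothetical):
    #   {"source": "crossref", "lang": "en", "entry_type": "article-journal",
    #    "doi_prefix": "10.1000", "csl_style": "apa", "content": "<annotated reference string>"}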
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download every part for the selected configuration; the whole dataset is
        # exposed as a single train split.
        data_urls = _URLs[self.config.name]
        files = dl_manager.download(data_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": files,
                    "split": "train",
                },
            )
        ]
    def _generate_examples(self, filepaths, split):
        # Assign a monotonically increasing id across all downloaded parts,
        # skipping blank lines.
        id_ = 0
        for filepath in filepaths:
            with gzip.open(filepath, "rt", encoding="utf-8") as f:
                for line in f:
                    line = line.strip()
                    if line:
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1
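
For reference, a minimal usage sketch, assuming the script is hosted on the Hugging Face Hub as yuanchuan/annotated_reference_strings (per _BASE_URL above); field names follow the Features defined in _info():

from datasets import load_dataset

# Download all 49 parts and expose them as a single train split.
dataset = load_dataset(
    "yuanchuan/annotated_reference_strings",
    split="train",
    trust_remote_code=True,  # recent `datasets` versions require this for script-based datasets
)

# Inspect a few examples.
for example in dataset.select(range(3)):
    print(example["source"], example["csl_style"])
    print(example["content"])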