# Acknowledgement: dataset builder script adapted from https://huggingface.co/datasets/glue/blob/main/glue.py
import datasets
import jsonlines
CITATION_BLOB = '''
@inproceedings{krishna2023usb,
title={USB: A Unified Summarization Benchmark Across Tasks and Domains},
author={Krishna, Kundan and Gupta, Prakhar and Ramprasad, Sanjana and Wallace, Byron C and Bigham, Jeffrey P and Lipton, Zachary C},
booktitle={Findings of the Association for Computational Linguistics: EMNLP 2023},
year={2023}
}
'''
DESCRIPTION_BLOB = '''
The USB benchmark consists of labeled datasets for a collection of 8 tasks dealing with text summarization,
particularly focusing on factuality and controllability of summary generation.
The paper can be found here: https://arxiv.org/abs/2305.14296
'''
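
# `featurespec` maps each task-specific field to a short type code that _info()
# translates into a datasets feature: "int" -> int32, "sent" -> string,
# "listint"/"listsent" -> sequences of those, "listlistint" -> nested int sequences.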
class USBConfig(datasets.BuilderConfig):
    def __init__(
        self,
        featurespec,
        label_column,
        citation=CITATION_BLOB,
        data_url="processed_data.tar.gz",
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        super(USBConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.featurespec = featurespec
        self.label_column = label_column
        self.citation = citation
        self.label_classes = label_classes
        self.process_label = process_label
        self.url = "https://github.com/kukrishna/usb"
        self.data_url = data_url

class USB(datasets.GeneratorBasedBuilder):
    """The Unified Summarization Benchmark."""
    BUILDER_CONFIGS = [
        USBConfig(
            name="topicbased_summarization",
            description="Generate a short summary of the given article covering the given topic",
            featurespec={"summ_idx": "int", "input_lines": "listsent", "topic_name": "sent", "output_lines": "listsent"},
            label_column="output_lines",
        ),
        USBConfig(
            name="fixing_factuality",
            description="Given a summary sentence (claim) and presented evidence from the article, edit the summary to remove unsupported or contradicting facts",
            featurespec={"summ_idx": "int", "input_lines": "listsent", "initial_summary": "sent", "fixed_summary": "sent"},
            label_column="fixed_summary",
        ),
        USBConfig(
            name="unsupported_span_prediction",
            description="Given a summary sentence (claim) and presented evidence from the article, mark the parts of the summary which are not supported by the evidence by surrounding them with [] and [/] tags.",
            featurespec={"summ_idx": "int", "input_lines": "listsent", "summary": "sent", "annotated_summary": "sent"},
            label_column="annotated_summary",
        ),
        USBConfig(
            name="evidence_extraction",
            description="Given an article and its summary, for each summary sentence, produce a minimal list of sentences from the article which provide sufficient evidence for all facts in the summary sentence.",
            featurespec={"input_lines": "listsent", "summary_lines": "listsent", "evidence_labels": "listlistint"},
            label_column="evidence_labels",
        ),
        USBConfig(
            name="multisentence_compression",
            description="Given a list of sentences from an article, generate a single-sentence summary of the presented cluster of sentences.",
            featurespec={"summ_idx": "int", "input_lines": "listsent", "output_lines": "listsent"},
            label_column="output_lines",
        ),
        USBConfig(
            name="extractive_summarization",
            description="Given an article, generate an extractive summary by producing a subset of the article's sentences",
            featurespec={"input_lines": "listsent", "labels": "listint"},
            label_column="labels",
        ),
        USBConfig(
            name="abstractive_summarization",
            description="Given an article, generate its abstractive summary",
            featurespec={"input_lines": "listsent", "output_lines": "listsent"},
            label_column="output_lines",
        ),
        USBConfig(
            name="factuality_classification",
            description="Given a summary sentence (claim) and presented evidence from the article, predict whether all facts of the claim are supported by and in agreement with the presented evidence, or not.",
            featurespec={"summ_idx": "int", "input_lines": "listsent", "summary_sent": "sent", "label": "int"},
            label_column="label",
        ),
        USBConfig(
            name="all_annotations",
            description="All annotations collected in the creation of the USB dataset, in one place.",
            featurespec={},
            label_column=None,
        ),
    ]
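
    # The extracted data archive contains one directory per config, each holding
    # train/validation/test .jsonl files.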
    def _split_generators(self, dl_manager):
        data_root = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": f"{data_root}/{self.config.name}/train.jsonl",
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": f"{data_root}/{self.config.name}/validation.jsonl",
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": f"{data_root}/{self.config.name}/test.jsonl",
                    "split": "test",
                },
            ),
        ]
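
    # Each JSONL record already matches the config's feature schema; the id is
    # suffixed with the example index to keep keys unique, and the domain is
    # recovered from the id prefix (the part before the first "/").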
    def _generate_examples(self, data_file, split):
        with jsonlines.open(data_file) as f:
            for ex_idx, example in enumerate(f):
                example["id"] = example["id"] + ":" + str(ex_idx)
                example["domain"] = example["id"].split("/")[0]
                yield example["id"], example
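
    # Build the datasets.Features schema from the config's featurespec type codes;
    # the "all_annotations" config gets its nested source/summary structure as a
    # special case.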
    def _info(self):
        features = {}
        features["id"] = datasets.Value("string")
        features["domain"] = datasets.Value("string")
        if self.config.name == "all_annotations":
            # handle this as a special case
            features["source"] = datasets.Sequence({
                "txt": datasets.Value("string"),
                "section_name": datasets.Value("string"),
                "section_index": datasets.Value("int32"),
                "is_header": datasets.Value("bool"),
            })
            features["summary"] = datasets.Sequence({
                "pre_edit": datasets.Value("string"),
                "post_edit": datasets.Value("string"),
                "evidence": datasets.Sequence(datasets.Value("int32")),
            })
        for (feature_name, dtype) in self.config.featurespec.items():
            hf_dtype = None
            if dtype == "int":
                hf_dtype = datasets.Value("int32")
            elif dtype == "listint":
                hf_dtype = datasets.Sequence(datasets.Value("int32"))
            elif dtype == "listlistint":
                hf_dtype = datasets.Sequence(datasets.Sequence(datasets.Value("int32")))
            elif dtype == "sent":
                hf_dtype = datasets.Value("string")
            elif dtype == "listsent":
                hf_dtype = datasets.Sequence(datasets.Value("string"))
            else:
                raise NotImplementedError
            features[feature_name] = hf_dtype
        return datasets.DatasetInfo(
            description=DESCRIPTION_BLOB,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation,
        )
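
# Minimal usage sketch (not part of the original script): load one of the task
# configs through this builder. Assumes the file is saved as "usb.py" next to
# processed_data.tar.gz, and a `datasets` version that still supports
# script-based builders (recent releases require trust_remote_code=True).
if __name__ == "__main__":
    dset = datasets.load_dataset("./usb.py", "abstractive_summarization", split="validation")
    print(dset[0]["id"], dset[0]["output_lines"])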