import numpy as np
import sacrebleu
from rouge_score import rouge_scorer, scoring

ROUGE_SCORER = None

    rouge1_acc = int(rouge1_correct > rouge1_incorrect)

    # ROUGE-2
    rouge2_scores = [score['rouge2'] for score in rouge_scores]
    rouge2_correct = np.nanmax(rouge2_scores[:len(true_refs)])
    rouge2_incorrect = np.nanmax(rouge2_scores[len(true_refs):])
    rouge2_max = rouge2_correct
    rouge2_diff = rouge2_correct - rouge2_incorrect
    rouge2_acc = int(rouge2_correct > rouge2_incorrect)

    # ROUGE-L (rougeLsum)
    rougeL_scores = [score['rougeLsum'] for score in rouge_scores]
    rougeL_correct = np.nanmax(rougeL_scores[:len(true_refs)])
    rougeL_incorrect = np.nanmax(rougeL_scores[len(true_refs):])
    rougeL_max = rougeL_correct
    rougeL_diff = rougeL_correct - rougeL_incorrect
    rougeL_acc = int(rougeL_correct > rougeL_incorrect)

    return {
        'bleu_max': bleu_max, 'bleu_acc': bleu_acc, 'bleu_diff': bleu_diff,
        'rouge1_max': rouge1_max, 'rouge1_acc': rouge1_acc, 'rouge1_diff': rouge1_diff,
        'rouge2_max': rouge2_max, 'rouge2_acc': rouge2_acc, 'rouge2_diff': rouge2_diff,
        'rougeL_max': rougeL_max, 'rougeL_acc': rougeL_acc, 'rougeL_diff': rougeL_diff,
    }
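# For each similarity metric above, three per-example values are reported:
# *_max (best score against any correct reference), *_diff (best score against
# the correct references minus best score against the incorrect ones), and
# *_acc (1 if the best correct reference outscores every incorrect one, else 0).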
def bleu(refs, preds):
    return sacrebleu.corpus_bleu(
        preds, refs,
        smooth_method='exp', smooth_value=0.0, force=False,
        lowercase=False, tokenize='intl', use_effective_order=False,
    ).score
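# Note on shapes (illustrative sketch, not part of the original file):
# sacrebleu.corpus_bleu takes the hypotheses first and a list of reference
# streams second, so `refs` is expected to be a list of reference lists and
# `preds` a flat list of strings. A single prediction/reference pair would be
# scored roughly like:
#
#   bleu([['Paris is the capital of France.']], ["Paris is France's capital."])
#
# The returned value is a corpus-level BLEU score on a 0-100 scale.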
def rouge(refs, preds):
    rouge_types = ['rouge1', 'rouge2', 'rougeLsum']
    # Lazily build and cache a single RougeScorer instance.
    global ROUGE_SCORER
    if ROUGE_SCORER is None:
        ROUGE_SCORER = rouge_scorer.RougeScorer(rouge_types)
    scorer = ROUGE_SCORER

    def _prepare_summary(summary):
        # Insert newlines between sentences so rougeLsum is computed over
        # sentence splits rather than the whole string.
        summary = summary.replace(' . ', '.\n')
        return summary

    aggregator = scoring.BootstrapAggregator()
    for ref, pred in zip(refs, preds):
        ref = _prepare_summary(ref)
        pred = _prepare_summary(pred)
        aggregator.add_scores(scorer.score(ref, pred))
    result = aggregator.aggregate()
    return {rouge_type: result[rouge_type].mid.fmeasure * 100 for rouge_type in rouge_types}
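# Usage sketch (assumed shapes, not part of the original file): `refs` and
# `preds` are parallel lists of strings that are scored pairwise, and the
# result is a dict of aggregated F-measures on a 0-100 scale, e.g.
#
#   rouge(['the cat sat on the mat'], ['the cat sat on a mat'])
#   # -> {'rouge1': ..., 'rouge2': ..., 'rougeLsum': ...}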
# File: lm-evaluation-harness-main/lm_eval/tasks/unitxt/task.py
"""""" |
from functools import partial
from typing import Optional

import evaluate

from lm_eval.api.instance import Instance
from lm_eval.api.task import ConfigurableTask

_CITATION = """
@misc{bandel2024unitxt,
    title={Unitxt: Flexible, Shareable and Reusable Data Preparation and Evaluation for Generative AI},
    author={Elron Bandel and Yotam Perlitz and Elad Venezian and Roni Friedman-Melamed and Ofir Arviv and Matan Orbach and Shachar Don-Yehyia and Dafna Sheinwald and Ariel Gera and Leshem Choshen and Michal Shmueli-Scheuer and Yoav Katz},
    year={2024},
    eprint={2401.14019},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""
def score(items, metric):
    predictions, references = zip(*items)
    evaluator = evaluate.load('unitxt/metric')
    for reference in references:
        reference['metrics'] = [metric]
    results = evaluator.compute(predictions=predictions, references=references)
    return results[0]['score']['global']['score']
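# Usage sketch (assumptions noted, not part of the original file): `items` is
# the list accumulated by Unitxt.process_results below, i.e. (prediction_string,
# document_dict) pairs, and `metric` is a unitxt metric name such as
# 'metrics.accuracy'. Each document dict is passed through unchanged as a unitxt
# reference, with its 'metrics' field overwritten so the evaluator computes only
# the requested metric; the global score is read back from the first result.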
class Unitxt(ConfigurableTask):
    VERSION = 0

    def __init__(self, config: Optional[dict] = None) -> None:
        assert 'recipe' in config, "Unitxt task must have a 'recipe' string."
        # The unitxt recipe (a comma-separated 'card=...,template=...' string)
        # is passed to the 'unitxt/data' loader as the dataset name.
        super().__init__(config={
            'metadata': {'version': self.VERSION},
            'dataset_kwargs': {'trust_remote_code': True},
            'dataset_name': config['recipe'],
            'dataset_path': 'unitxt/data',
        })
        self.metrics = self.dataset['test'][0]['metrics']
    def has_training_docs(self):
        return 'train' in self.dataset

    def has_validation_docs(self):
        return 'validation' in self.dataset

    def has_test_docs(self):
        return 'test' in self.dataset

    def training_docs(self):
        return self.dataset['train']

    def validation_docs(self):
        return self.dataset['validation']

    def test_docs(self):
        return self.dataset['test']

    def doc_to_text(self, doc):
        return doc['source']

    def should_decontaminate(self):
        return False
    def doc_to_target(self, doc):
        return doc['target']
    def construct_requests(self, doc, ctx, **kwargs):
        return [Instance(
            request_type='generate_until', doc=doc,
            arguments=(ctx, {'until': ['\n']}), idx=0, **kwargs,
        )]

    def process_results(self, doc, results):
        continuation = results[0]
        predictions = continuation
        references = doc
        return {metric.replace('metrics.', ''): (predictions, references) for metric in self.metrics}

    def aggregation(self):
        return {metric.replace('metrics.', ''): partial(score, metric=metric) for metric in self.metrics}
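# How the pieces fit together (informal sketch, not part of the original file):
# for every document, process_results emits one (prediction, doc) pair per
# unitxt metric listed in the dataset, and aggregation() maps each metric name
# to score(items, metric=...), so the unitxt evaluator is invoked once per
# metric over all collected pairs. Instantiating the task is assumed to look
# roughly like:
#
#   task = Unitxt(config={'recipe': 'card=cards.wnli,template=templates.classification.multi_class.relation.default'})
#
# where the recipe string above is only a hypothetical example of a unitxt
# card/template pair.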