return predictions[benchmark]['gpirt']

# File: lm-evaluation-harness-main/lm_eval/tasks/tinyBenchmarks/utils_hellaswag.py
import re

import datasets


def preprocess(text):
    text = text.strip()
    # Turn ' [title]' markers into sentence breaks, strip any other bracketed
    # tags, and collapse the double spaces left behind.
    text = text.replace(' [title]', '. ')
    text = re.sub('\\[.*?\\]', '', text)
    text = text.replace('  ', ' ')
    return text
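
# Illustrative example (not part of the harness source; the input string is
# invented): bracketed tags are removed and the leftover double space collapsed,
# e.g. preprocess('He grabs a [step] hose and sprays.')
# returns 'He grabs a hose and sprays.'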


def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _process_doc(doc):
        ctx = doc['ctx_a'] + ' ' + doc['ctx_b'].capitalize()
        out_doc = {
            'query': preprocess(doc['activity_label'] + ': ' + ctx),
            'choices': [preprocess(ending) for ending in doc['endings']],
            'gold': int(doc['label']),
        }
        return out_doc

    return dataset.map(_process_doc)
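
# Illustrative sketch (not part of the harness source; field values invented):
# given a doc like
#   {'activity_label': 'Baking', 'ctx_a': 'A person preheats the oven,',
#    'ctx_b': 'then mixes the batter.', 'endings': [...], 'label': '0'},
# the mapped row gains
#   query   == 'Baking: A person preheats the oven, Then mixes the batter.'
#   choices == the preprocessed endings
#   gold    == 0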


# File: lm-evaluation-harness-main/lm_eval/tasks/tinyBenchmarks/utils_truthfulqa.py
import datasets
import numpy as np
import sacrebleu
from rouge_score import rouge_scorer, scoring

ROUGE_SCORER = None


def process_results_mc2(doc, results):
    lls, is_greedy = zip(*results)
    # mc2_targets lists the correct answers (label 1) first, then the incorrect
    # ones (label 0), so the first 0 marks the true/false split point.
    split_idx = list(doc['mc2_targets']['labels']).index(0)
    ll_true, ll_false = lls[:split_idx], lls[split_idx:]
    p_true, p_false = np.exp(np.array(ll_true)), np.exp(np.array(ll_false))
    # Normalize over all answer probabilities and report the mass assigned to
    # the correct answers.
    p_true = p_true / (sum(p_true) + sum(p_false))
    return {'acc': sum(p_true)}
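
# For intuition (numbers invented): with log-likelihoods (-1.0, -2.0) for two
# correct mc2 targets and (-3.0,) for one incorrect target,
#   p_true ~= [0.368, 0.135] and p_false ~= [0.050],
# so acc = (0.368 + 0.135) / (0.368 + 0.135 + 0.050) ~= 0.91.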


def process_docs_gen(dataset: datasets.Dataset) -> datasets.Dataset:
    return dataset.map(preprocess_function)


def preprocess_function(examples):
    def _format_answers(answers):
        formatted_answers = []
        for answer in answers:
            answer = answer.strip()
            if len(answer):
                if answer[-1] != '.':
                    formatted_answers.append(answer + '.')
                else:
                    formatted_answers.append(answer)
        return formatted_answers

    incorrect_answers = _format_answers(examples['incorrect_answers'])
    correct_answers = _format_answers(examples['correct_answers'])
    if 'I have no comment.' not in correct_answers:
        correct_answers.append('I have no comment.')
    return {
        'question': examples['question'].strip(),
        'correct_answers': correct_answers,
        'incorrect_answers': incorrect_answers,
    }
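
# Illustrative example (not part of the harness source; answers invented):
#   _format_answers(['Nothing happens', 'You digest it.'])
#   -> ['Nothing happens.', 'You digest it.']
# preprocess_function then appends 'I have no comment.' to the correct answers
# if it is not already present.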


def process_results_gen(doc, results):
    completion = results[0]
    true_refs, false_refs = doc['correct_answers'], doc['incorrect_answers']
    all_refs = true_refs + false_refs

    # BLEU: for each metric family, *_max is the best score against any correct
    # reference, *_acc records whether the best correct reference outscores the
    # best incorrect one, and *_diff is that margin.
    bleu_scores = [bleu([[ref]], [completion]) for ref in all_refs]
    bleu_correct = np.nanmax(bleu_scores[:len(true_refs)])
    bleu_incorrect = np.nanmax(bleu_scores[len(true_refs):])
    bleu_max = bleu_correct
    bleu_diff = bleu_correct - bleu_incorrect
    bleu_acc = int(bleu_correct > bleu_incorrect)

    # ROUGE-1 / ROUGE-2 / ROUGE-L, scored the same way.
    rouge_scores = [rouge([ref], [completion]) for ref in all_refs]
    rouge1_scores = [score['rouge1'] for score in rouge_scores]
    rouge1_correct = np.nanmax(rouge1_scores[:len(true_refs)])
    rouge1_incorrect = np.nanmax(rouge1_scores[len(true_refs):])
    rouge1_max = rouge1_correct
    rouge1_diff = rouge1_correct - rouge1_incorrect
    rouge1_acc = int(rouge1_correct > rouge1_incorrect)
    rouge2_scores = [score['rouge2'] for score in rouge_scores]
    rouge2_correct = np.nanmax(rouge2_scores[:len(true_refs)])
    rouge2_incorrect = np.nanmax(rouge2_scores[len(true_refs):])
    rouge2_max = rouge2_correct
    rouge2_diff = rouge2_correct - rouge2_incorrect
    rouge2_acc = int(rouge2_correct > rouge2_incorrect)
    rougeL_scores = [score['rougeLsum'] for score in rouge_scores]
    rougeL_correct = np.nanmax(rougeL_scores[:len(true_refs)])
    rougeL_incorrect = np.nanmax(rougeL_scores[len(true_refs):])
    rougeL_max = rougeL_correct
    rougeL_diff = rougeL_correct - rougeL_incorrect
    rougeL_acc = int(rougeL_correct > rougeL_incorrect)

    return {
        'bleu_max': bleu_max,
        'bleu_acc': bleu_acc,
        'bleu_diff': bleu_diff,
        'rouge1_max': rouge1_max,
        'rouge1_acc': rouge1_acc,
        'rouge1_diff': rouge1_diff,
        'rouge2_max': rouge2_max,
        'rouge2_acc': rouge2_acc,
        'rouge2_diff': rouge2_diff,
        'rougeL_max': rougeL_max,
        'rougeL_acc': rougeL_acc,
        'rougeL_diff': rougeL_diff,
    }


def bleu(refs, preds):
    score = sacrebleu.corpus_bleu(
        preds, refs, smooth_method='exp', smooth_value=0.0, force=False,
        lowercase=False, tokenize='intl', use_effective_order=False,
    ).score
    return score
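
# Usage sketch (not part of the harness source): sacrebleu's corpus_bleu score
# lies in [0, 100], so an exact match such as
#   bleu([['Paris is the capital of France.']], ['Paris is the capital of France.'])
# returns 100.0.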


def rouge(refs, preds):
    rouge_types = ['rouge1', 'rouge2', 'rougeLsum']
    global ROUGE_SCORER
    if ROUGE_SCORER is None:
        ROUGE_SCORER = rouge_scorer.RougeScorer(rouge_types)
    scorer = ROUGE_SCORER