import math
import re
import string
import numpy as np

# NOTE: `register_metric` and the `mean` helper used later in this module are
# defined earlier in the module (not shown here); most metric functions below
# are passthroughs whose real reduction happens in the registered aggregation.

def brier_score_fn(items):
    return items

@register_metric(metric='acc', higher_is_better=True, output_type=['loglikelihood', 'multiple_choice'], aggregation='mean')
def acc_fn(items):
    return items

@register_metric(metric='acc_norm', higher_is_better=True, output_type=['loglikelihood', 'multiple_choice'], aggregation='mean')
def acc_norm_fn(items):
    return items

@register_metric(metric='acc_mutual_info', higher_is_better=True, output_type='multiple_choice', aggregation='mean')
def acc_mutual_info_fn(items):
    return items
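
# Illustrative example (not part of the module): the acc-style metric functions
# above are identity passthroughs over per-example 0/1 scores produced upstream;
# the registered 'mean' aggregation performs the actual reduction.
def _example_mean_aggregation():
    items = acc_fn([1, 0, 1, 1])    # passthrough: [1, 0, 1, 1]
    return sum(items) / len(items)  # 'mean' aggregation -> 0.75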
def exact_match_hf_evaluate(predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False):
    # Normalize both sides as requested, then score element-wise string equality.
    if regexes_to_ignore is not None:
        for s in regexes_to_ignore:
            predictions = np.array([re.sub(s, '', x) for x in predictions])
            references = np.array([re.sub(s, '', x) for x in references])
    else:
        predictions = np.asarray(predictions)
        references = np.asarray(references)
    if ignore_case:
        predictions = np.char.lower(predictions)
        references = np.char.lower(references)
    if ignore_punctuation:
        repl_table = string.punctuation.maketrans('', '', string.punctuation)
        predictions = np.char.translate(predictions, table=repl_table)
        references = np.char.translate(references, table=repl_table)
    if ignore_numbers:
        repl_table = string.digits.maketrans('', '', string.digits)
        predictions = np.char.translate(predictions, table=repl_table)
        references = np.char.translate(references, table=repl_table)
    score_list = predictions == references
    return {'exact_match': np.mean(score_list)}

@register_metric(metric='exact_match', higher_is_better=True, output_type='generate_until', aggregation='mean')
def exact_match_fn(**kwargs):
    return exact_match_hf_evaluate(**kwargs)
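
# Example usage (illustrative, not part of the module): normalize both sides,
# then report the fraction of exact string matches.
def _example_exact_match():
    out = exact_match_hf_evaluate(
        predictions=['The answer is 42.', 'Paris'],
        references=['the answer is 42', 'Paris'],
        ignore_case=True,
        ignore_punctuation=True,
    )
    return out['exact_match']  # -> 1.0 once case and punctuation are ignored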
@register_metric(metric='perplexity', higher_is_better=False, output_type='loglikelihood', aggregation='perplexity')
def perplexity_fn(items):
    return items

@register_metric(metric='word_perplexity', higher_is_better=False, output_type='loglikelihood_rolling', aggregation='weighted_perplexity')
def word_perplexity_fn(items):
    return items

@register_metric(metric='byte_perplexity', higher_is_better=False, output_type='loglikelihood_rolling', aggregation='weighted_perplexity')
def byte_perplexity_fn(items):
    return items

@register_metric(metric='bits_per_byte', higher_is_better=False, output_type='loglikelihood_rolling', aggregation='bits_per_byte')
def bits_per_byte_fn(items):
    return items
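
# Illustrative sketch (assumption, not part of the module): the rolling
# perplexity metrics pass (loglikelihood, weight) pairs to their aggregations,
# where the weight is a word or byte count. A common formulation of those
# reductions:
def _example_weighted_perplexity(items):
    lls, weights = zip(*items)
    return math.exp(-sum(lls) / sum(weights))

def _example_bits_per_byte(items):
    lls, num_bytes = zip(*items)
    return -sum(lls) / (sum(num_bytes) * math.log(2))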
def pop_stddev(arr):
    mu = mean(arr)
    return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / len(arr))

def sample_stddev(arr):
    mu = mean(arr)
    return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / (len(arr) - 1))

def mean_stderr(arr):
    return sample_stddev(arr) / math.sqrt(len(arr))
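
# Example usage (illustrative): population vs. sample standard deviation and
# the standard error of the mean for a small list of 0/1 scores.
def _example_stderr():
    scores = [1, 0, 1, 1, 0]
    # pop_stddev(scores)    ~= 0.4899
    # sample_stddev(scores) ~= 0.5477
    # mean_stderr(scores)   ~= 0.2449 (sample_stddev / sqrt(5))
    return pop_stddev(scores), sample_stddev(scores), mean_stderr(scores)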
@register_metric(metric='bypass', higher_is_better=True, output_type=['loglikelihood', 'multiple_choice', 'generate_until'], aggregation='bypass')
def bypass(items):
    return None

@register_metric(metric='mcc', higher_is_better=True, output_type='multiple_choice', aggregation='matthews_corrcoef')
def mcc_fn(items):
    return items

@register_metric(metric='f1', higher_is_better=True, output_type='multiple_choice', aggregation='f1')
def f1_fn(items):
    return items
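
# Illustrative sketch (assumption: the 'matthews_corrcoef' and 'f1'
# aggregations reduce (gold, pred) pairs with scikit-learn):
def _example_mcc_f1(items):
    import sklearn.metrics
    golds, preds = zip(*items)
    mcc = sklearn.metrics.matthews_corrcoef(golds, preds)
    f1 = sklearn.metrics.f1_score(golds, preds)
    return mcc, f1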
@register_metric(metric='bleu', higher_is_better=True, output_type='generate_until', aggregation='bleu')
def bleu_fn(items):
    return items

@register_metric(metric='chrf', higher_is_better=True, output_type='generate_until', aggregation='chrf')
def chrf_fn(items):
    return items

@register_metric(metric='ter', higher_is_better=True, output_type='generate_until', aggregation='ter')
def ter_fn(items):
    return items
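
# Illustrative sketch (assumption: the corpus-level 'bleu', 'chrf', and 'ter'
# aggregations are computed with sacrebleu over (reference, prediction) pairs):
def _example_corpus_translation_metrics(items):
    import sacrebleu
    refs, preds = zip(*items)
    preds = list(preds)
    refs = [list(refs)]  # a single reference stream, one reference per prediction
    bleu = sacrebleu.corpus_bleu(preds, refs).score
    chrf = sacrebleu.corpus_chrf(preds, refs).score
    ter = sacrebleu.corpus_ter(preds, refs).score
    return bleu, chrf, ter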
@register_metric(metric='acc_all', higher_is_better=True, output_type='loglikelihood', aggregation='mean')
def acc_all(items):
    # Group per-option predictions by their (paragraph, question) indices.
    question_scoring_dict = {}
    preds = list(zip(*items))[0]
    docs = list(zip(*items))[1]
    for (doc, pred) in zip(docs, preds):
        paragraph_id = doc['idx']['paragraph']
        question_id = doc['idx']['question']