    reference = clean(docs['span1_text'])
    # If exactly one of the two strings is possessive (contains an apostrophe)
    # and the other is not, count the prediction as wrong outright.
    if ("'" in prediction) != ("'" in reference):
        predicted_referent = False
    else:
        # Otherwise the prediction matches when its word set is a subset or a
        # superset of the gold span's word set.
        prediction_words = set(prediction.split(' '))
        referent_words = set(reference.split(' '))
        predicted_referent = prediction_words.issubset(referent_words) or referent_words.issubset(prediction_words)
    acc = 1.0 if predicted_referent == docs['label'] else 0.0
    return {'accuracy': acc}
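# Hedged illustration, not part of the original file: a minimal standalone sketch of
# the referent check above. `_referent_match_sketch` is a hypothetical helper and the
# example strings are invented.
def _referent_match_sketch(prediction: str, reference: str) -> bool:
    if ("'" in prediction) != ("'" in reference):
        return False
    prediction_words = set(prediction.split(' '))
    referent_words = set(reference.split(' '))
    return prediction_words.issubset(referent_words) or referent_words.issubset(prediction_words)

assert _referent_match_sketch('the city councilmen', 'city councilmen') is True
assert _referent_match_sketch('the demonstrators', 'city councilmen') is False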
# File: lm-evaluation-harness-main/lm_eval/tasks/swde/task.py
import re
from typing import List

import numpy as np

from lm_eval.api.instance import Instance
from lm_eval.api.task import ConfigurableTask


class SWDE(ConfigurableTask):
    VERSION = 0
    DATASET_PATH = 'hazyresearch/based-swde-v2'
    DATASET_NAME = 'default'

    def __init__(self, **kwargs):
        super().__init__(config={'metadata': {'version': self.VERSION}})

    def has_training_docs(self):
        return False

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def validation_docs(self):
        return self.dataset['validation']

    def doc_to_text(self, doc):
        return doc['text']

    def doc_to_target(self, doc):
        return doc['value']

    def construct_requests(self, doc, ctx, **kwargs):
        return [
            Instance(
                request_type='generate_until',
                doc=doc,
                arguments=(ctx, {'until': ['\n'], 'max_gen_toks': 48}),
                idx=0,
                **kwargs,
            )
        ]

    def process_results(self, doc, results):
        continuation = results
        return {'contains': contains_score(continuation[0], [doc['value']])}

    def aggregation(self):
        return {'contains': np.mean}

    def higher_is_better(self):
        return {'contains': True}


def contains_score(prediction: str, labels: List[str]):
    return max(
        int(bool(re.search(re.compile(re.escape(label), re.IGNORECASE), prediction)))
        for label in labels
    )
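# Hedged usage example, not part of the original file: contains_score performs a
# case-insensitive, regex-escaped containment check of each gold value against the
# generated continuation. The sample strings below are invented for illustration.
assert contains_score('Founded in 1998 by Larry Page and Sergey Brin', ['Larry Page']) == 1
assert contains_score('FOUNDED BY LARRY PAGE', ['larry page']) == 1
assert contains_score('No extraction produced', ['Larry Page']) == 0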
# File: lm-evaluation-harness-main/lm_eval/tasks/tinyBenchmarks/agg_functions.py
from typing import List

import numpy as np

try:
    import tinyBenchmarks as tb
except ModuleNotFoundError:
    raise ModuleNotFoundError(
        '`tinyBenchmarks` is required for tinyBenchmarks task metric calculation, install via `pip install git+https://github.com/felipemaiapolo/tinyBenchmarks`'
    )
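# Hedged sketch, not part of the original file: the per-benchmark aggregators defined
# below all share one shape (convert the per-item scores to an array, run the
# tinyBenchmarks IRT-based estimator, return that benchmark's 'gpirt' or 'pirt'
# estimate). `_make_gpirt_agg` is a hypothetical factory name used only to illustrate
# that shared pattern.
def _make_gpirt_agg(benchmark: str):
    def _agg(items: List[float]) -> float:
        predictions = tb.evaluate(np.array(items), benchmark)
        return predictions[benchmark]['gpirt']
    return _agg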
def agg_pirt(items: List[float], benchmark: str) -> float:
    items = np.array(items)
    predictions = tb.evaluate(items, benchmark)
    return predictions[benchmark]['pirt']


def agg_gpirt_arc(items: List[float], benchmark: str = 'arc') -> float:
    items = np.array(items)
    predictions = tb.evaluate(items, benchmark)
    return predictions[benchmark]['gpirt']


def agg_gpirt_gsm8k(items: List[float], benchmark: str = 'gsm8k') -> float:
    items = np.array(items)
    predictions = tb.evaluate(items, benchmark)
    return predictions[benchmark]['gpirt']


def agg_gpirt_hellaswag(items: List[float], benchmark: str = 'hellaswag') -> float:
    items = np.array(items)
    predictions = tb.evaluate(items, benchmark)
    return predictions[benchmark]['gpirt']


def agg_gpirt_mmlu(items: List[float], benchmark: str = 'mmlu') -> float:
    items = np.array(items)
    predictions = tb.evaluate(items, benchmark)
    return predictions[benchmark]['gpirt']


def agg_gpirt_truthfulqa(items: List[float], benchmark: str = 'truthfulqa') -> float:
    items = np.array(items)
    predictions = tb.evaluate(items, benchmark)
    return predictions[benchmark]['gpirt']
def agg_gpirt_winogrande(items: List[float], benchmark: str = 'winogrande') -> float:
    items = np.array(items)
    predictions = tb.evaluate(items, benchmark)