raise ValueError(f"Attempted to use contextsampler '{name}', but no sampling strategy for this name found! Supported model names: {', '.join(SAMPLER_REGISTRY.keys())}")
# File: lm-evaluation-harness-main/lm_eval/api/task.py
import abc
import ast
import logging
import random
import re
from collections.abc import Callable
from copy import deepcopy
from dataclasses import asdict, dataclass
from inspect import getsource
from typing import Any, Dict, Iterable, Iterator, List, Literal, Mapping, Optional, Tuple, Union
import datasets
import numpy as np
from tqdm import tqdm
from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance, OutputType
from lm_eval.api.metrics import bits_per_byte, mean, weighted_perplexity
from lm_eval.api.registry import AGGREGATION_REGISTRY, DEFAULT_METRIC_REGISTRY, get_aggregation, get_metric, get_metric_aggregation, is_higher_better
from lm_eval.caching.cache import load_from_cache, save_to_cache
from lm_eval.filters import build_filter_ensemble
from lm_eval.prompts import get_prompt
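# The four request types an evaluation task can emit (the `OutputType` literal from lm_eval.api.instance).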
ALL_OUTPUT_TYPES = ['loglikelihood', 'multiple_choice', 'loglikelihood_rolling', 'generate_until']
eval_logger = logging.getLogger('lm-eval')
@dataclass
class TaskConfig(dict):
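    """Declarative configuration for a single evaluation task.

    The fields below mirror the keys accepted in a task YAML file: where the
    dataset lives, how a document is rendered into prompt and target, how
    few-shot examples are drawn, which metrics are computed, and how text is
    generated for `generate_until` tasks.
    """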
    task: Optional[str] = None
    task_alias: Optional[str] = None
    tag: Optional[Union[str, list]] = None
    group: Optional[Union[str, list]] = None
    dataset_path: Optional[str] = None
    dataset_name: Optional[str] = None
    dataset_kwargs: Optional[dict] = None
    training_split: Optional[str] = None
    validation_split: Optional[str] = None
    test_split: Optional[str] = None
    fewshot_split: Optional[str] = None
    process_docs: Optional[Callable] = None
    doc_to_text: Optional[Union[Callable, str]] = None
    doc_to_target: Optional[Union[Callable, str]] = None
    doc_to_choice: Optional[Union[Callable, str, dict, list]] = None
    process_results: Optional[Union[Callable, str]] = None
    use_prompt: Optional[str] = None
    description: str = ''
    target_delimiter: str = ' '
    fewshot_delimiter: str = '\n\n'
    fewshot_config: Optional[dict] = None
    num_fewshot: Optional[int] = None
    metric_list: Optional[list] = None
    output_type: OutputType = 'generate_until'
    generation_kwargs: Optional[dict] = None
    repeats: int = 1
    filter_list: Optional[Union[str, list]] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: Optional[str] = None
    metadata: Optional[dict] = None
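    # Illustrative sketch (not a shipped task): a minimal generation-style config
    # could be built directly as
    #   TaskConfig(task='my_task', dataset_path='path/to/dataset',
    #              doc_to_text='{{question}}', doc_to_target='{{answer}}',
    #              output_type='generate_until', num_fewshot=0)
    # using the same keys that a task YAML file would supply.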
    def __post_init__(self) -> None:
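        """Validate and normalize the config after dataclass initialization:
        migrate the deprecated `group` key to `tag` and fill in default
        `generation_kwargs` where needed."""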
        if self.group is not None:
            eval_logger.warning('A task YAML file was found to contain a `group` key. Groups which provide aggregate scores over several subtasks now require a separate config file--if not aggregating, you may want to use the `tag` config option instead within your config. Setting `group` within a TaskConfig will be deprecated in v0.4.4. Please see https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/task_guide.md for more information.')
            if self.tag is None:
                self.tag = self.group
            else:
                raise ValueError('Got both a `group` and `tag` entry within a TaskConfig. Please use one or the other--`group` values will be deprecated in v0.4.4.')
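        # Normalize generation settings: coerce `temperature` to float, default the
        # stop sequence (`until`) to the few-shot delimiter, and fall back to greedy
        # decoding when no kwargs were given for a `generate_until` task.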
        if self.generation_kwargs is not None:
            if self.output_type != 'generate_until':
                eval_logger.warning(f'[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!')
            if 'temperature' in self.generation_kwargs:
                self.generation_kwargs['temperature'] = float(self.generation_kwargs['temperature'])
            if 'until' not in self.generation_kwargs:
                self.generation_kwargs['until'] = [self.fewshot_delimiter]
        elif self.output_type == 'generate_until':
            self.generation_kwargs = {'until': None if self.fewshot_delimiter is None else [self.fewshot_delimiter], 'do_sample': False}
    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)
    def to_dict(self, keep_callable: bool = False) -> dict:
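        """Dump the config as a plain dict, dropping fields that are unset (None).
        Callables (e.g. `process_docs` or custom metric functions) are kept as-is
        or serialized via `serialize_function`, depending on `keep_callable`."""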
        cfg_dict = asdict(self)
        for (k, v) in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
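            # metric_list entries are dicts whose values may themselves be callables
            # (custom metric or aggregation functions); serialize each one individually.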
            elif k == 'metric_list':
                for metric_dict in v:
                    for (metric_key, metric_value) in metric_dict.items():
                        if callable(metric_value):
                            metric_dict[metric_key] = self.serialize_function(metric_value, keep_callable=keep_callable)
                cfg_dict[k] = v
            elif callable(v):
                cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
        return cfg_dict
    def serialize_function(self, value: Union[Callable, str], keep_callable=False) -> Union[Callable, str]:
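        """Return `value` unchanged when `keep_callable` is True; otherwise convert
        a callable to a string form (presumably its source text, given the
        `getsource` import above)."""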
        if keep_callable: