text
stringlengths 0
15.3k
|
---|
if hasattr(self, 'sampler'): |
self.sampler.rnd = self.fewshot_rnd |
@property
def eval_docs(self) -> Union[datasets.Dataset, List[dict]]:
    """Documents to evaluate on: the test split when one exists, else the
    validation split. Raises if the task provides neither."""
    if self.has_test_docs():
        return self.test_docs()
    if self.has_validation_docs():
        return self.validation_docs()
    raise ValueError(f'Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!')
def doc_iterator(self, *, rank: int=0, limit: Union[int, None]=None, world_size: int=1) -> Iterator[Tuple[int, Any]]:
    """Yield ``(index, doc)`` pairs over :pyattr:`eval_docs`, sharded to this
    ``rank`` out of ``world_size`` workers and truncated to ``limit`` docs.

    NOTE: a falsy ``limit`` (``0`` or ``None``) disables truncation entirely.
    """
    normalized_limit = int(limit) if limit else None
    return utils.create_iterator(enumerate(self.eval_docs), rank=int(rank), limit=normalized_limit, world_size=int(world_size))
class ConfigurableTask(Task):
    # Version is the YAML config itself rather than a number: behavior is
    # fully determined by the task's config file.
    VERSION = 'Yaml'
    # Set from config in __init__; must be one of ALL_OUTPUT_TYPES.
    OUTPUT_TYPE = None
    # Subclasses may pre-populate a TaskConfig here; otherwise the `config`
    # kwarg to __init__ is required.
    CONFIG = None

    def __init__(self, data_dir=None, cache_dir=None, download_mode=None, config: Optional[dict]=None) -> None:
        """Construct a task from ``cls.CONFIG`` and/or a ``config`` dict:
        resolves output type, dataset path/name, per-metric callables,
        aggregations and directions, downloads the dataset, and builds
        filter pipelines.

        NOTE(review): ``data_dir``, ``cache_dir`` and ``download_mode`` are
        accepted but unused in the visible portion of this method — confirm
        downstream use before removing.
        """
        # Start from the class-level CONFIG; a `config` dict either becomes
        # the whole config (when CONFIG is unset) or is merged in as overrides.
        self._config = self.CONFIG
        if self.config is None:
            # NOTE(review): if `config` is also None, TaskConfig(**None) raises
            # TypeError here, so the explicit ValueError below may be
            # unreachable — confirm intended behavior.
            self._config = TaskConfig(**config)
        elif config is not None:
            self._config.__dict__.update(config)
        if self.config is None:
            raise ValueError('Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg')
        if isinstance(self.config.metadata, dict):
            if 'version' in self.config.metadata:
                self.VERSION = self.config.metadata['version']
        if self.config.output_type is not None:
            if self.config.output_type not in ALL_OUTPUT_TYPES:
                raise ValueError(f"Got invalid output_type '{self.config.output_type}', must be in '{','.join(ALL_OUTPUT_TYPES)}'")
            self.OUTPUT_TYPE = self.config.output_type
        if self.config.dataset_path is not None:
            self.DATASET_PATH = self.config.dataset_path
        if self.config.dataset_name is not None:
            self.DATASET_NAME = self.config.dataset_name
        # Per-metric registries: metric callable, its kwargs, its aggregation
        # function, and whether a higher score is better.
        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}
        if self.config.metric_list is None:
            # No metrics configured: fall back to the registered defaults for
            # this output type.
            _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]
            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._metric_fn_kwargs[metric_name] = {}
                self._aggregation_list[metric_name] = get_metric_aggregation(metric_name)
                self._higher_is_better[metric_name] = is_higher_better(metric_name)
        else:
            for metric_config in self.config.metric_list:
                if 'metric' not in metric_config:
                    raise ValueError("'metric' key not provided for an entry in 'metric_list', must be specified!")
                metric_name = metric_config['metric']
                # Any remaining entry keys (besides the bookkeeping ones) are
                # forwarded as kwargs to the metric function.
                kwargs = {key: metric_config[key] for key in metric_config if key not in ['metric', 'aggregation', 'higher_is_better', 'hf_evaluate']}
                hf_evaluate_metric = 'hf_evaluate' in metric_config and metric_config['hf_evaluate'] is True
                if self.config.process_results is not None:
                    # The task computes its results itself; no metric callable
                    # is needed, only the metric's name for bookkeeping.
                    self._metric_fn_list[metric_name] = None
                    self._metric_fn_kwargs[metric_name] = {}
                elif callable(metric_name):
                    # A metric callable was supplied directly; key the
                    # registries by its __name__.
                    metric_fn = metric_name.__call__
                    metric_name = metric_name.__name__
                    self._metric_fn_list[metric_name] = metric_fn
                    self._metric_fn_kwargs[metric_name] = kwargs
                else:
                    self._metric_fn_list[metric_name] = get_metric(metric_name, hf_evaluate_metric)
                    self._metric_fn_kwargs[metric_name] = kwargs
                if 'aggregation' in metric_config:
                    agg_name = metric_config['aggregation']
                    if isinstance(agg_name, str):
                        self._aggregation_list[metric_name] = get_aggregation(agg_name)
                    elif callable(agg_name):
                        self._aggregation_list[metric_name] = metric_config['aggregation']
                else:
                    # No aggregation specified: warn and use the metric's
                    # registered default aggregation.
                    INV_AGG_REGISTRY = {v: k for (k, v) in AGGREGATION_REGISTRY.items()}
                    metric_agg = get_metric_aggregation(metric_name)
                    eval_logger.warning(f'[Task: {self.config.task}] metric {metric_name} is defined, but aggregation is not. using default aggregation={INV_AGG_REGISTRY[metric_agg]}')
                    self._aggregation_list[metric_name] = metric_agg
                if 'higher_is_better' in metric_config:
                    self._higher_is_better[metric_name] = metric_config['higher_is_better']
                else:
                    eval_logger.warning(f'[Task: {self.config.task}] metric {metric_name} is defined, but higher_is_better is not. using default higher_is_better={is_higher_better(metric_name)}')
                    self._higher_is_better[metric_name] = is_higher_better(metric_name)
        self.download(self.config.dataset_kwargs)
        # Lazily-populated document caches.
        self._training_docs = None
        self._fewshot_docs = None
        if self.config.filter_list is not None:
            self._filters = []
            for filter_config in self.config.filter_list:
                filter_name = filter_config['name']
                filter_functions = filter_config['filter']
                components = []
                for function in filter_functions:
                    # Every key except 'function' is a kwarg for that filter step.
                    kwargs = {key: function[key] for key in function if key != 'function'}
                    components.append([function['function'], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)