from dataclasses import dataclass, make_dataclass

from src.display.about import create_task_list


def fields(raw_class):
    """Return the non-dunder class attributes, i.e. the ColumnContent defaults."""
    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]


# These classes hold the user-facing column names, so that a rename
# only has to be made here rather than everywhere in the code
# when a modification is needed.
@dataclass
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False
    dummy: bool = False


Tasks, Groups = create_task_list()

## Leaderboard columns
auto_eval_column_dict = []
# Init
auto_eval_column_dict.append(["model_submission_date", ColumnContent, ColumnContent("Submission Date", "str", True, never_hidden=True)])
auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
# Scores
auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
for task in Tasks:
    auto_eval_column_dict.append([task.benchmark, ColumnContent, ColumnContent(task.col_name, "number", True)])
# Dummy column for the search bar (hidden by the custom CSS)
auto_eval_column_dict.append(["dummy", ColumnContent, ColumnContent("model_name_for_query", "str", False, dummy=True)])

# We use make_dataclass to dynamically fill in the score columns from Tasks
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)


## For the queue columns in the submission tab
@dataclass(frozen=True)
class EvalQueueColumn:  # Queue column
    model = ColumnContent("model", "markdown", True)
    submitted_time = ColumnContent("submitted_time", "str", True)
    status = ColumnContent("status", "str", True)


# Column selection
COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
TYPES = [c.type for c in fields(AutoEvalColumn) if not c.hidden]
EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
BENCHMARK_COLS = [t.col_name for t in Tasks]

## Leaderboard columns, for grouping
auto_eval_group_dict = []
# Init
auto_eval_group_dict.append(["model_submission_date", ColumnContent, ColumnContent("Submission Date", "str", True, never_hidden=True)])
auto_eval_group_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
# Scores
auto_eval_group_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
for task in Groups:
    auto_eval_group_dict.append([task.benchmark, ColumnContent, ColumnContent(task.col_name, "number", True)])
# Dummy column for the search bar (hidden by the custom CSS)
auto_eval_group_dict.append(["dummy", ColumnContent, ColumnContent("model_name_for_query", "str", False, dummy=True)])

# We use make_dataclass to dynamically fill in the score columns from Groups
AutoEvalColumnGroup = make_dataclass("AutoEvalColumnGroup", auto_eval_group_dict, frozen=True)


## For the queue columns in the submission tab
@dataclass(frozen=True)
class EvalQueueColumnGroup:  # Queue column
    model = ColumnContent("model", "markdown", True)
    submitted_time = ColumnContent("submitted_time", "str", True)
    status = ColumnContent("status", "str", True)


# Column selection
COLS_GROUP = [c.name for c in fields(AutoEvalColumnGroup) if not c.hidden]
TYPES_GROUP = [c.type for c in fields(AutoEvalColumnGroup) if not c.hidden]
EVAL_COLS_GROUP = [c.name for c in fields(EvalQueueColumnGroup)]
EVAL_TYPES_GROUP = [c.type for c in fields(EvalQueueColumnGroup)]
BENCHMARK_COLS_GROUP = [t.col_name for t in Groups]
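

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how the selections above are typically
# consumed by a leaderboard UI: COLS/EVAL_COLS seed the table headers and
# TYPES/EVAL_TYPES carry the per-column display types ("str", "markdown",
# "number"). The pandas dependency here is an assumption for the demo;
# this module itself does not import it.
if __name__ == "__main__":
    import pandas as pd

    # One column per visible ColumnContent (hidden columns are filtered out).
    leaderboard_df = pd.DataFrame(columns=COLS)
    print("leaderboard columns:", leaderboard_df.columns.tolist())
    print("leaderboard types:  ", TYPES)

    # The submission-tab queue table uses the EVAL_* selections.
    queue_df = pd.DataFrame(columns=EVAL_COLS)
    print("queue columns:", queue_df.columns.tolist())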